def typing_ads_menu(search=""):
    """Builds the Alfred menu shown while the user is still typing a query.

    Offers the current query as a search action, followed by any cached
    previous searches whose text contains every word of the input.
    """
    import os
    import base64
    query = search.lower().strip()
    query_words = query.split(" ")

    # Collect matching previous searches; every *.cache file in storage
    # corresponds to one cached query.
    recalled = []
    for entry in os.listdir(alp.storage()):
        stem, extension = os.path.splitext(entry)
        if extension != ".cache":
            continue
        # Cache filenames are the urlsafe-base64 of the original query.
        old_search = base64.urlsafe_b64decode(stem)
        old_lower = old_search.lower()
        # Skip the exact same query; require every typed word to appear.
        if query == old_lower:
            continue
        if all(word in old_lower for word in query_words):
            recalled.append(
                alp.Item(title="'" + old_search + "'",
                         subtitle="Recall stored search '" + old_search + "'",
                         valid="no",
                         autocomplete=old_search + alfred_delim))

    current = alp.Item(title="Search for '" + search + "'",
                       subtitle="Search for the current query",
                       valid="no",
                       autocomplete=search + alfred_delim)
    return [current] + recalled
def author_ads_menu(key="", authors=""):
    """Returns an Alfred context menu populated with authors"""
    import urllib
    # The bibtex 'author' field separates individual authors with " and ".
    items = []
    for name in authors.split(" and "):
        if name == "others":
            # Trailing "et al." entry: link to the ADS abstract page instead.
            items.append(
                alp.Item(
                    title="More authors",
                    subtitle="Open the ADS page for all authors of the paper",
                    arg=encode_arguments(
                        type='url',
                        value="http://adsabs.harvard.edu/abs/" + key)  # ask author about bibid
                ))
        else:
            items.append(
                alp.Item(title=name,
                         subtitle="Find more papers of author",
                         valid="no",
                         autocomplete="author:" + name + alfred_delim))
    return items
def set_token(q=""):
    """Alfred item for viewing or setting the ADS API token.

    Args:
        q: the token typed so far; empty shows an instruction item.

    Returns:
        An alp.Item that, when actioned, stores the token via a 'setting'
        argument and raises a notification.
    """
    if q == "":
        return alp.Item(title="Begin typing to set the ADS API token",
                        subtitle="Current value is " + get_token_setting(),
                        valid="no")
    return alp.Item(
        title="Set ADS API token to " + q,
        subtitle="Current value is " + get_token_setting(),
        arg=encode_arguments(type="setting",
                             value={'token': q},
                             notification={
                                 'title': 'Setting changed',
                                 # Bug fix: missing space before the token value.
                                 'text': 'New API token is ' + q
                             }))
def item(route):
    """Converts a HubSpot route dict into an alp.Item opening the app page."""
    path = route.get('path')
    return alp.Item(title=route.get('title'),
                    subtitle=route.get('description'),
                    arg='https://app.hubspot.com%s' % path,
                    uid=path,
                    valid=True)
def format_results(dl):
    """Formats a download record as an alp.Item.

    dl is a sequence: (path, title, download-timestamp, color, tags).
    The subtitle reports how long ago the file was downloaded, using the
    largest whole unit (days / hours / minutes / seconds).
    """
    elapsed = time.time() - dl[2]
    # (seconds-per-unit, unit name) from largest to smallest.
    for unit_seconds, unit_name in ((86400, "day"), (3600, "hour"),
                                    (60, "minute")):
        if elapsed >= unit_seconds:
            count = (elapsed - elapsed % unit_seconds) / unit_seconds
            break
    else:
        unit_name = "second"
        count = elapsed
    plural = "" if count == 1 else "s"
    tstr = "{0} {1}{2}".format(int(count), unit_name, plural)
    subtitle = "Downloaded: {0} ago. Color: {1}. Tags: {2}.".format(
        tstr, dl[3], dl[4])
    return alp.Item(title=dl[1],
                    subtitle=subtitle,
                    icon=dl[0],
                    fileIcon=True,
                    valid=True,
                    arg=dl[0])
def bibitem_to_alpitem(bib, search):
    """Converts a dictionary result item to an alp item.

    The subtitle is "<eprint-or-year> <author last names> (<journal>)",
    where each part is included only when available.
    """
    # Prefer the arXiv eprint id over the year as the subtitle prefix.
    if 'eprint' in bib:
        prefix = bib['eprint'] + " "
    elif 'year' in bib:
        prefix = bib['year'] + " "
    else:
        prefix = ""
    # Append the journal to the subtitle when known.
    journal = bibitem_to_journaltext(bib)
    suffix = " (" + journal + ")" if journal != "" else ""
    return alp.Item(
        title=bib['title'].replace('\n', ' '),
        subtitle=prefix + authors_to_lastnames(bib['author']) + suffix,
        valid="no",  # Fakes the contextual menu via autocomplete.
        autocomplete=search + alfred_delim + bib['id'] + alfred_delim)
def list_installables(query=None, paths=Installable.PATHS, types=Installable.TYPES):
    """
    searches for Installables in 'path' and generates Alfred-Feedback

    Args:
        query: Filters the results using a substring search
        paths: List of paths that are searched for Installables
        types: List of types that are used for Installables

    Returns:
        Returns Alfred Feedback XML containing one Item per Installable
    """
    apps = Installable.get_installables(paths, types)
    # Sort by creation time; newest come first.
    apps = sorted(apps, key=lambda f: os.path.getctime(f.path), reverse=True)
    fb = []
    for a in apps:
        # Idiom fix: use the 'in' operator instead of the deprecated
        # string.find() module function.
        if query and query.lower() not in str(a).lower():
            continue
        fb.append(
            alp.Item(
                **{
                    'title': "Install %s" % os.path.splitext(str(a))[0],
                    'subtitle': "Install this %s" % a.ext.lstrip('.'),
                    'arg': a.path,
                    'filetype': a.path,
                }))
    alp.feedback(fb)
def query(keyword):
    """Searches the Aliwangwang contact database and emits Alfred feedback.

    Matches the keyword against the pinyin of contact nicknames and lists up
    to ten matching friends, each actionable via the aliim: URL scheme.
    """
    account = get_account()
    if not account:
        item = alp.Item(title=u'您还没有设置旺旺帐号',
                        subtitle=u'请先使用\'setww\'命令设置旺旺帐号',
                        valid=False)
        alp.feedback([item])
        return
    db = path + account + '/db/contact.db'
    if not os.path.exists(db):
        item = alp.Item(title=u'查询失败',
                        subtitle=u'好友列表不存在或已加密,您可以尝试setww重新选择其他帐号',
                        valid=False)
        alp.feedback([item])
        return
    items = []
    conn = sqlite3.connect(db)
    try:
        c = conn.cursor()
        keyword = '%%%s%%' % keyword.strip()
        try:
            c.execute(
                '''SELECT OID,* FROM contact WHERE nickname_pinyin LIKE ? LIMIT 10''',
                (keyword, ))
            result = c.fetchall()
        except Exception:
            # Non-pinyin (e.g. Chinese) input can make the lookup fail;
            # degrade to a hint item instead of crashing.
            result = []
            item = alp.Item(title=u'暂不支持中文查询,请使用拼音', valid=False)
            items.append(item)
        for index, value in enumerate(result):
            userid = value[2]
            nickname = value[4]
            signature = value[5]
            pinyin = value[6]
            item = alp.Item(title=u'旺旺 \'%s\'' % nickname,
                            subtitle=signature,
                            uid=str(index + 1),
                            icon='%s%s/CustomHeadImages/%s.jpg' %
                            (path, account, userid),
                            arg='aliim:sendmsg?touid=%s' % userid,
                            valid=True,
                            autocomplete=pinyin)
            items.append(item)
    finally:
        # Bug fix: the connection was never closed.
        conn.close()
    if len(items) == 0:
        item = alp.Item(title=u'没有查找到匹配好友', valid=False)
        items.append(item)
    alp.feedback(items)
def local_search(search=""):
    """Performs a local search of PDFs in the configured directory.

    Filenames are expected to look like "<id> - <title>.pdf"; every word of
    the query must occur (case-insensitively) in the filename. Always
    appends a fallback item that searches INSPIRE.
    """
    import os
    words = search.lower().split(" ")
    ldir = get_local_dir()
    # Only the top level of the local directory is scanned (no recursion).
    try:
        files = next(os.walk(ldir))[2]
    except StopIteration:
        files = []
    fileitems = []
    for f in files:
        filename, ext = os.path.splitext(f)
        if ext != ".pdf":
            continue
        filenamelower = filename.lower()
        # Every query word must appear in the filename.
        if not all(word in filenamelower for word in words):
            continue
        # Bug fix: guard against filenames without the " - " separator,
        # which previously raised IndexError.
        parts = filename.split(" - ")
        if len(parts) >= 2:
            title, subtitle = parts[1], parts[0]
        else:
            title, subtitle = filename, ""
        fileitems.append(
            alp.Item(title=title,
                     subtitle=subtitle,
                     type='file',
                     icon="com.adobe.pdf",
                     fileType=True,
                     uid=filename,
                     arg=encode_arguments(type="open",
                                          value=os.path.join(ldir, f))))
    # Lastly, append an alp item that searches INSPIRE.
    fileitems.append(
        alp.Item(title="Search INSPIRE for '" + search + "'",
                 subtitle="Searches the INSPIRE database",
                 arg=encode_arguments(type="inspiresearch", value=search)))
    # And return.
    return alp.feedback(fileitems)
def ads_main(q=""):
    """Top-level dispatcher for the ADS search workflow.

    Routes the raw Alfred query to the right sub-menu based on how many
    alfred_delim separators it contains: 0 -> still typing, 1 -> run the
    search, 2 -> context menu, 3 -> author menu; a leading "settings"
    opens the settings menu.
    """
    search = q.decode('utf-8')
    num_delims = search.count(alfred_delim)
    searchsplit = search.split(alfred_delim)
    # Bug fix: 'result' was unbound (NameError) when no branch below
    # matched (more than three delimiters with a token configured).
    result = []
    # If no API token is configured, warn and offer the settings menu.
    if (get_token_setting() == ''):
        result = [
            alp.Item(title="No ADS API token",
                     subtitle="Please set API token by ADS setting",
                     valid="no",
                     autocomplete="settings" + alfred_delim)
        ]
    # If the user hasn't typed anything, give some instructions and the
    # option to open the settings menu.
    if search.strip() == "":
        result = [
            alp.Item(title="Search ADS",
                     subtitle="Begin typing to search ADS",
                     valid="no",
                     autocomplete=""),
            alp.Item(title="Settings",
                     subtitle="Change ADS's settings",
                     valid="no",
                     autocomplete="settings" + alfred_delim)
        ]
    # Settings menu.
    elif searchsplit[0] == "settings":
        result = settings_menu(searchsplit[1:])
    # If the search has no delimiters the user is still typing the query:
    elif num_delims == 0:
        result = typing_ads_menu(searchsplit[0])
    # Has the string one delimiter? Then perform a regular search.
    elif num_delims == 1:
        result = ads_search(searchsplit[0].strip())
    # Are there two delimiters? Then it's a context menu.
    elif num_delims == 2:
        result = context_ads_menu(searchsplit[1], searchsplit[0])
    # Three delimiters? Then it's an author search menu.
    elif num_delims == 3:
        result = author_ads_menu(searchsplit[1], searchsplit[2])
    return alp.feedback(result)
def tag(tags):
    """Emits Alfred feedback to tag the URL currently on the clipboard.

    The clipboard content is validated with a URL regex (borrowed from
    Django); a non-URL clipboard yields an error item instead.
    """
    url = subprocess.check_output('pbpaste')
    # borrow from django
    regex = re.compile(
        r'^(?:http|ftp)s?://'  # http:// or https://
        r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|'  #domain...
        r'localhost|'  #localhost...
        r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})'  # ...or ip
        r'(?::\d+)?'  # optional port
        r'(?:/?|[/?]\S+)$',
        re.IGNORECASE)
    joined = " ".join(tags)
    if re.match(regex, url):
        alp.feedback(
            alp.Item(title="Tag " + joined,
                     subtitle=url,
                     valid=True,
                     arg=joined))
    else:
        alp.feedback(alp.Item(title="Please Copy URL to Clipboard",
                              valid=False))
def list_accounts():
    """Lists Aliwangwang accounts that have a readable contact database.

    An account qualifies when a 'cn*' directory exists under 'path' with a
    db/contact.db file inside it.
    """
    files = os.listdir(path)
    items = []
    for f in files:
        if f.startswith('cn') and os.path.isdir(path + f) and os.path.isfile(
                path + f + '/db/contact.db'):
            item = alp.Item(title=u'设置帐号 \'%s\'' % f,
                            subtitle='',
                            arg=f,
                            valid=True)
            items.append(item)
    # Bug fix: 'items.count == 0' compared the bound method object to 0,
    # which is always False, so the "no accounts" item was never shown.
    if len(items) == 0:
        item = alp.Item(title=u'没有找到可用帐号',
                        subtitle=u'尚未登录过旺旺或者已登录旺旺好友列表已加密',
                        valid=False)
        items.append(item)
    alp.feedback(items)
def handle_error(title, subtitle, icon="icon-no.png", debug=""):
    """
    Output an error message in a form suitable for Alfred to show something.

    Send the error and any debug info supplied to the log file.
    """
    feedback_item = alp.Item(title=title, subtitle=subtitle, icon=icon)
    alp.feedback(feedback_item)
    alp.log("Handled error: %s, %s\n%s" % (title, subtitle, debug))
    sys.exit(0)
def list_instances(name):
    """Returns a list of instances with a given name

    Queries EC2 (via boto) for instances whose Name/name tag contains the
    given string and builds alp.Items whose arg is an ssh command line.
    NOTE(review): the items are built and logged but never passed to
    alp.feedback() or returned here — presumably the caller emits them;
    verify against the call site.
    """
    items = []
    try:
        # Require at least two characters before hitting the EC2 API.
        if len(name) < 2:
            items.append(
                alp.Item(
                    title='Searching',
                    subtitle=
                    'Please type more then one character to start searching',
                    valid=False))
        else:
            ec2 = boto.connect_ec2()
            # Each reservation groups one or more instances.
            for r in ec2.get_all_instances():
                groups = ';'.join([g.name or g.id for g in r.groups])
                for instance in r.instances:
                    # The name tag may be capitalised either way.
                    instance_name = instance.tags.get(
                        'Name', instance.tags.get('name', ''))
                    if not name.lower() in instance_name.lower():
                        continue
                    # Public instances get a direct ssh command; private
                    # ones hop through a 'vpc' bastion host.
                    if instance.public_dns_name:
                        arg = 'ssh -i ~/.ssh/%s.pem %s\n' % (
                            instance.key_name, instance.public_dns_name)
                    else:
                        arg = 'ssh vpc\nssh %s\n' % instance.private_ip_address
                    items.append(
                        alp.Item(title=instance_name,
                                 subtitle='[%s]: %s' % (instance.id, groups),
                                 valid=True,
                                 arg=arg))
        if len(items) == 0:
            items.append(
                alp.Item(title='No Results Found',
                         subtitle='Please refine your search and try again'))
    except Exception, e:
        # Any failure (auth, network, ...) is reported as a single item.
        alp.log(str(e))
        items = [
            alp.Item(title='Problem Searching',
                     subtitle='%s' % str(e).replace("'", ''),
                     valid=False)
        ]
    alp.log(items[0].get())
def set_cache(q=""):
    """Alfred item for viewing or setting the cache timeout (in days).

    Non-numeric input falls back to 0 days.
    """
    if q == "":
        return alp.Item(title="Begin typing to set the cache timeout",
                        subtitle="Current value is " +
                        str(get_cache_setting()) + " days",
                        valid="no")
    try:
        s = str(int(q))
    except ValueError:
        # Narrowed from a bare except: only malformed numeric input is
        # expected here.
        s = "0"
    return alp.Item(
        title="Set cache timeout to " + s + " days",
        subtitle="Current value is " + str(get_cache_setting()) + " days",
        arg=encode_arguments(type="setting",
                             value={'cache': int(s)},
                             notification={
                                 'title': 'Setting changed',
                                 'text': 'Cache timeout set to ' + s + ' days'
                             }))
def main_settings():
    """Returns the main settings menu"""
    def submenu(name):
        # Autocomplete string that drills into a settings submenu.
        return "settings" + alfred_delim + name + alfred_delim

    return [
        # Option to clear the cache.
        alp.Item(title="Clear cache",
                 subtitle="Clears all cached searches",
                 arg=encode_arguments(
                     type="clearcache",
                     notification={
                         'title': 'Cache cleared',
                         'text': 'All saved results have been cleared'
                     })),
        # Option to change the cache setting.
        alp.Item(title="Change cache setting",
                 subtitle="Set the time searches are cached",
                 valid="no",
                 autocomplete=submenu("setcache")),
        # Option to change the local directory.
        alp.Item(
            title="Change local directory",
            subtitle="Set local directory where PDFs are stored and searched",
            valid="no",
            autocomplete=submenu("setdir")),
        # Option to store the ADS API token.
        alp.Item(
            title="Set ADS API token",
            subtitle=
            "Sign up ADS for the API token in https://ui.adsabs.harvard.edu/#user/settings/token",
            valid="no",
            autocomplete=submenu("ADSToken")),
    ]
def ads_open(q=""):
    """Feedback item that opens the ADS website searching for the query."""
    import alp
    import urllib
    search_url = ("http://ui.adsabs.harvard.edu/#search/" +
                  urllib.urlencode({'q': q}))
    return alp.feedback(
        alp.Item(title="Open ADS for search",
                 subtitle="Search on the ADS website for '" + q + "'",
                 arg=encode_arguments(type='url', value=search_url)))
def get_items(arg):
    """ Get a list of alfred.Item objects """
    # Idiom fix: the enumerate() counter was never used; build the list
    # directly with a comprehension.
    return [
        alp.Item(title=bookmark.title,
                 subtitle=bookmark.path,
                 arg=bookmark.path,
                 icon="CloneRepoIcon.png") for bookmark in get_results(arg)
    ]
def query(keyword):
    """Searches the jsapi database by name and t2, emitting Alfred feedback.

    Shows at most ten de-duplicated matches; an update notice is prepended
    when a newer workflow version is available.
    """
    pattern = '%' + keyword.strip() + '%'
    c.execute('''SELECT OID,* FROM jsapi WHERE name LIKE ? LIMIT 10''',
              (pattern, ))
    name_matches = c.fetchall()
    c.execute('''SELECT OID,* FROM jsapi WHERE t2 LIKE ? LIMIT 10''',
              (pattern, ))
    t2_matches = c.fetchall()
    matches = uniMatch(name_matches + t2_matches)[0:10]
    items = []
    if hasNewVersion():
        items.append(
            alp.Item(title="The new js api workflow is avaliable",
                     icon="update-icon.png",
                     subtitle="please choose this to download",
                     uid="0",
                     valid=True,
                     autocomplete="",
                     arg="https://github.com/allenm/jsapi-workflow"))
    for index, match in enumerate(matches):
        items.append(
            alp.Item(title=match[3] + ' (' + match[2] + ')',
                     subtitle=match[4],
                     uid=str(index + 1),
                     valid=True,
                     autocomplete=match[3],
                     arg=match[5]))
    alp.feedback(items)
def author_menu(authors=""):
    """Returns an Alfred context menu populated with authors"""
    # The bibtex 'author' field separates individual authors with " and ".
    actions = []
    for name in authors.split(" and "):
        if name == "others":
            # "et al." entry: open the INSPIRE record page instead.
            # NOTE(review): 'bibid' is not defined in this function — it is
            # presumably a module-level name; verify before relying on it.
            actions.append(
                alp.Item(
                    title="More authors",
                    subtitle="Open the INSPIRE page for all authors of the paper",
                    arg=encode_arguments(type='inspirerecord', value=bibid)))
        else:
            actions.append(
                alp.Item(title=name,
                         subtitle="Find more papers of author",
                         valid="no",
                         autocomplete="find a " + name + alfred_delim))
    return actions
def set_local_dir(q=""):
    """Sets the local directory"""
    import os
    actions = []
    if q == "":
        # Nothing typed yet: show an instruction item.
        actions.append(
            alp.Item(title="Begin typing to search for a directory",
                     subtitle="All directories in " + os.path.expanduser("~") +
                     " will be searched",
                     valid="no"))
    else:
        # Use mdfind (Spotlight) to search for matching folders within ~/.
        spotlight_query = ("-onlyin ~ 'kMDItemFSName=\"*" + q +
                           "*\"c && kMDItemContentType==public.folder'")
        for candidate in alp.find(spotlight_query):
            actions.append(
                alp.Item(title=candidate,
                         subtitle="Set the local directory to " + candidate,
                         arg=encode_arguments(
                             type="setting",
                             value={'local_dir': candidate},
                             notification={
                                 'title': 'Setting changed',
                                 'text': 'Local directory set to ' + candidate
                             })))
    # Also remind the user what the current directory is.
    actions.append(
        alp.Item(title="Current local directory",
                 subtitle=get_local_dir(),
                 valid="no"))
    return actions
def search(keywords):
    """
    Performs a search with the given keywords in the database.
    Returns the found clips as Alfred Items (XML).

    Every keyword must match (AND) within at least one of the Title, Url
    or Notes columns (OR between the columns).
    """
    global settings
    if len(keywords) < 1:
        return None
    conn = lite.connect(settings["sqliteDB"])
    # Security fix: build the query with '?' placeholders instead of string
    # interpolation, so quotes in a keyword cannot break or inject SQL.
    titles = " AND ".join(["Title LIKE ?"] * len(keywords))
    urls = " AND ".join(["Url LIKE ?"] * len(keywords))
    notes = " AND ".join(["Notes LIKE ?"] * len(keywords))
    sql = "SELECT Title, Subtitle, Url, AppUrl"
    sql += " FROM Clips"
    sql += " WHERE (%s) OR" % titles
    sql += " (%s) OR " % urls
    sql += " (%s)" % notes
    # One '%word%' parameter per placeholder, in clause order.
    params = ["%" + word + "%" for word in keywords] * 3
    alfredItems = []
    with conn:
        cur = conn.cursor()
        cur.execute(sql, params)
        rows = cur.fetchall()
        for row in rows:
            itemDict = dict(title=row[0],
                            subtitle=row[1],
                            arg=row[2],
                            valid=True)
            item = alp.Item(**itemDict)
            alfredItems.append(item)
    return alfredItems
def fetchTimers():
    """Lists previous Toggl time entries so an old timer can be restarted.

    Returns an instruction string when no API token is configured;
    otherwise emits Alfred feedback (only on HTTP 200).
    """
    if tkn is None:
        return 'Please set Token via \'tgl token <TOKEN>\''
    response = requests.get('https://www.toggl.com/api/v8/time_entries',
                            auth=(tkn, 'api_token'))
    if response.status_code == 200:
        items = []
        for entry in response.json:
            # A negative duration marks a currently running entry.
            running = entry['duration'] < 0
            items.append(
                alp.Item(title=entry['description'],
                         subtitle='Currently running'
                         if running else 'No longer running',
                         valid=not running,
                         arg='start %s' % entry['description']))
        return alp.feedback(items)
def search(tags):
    """Emits Alfred feedback for bookmarked URLs matching any of the tags.

    Favicons are cached on disk under icon_cache/, keyed by the SHA-224 of
    the icon blob.
    """
    conn = sqlite3.connect(alp.local(join=DB))
    conn.row_factory = sqlite3.Row
    cur = conn.cursor()
    matches = []
    for tag in tags:
        cur.execute(
            '''
        SELECT DISTINCT urls.* FROM urls
        JOIN tags ON tags.url_id = urls.id
        WHERE tags.tag LIKE ?
        ''', ('%' + tag + '%', ))
        matches += cur.fetchall()
    items = []
    for row in matches:
        icon = row['icon']
        digest = hashlib.sha224(icon).hexdigest()
        icon_path = alp.local(join=os.path.join('icon_cache', digest))
        # Write the icon blob into the cache only once.
        if not os.path.exists(icon_path):
            with open(icon_path, 'w') as f:
                f.write(icon)
        cur.execute('SELECT * FROM tags WHERE url_id = ?', (row['id'], ))
        url_tags = cur.fetchall()
        items.append(
            alp.Item(title=row['url'],
                     subtitle=" ".join(t['tag'] for t in url_tags),
                     valid=True,
                     icon=icon_path,
                     arg=row['url']))
    alp.feedback(items)
#!/usr/bin/env python import sys import alp # calculate decimal number decimal = int(sys.argv[1], 2) # create associative array and create xml from it decimalDic = dict(title=str(decimal), subtitle="Decimal", uid="decimal", valid=True, arg=str(decimal), icon="icons/decimal.png") d = alp.Item(**decimalDic) # calculate octal number octal = oct(decimal)[1:] # create associative array and create xml from it octalDic = dict(title=str(octal), subtitle="Octal", uid="octal", valid=True, arg=str(octal), icon="icons/octal.png") o = alp.Item(**octalDic) # calculate hex number hexadec = hex(decimal)[2:] # create associative array and create xml from it hexDic = dict(title=str(hexadec),
#!/usr/bin/env python import sys import alp # calculate decimal number decimal = int(sys.argv[1], 16) # create associative array and create xml from it decimalDic = dict(title=str(decimal), subtitle="Decimal", uid="decimal", valid=True, arg=str(decimal), icon="icons/decimal.png") d = alp.Item(**decimalDic) # calculate binary number binary = bin(decimal)[2:].zfill(8) # create associative array and create xml from it binaryDic = dict(title=str(binary), subtitle="Binary", uid="binary", valid=True, arg=str(binary), icon="icons/binary.png") b = alp.Item(**binaryDic) # calculate octal number octal = oct(decimal)[1:] # create associative array and create xml from it octalDic = dict(title=str(octal),
import alp
from alp.request import requests
import json
import sys
from datetime import datetime, timedelta
import time

# Persistent workflow settings; the Toggl API token is stored under 'token'.
settings = alp.Settings()
tkn = settings.get('token')

# Recognized action keywords — presumably matched against the first word of
# the query by the dispatcher (verify against the caller).
ACTION_STRINGS = ['start', 'stop', 'token', 'timers', 'execute']

# Top-level menu items shown for the workflow's main actions.
ACTIONS = [
    alp.Item(title='Start Timer',
             subtitle='Start a new Toggl Timer',
             valid=False,
             autocomplete='start'),
    alp.Item(title='Stop Timer',
             subtitle='Stop the current Toggl Timer',
             valid=False,
             autocomplete='stop',
             arg='stop'),
    alp.Item(title='Set Token',
             subtitle='Set the current Toggl Token',
             valid=False,
             autocomplete='token'),
    alp.Item(title='Previous Timers',
             subtitle='Restart an old timer',
             valid=False,
             autocomplete='timers'),
]
def inspire_search(search=""):
    """Searches Inspire.

    Results are cached on disk (file name is the urlsafe-base64 of the
    query) and reused while younger than the configured cache timeout;
    fresh queries go through pyinspire and are parsed from BibTeX.
    """
    import time
    import shutil
    import os
    import json
    import base64
    # Path for the temporary bibtex results.
    tempf = os.path.join(alp.cache(), "results.bib")
    # Path for the temporary latest parsed results.
    lastf = os.path.join(alp.cache(), "lastresults.json")
    # Path for the permanent cache of the query. Note the urlsafe encode.
    savef = os.path.join(alp.storage(),
                         base64.urlsafe_b64encode(search) + ".cache")
    # Check if the cache file exists and is not older than the timeout.
    try:
        # Modification time of the cache file vs. now.
        mt = os.path.getmtime(savef)
        ct = time.time()
        # Is the difference less than the cache setting (in days)?
        usecache = ct - mt < (get_cache_setting() * 86400)
    except OSError:
        # Narrowed from a bare except: getmtime raises OSError when the
        # cache file does not exist yet.
        usecache = False
    if usecache:
        # Read the cache file and parse the JSON to a dictionary.
        with open(savef, "r") as f:
            bibitems = json.load(f)
    else:
        from bibtexparser.bparser import BibTexParser
        from pyinspire import pyinspire
        # Query Inspire and get the result in form of BibTeX.
        bibtex = pyinspire.get_text_from_inspire(search,
                                                 "bibtex").encode('utf-8')
        # Write the BibTeX to a file, then parse it back from there.
        with open(tempf, "w") as f:
            f.write(bibtex)
        with open(tempf, "r") as f:
            bp = BibTexParser(f)
        # Get the bibtex as a dictionary and remove any newlines.
        bibitems = map(remove_newlines, bp.get_entry_list())
        # Save the dictionary to the cache file.
        with open(savef, "w") as f:
            json.dump(bibitems, f)
    # Copy the cache file to the file containing the latest results.
    shutil.copy(savef, lastf)
    # Parse the result dictionary to alp items.
    alpitems = []
    for bibitem in bibitems:
        alpitems.append(bibitem_to_alpitem(bibitem, search))
    # No results? Then tell the user, and offer to search the website.
    if len(alpitems) == 0:
        import urllib
        alpitems.append(
            alp.Item(title="No results",
                     subtitle="Search on the INSPIRE website for '" + search +
                     "'",
                     arg=encode_arguments(
                         type='url',
                         value="http://inspirehep.net/search?ln=en&" +
                         urllib.urlencode({'p': search}))))
    # And return feedback for Alfred.
    return alpitems
def ads_search(search=""):
    """Searches ADS.

    Results are cached on disk (file name is the urlsafe-base64 of the
    query) and reused while younger than the configured cache timeout;
    fresh queries go through the 'ads' client and are parsed from BibTeX.
    On failure (e.g. rate limit) a single item linking to the ADS website
    is returned instead.
    """
    import time
    import shutil
    import os
    import json
    import base64
    # Path for the temporary bibtex results.
    tempf = os.path.join(alp.cache(), "results.bib")
    # Path for the temporary latest parsed results.
    lastf = os.path.join(alp.cache(), "lastresults.json")
    # Path for the permanent cache of the query. Note the urlsafe encode.
    savef = os.path.join(alp.storage(),
                         base64.urlsafe_b64encode(search) + ".cache")
    # Check if the cache file exists and is not older than the timeout.
    try:
        mt = os.path.getmtime(savef)
        ct = time.time()
        usecache = ct - mt < (get_cache_setting() * 86400)
    except Exception:
        # Narrowed from a bare except (e.g. cache file does not exist).
        usecache = False
    if usecache:
        # Read the cache file and parse the JSON to a dictionary.
        with open(savef, "r") as f:
            bibitems = json.load(f)
    else:
        from bibtexparser.bparser import BibTexParser
        import ads
        import urllib
        ads.config.token = get_token_setting()
        # NOTE(review): this "Open ADS" item is overwritten by the
        # 'alpitems = []' reset below before being returned — it only
        # documents the intended fallback; confirm against the history.
        alpitems = []
        alpitems.append(
            alp.Item(title="Open ADS for search",
                     subtitle="Search on the ADS website for '" + search + "'",
                     arg=encode_arguments(
                         type='url',
                         value="http://ui.adsabs.harvard.edu/#search/" +
                         urllib.urlencode({'q': search}))))
        try:
            # Query ADS (at most 8 rows) and collect the BibTeX of each hit.
            ppp = (ads.SearchQuery(q=search, fl=['bibcode', 'bibtex'], rows=8))
            bibtex = ""
            bibcode = []
            for pp in ppp:
                bibcode = bibcode + [pp.bibcode]
                bibtex = bibtex + pp.bibtex.encode("utf-8")
            # Write the BibTeX to a file, then parse it back from there.
            with open(tempf, "w") as f:
                f.write(bibtex)
            with open(tempf, "r") as f:
                bp = BibTexParser(f)
            # Get the bibtex as a dictionary and remove any newlines.
            bibitems = map(remove_newlines, bp.get_entry_list())
            # Save the dictionary to the cache file.
            with open(savef, "w") as f:
                json.dump(bibitems, f)
        except Exception:
            # Narrowed from a bare except so SystemExit/KeyboardInterrupt
            # escape; most likely cause here is the ADS rate limit.
            alpitems = []
            alpitems.append(
                alp.Item(title="Rate limit was exceed",
                         subtitle="Search on the ADS website for '" + search +
                         "'",
                         arg=encode_arguments(
                             type='url',
                             value="http://ui.adsabs.harvard.edu/#search/" +
                             urllib.urlencode({'q': search}))))
            return alpitems
    # Copy the cache file to the file containing the latest results.
    shutil.copy(savef, lastf)
    # Parse the result dictionary to alp items.
    alpitems = []
    for bibitem in bibitems:
        alpitems.append(bibitem_to_alpitem(bibitem, search))
    # And return feedback for Alfred.
    return alpitems
def context_ads_menu(key="", search=""):
    """Returns the context menu for an ADS result item.

    Only the key (id) of the actioned item is passed in, so the item itself
    is looked up in the cached results of the latest search.
    """
    import os
    import json
    import time
    # Unique uid prefix so Alfred does not reorder the menu by usage.
    bid = alp.bundle() + str(time.time())
    # Load the parsed results from the latest search.
    lastf = os.path.join(alp.cache(), "lastresults.json")
    with open(lastf, "r") as f:
        items = json.load(f)
    # Lookup the item by its id.
    item = None
    for i in items:
        if 'id' in i and i['id'] == key:
            item = i
            break
    if item is None:
        # Bug fix: previously an unknown key left 'item' unbound and
        # raised NameError below; return an empty menu instead.
        return []
    actions = []
    # Link to the ADS record page.
    actions.append(
        alp.Item(title=item['title'],
                 subtitle="Open ADS record page in browser",
                 arg=encode_arguments(type='url', value=item['adsurl']),
                 uid=bid + "adsrecord"))
    # Author search.
    authors = item['author'].split(" and ")
    if len(authors) == 1:
        actions.append(
            alp.Item(title=item['author'],
                     subtitle="Find more papers of author",
                     valid="no",
                     autocomplete="author: " + item['author'] + alfred_delim,
                     uid=bid + "authors"))
    else:
        actions.append(
            alp.Item(title=authors_to_lastnames(item['author']),
                     subtitle="Find more papers of authors",
                     valid="no",
                     autocomplete=search + alfred_delim + key + alfred_delim +
                     item['author'] + alfred_delim,
                     uid=bid + "authors"))
    # Link to resolve the DOI.
    if 'doi' in item:
        url = "http://dx.doi.org/" + item['doi']
        actions.append(
            alp.Item(title=bibitem_to_journaltext(item),
                     subtitle="Open DOI in browser",
                     arg=encode_arguments(type='url', value=url),
                     uid=bid + "doi"))
    # Next, the option to open the PDF from arXiv.
    if 'eprint' in item:
        urlprefix = ""
        prefix = 'arXiv:'
        url = "http://arxiv.org/pdf/" + urlprefix + item['eprint']
        # Local filename: "<eprint> <authors> - <title>.pdf", with path
        # separators and colons sanitized.
        filename = os.path.join(
            get_local_dir(),
            (item['eprint'] + " " + authors_to_lastnames(item['author']) +
             " - " + item['title'] + '.pdf').replace('/', '_').replace(
                 ':', '_'))
        actions.append(
            alp.Item(title=prefix + item['eprint'],
                     subtitle="Download and open PDF",
                     arg=encode_arguments(type='getpdf',
                                          value=[url, filename]),
                     uid=bid + "arxivpdf"))
    # The option to lookup references.
    actions.append(
        alp.Item(title="References",
                 subtitle="Find papers that this paper cites",
                 valid="no",
                 autocomplete="references(bibcode:" + key + ")" + alfred_delim,
                 uid=bid + "refs"))
    # The option to lookup citations.
    actions.append(
        alp.Item(title="Citations",
                 subtitle="Find papers that cite this paper",
                 valid="no",
                 autocomplete="citations(bibcode:" + key + ")" + alfred_delim,
                 uid=bid + "cites"))
    # The option to copy the bibtex of the current item to the clipboard.
    actions.append(
        alp.Item(title="BibTeX",
                 subtitle="Copy BibTeX to clipboard",
                 uid=bid + "bibtex",
                 arg=encode_arguments(
                     type='clipboard',
                     value=bibitem_to_bibtex(item),
                     notification={
                         'title': 'Copied BibTeX to clipboard',
                         'text': 'BibTeX for ' + key +
                         ' has been copied to the clipboard'
                     })))
    # And return.
    return actions