Example #1
    def __init__(self):
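        # Load the bundled routes.json from the workflow's local folder and the
        # saved config (HubId) from its persistent storage folder.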
        routes = alp.jsonLoad(alp.local('routes.json'), [])
        try:
            config = alp.jsonLoad(alp.storage('config.json'))
        except Exception:
            config = {}
            alp.jsonDump(config, alp.storage('config.json'))

        self.hubid = config.get('hubid')

        alp_args = alp.args()
        args_len = len(alp_args)

        if args_len > 0:
            # Allow resetting HubId.
            config_mode = alp_args[0].isdigit()
            if self.hubid is None or config_mode:
                hubid = alp_args[0]

                return alp.feedback(alp.Item(title='Press Ctrl + Enter to set your HubId to %s' % hubid, arg=hubid, uid=hubid))

            search = alp_args[0].lower()
            routes = filter(lambda route: search in route.get('title').lower() or search in route.get('description', '').lower(), routes)
        elif self.hubid is None:
            return alp.feedback([config_item()])

        items = map(self.build_item, routes)
        return alp.feedback(items)
Example #2
def main():
    alp_args = alp.args()
    alp.log(alp_args)

    try:
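        # Persist the HubId passed as the first Alfred argument to config.json
        # in the workflow's storage folder.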
        alp.jsonDump(dict(hubid=alp_args[0]), alp.storage('config.json'))
        alp.log('Setting json')
        alp.log(alp.jsonLoad(alp.storage('config.json')))
    except Exception as e:
        alp.log('Unable to save your configuration. Please try again.')
        alp.log(traceback.format_exc())
        raise e

    return
Example #3
def main():
    alp_args = alp.args()
    alp.log(alp_args)

    try:
        alp.jsonDump(dict(hubid=alp_args[0]), alp.storage('config.json'))
        alp.log('Setting json')
        alp.log(alp.jsonLoad(alp.storage('config.json')))
    except Exception as e:
        alp.log('Unable to save your configuration. Please try again.')
        alp.log(traceback.format_exc())
        raise e

    return
Example #4
    def get_results(self, query):
        self.partial_query = query

        for _, dirnames, __ in os.walk(alp.storage(join='presets')):
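            # Each saved preset is a subdirectory of the workflow's 'presets'
            # storage folder; emit one result item per preset directory.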
            for subdirname in dirnames:
                self._add_item(
                    title=subdirname,
                    icon=self.ICON,
                    autocomplete=subdirname,
                    arg=json.dumps({
                        'action': 'load_preset',
                        'preset_name': subdirname,
                    }),
                )

        if not self.results:
            self._add_item(
                title='You have no saved presets!',
                subtitle='Use "-hue save-preset" to save the current lights state as a preset.',
                icon=self.ICON,
                valid=False,
            )

        self._filter_results()
        return self.results
Example #5
def typing_ads_menu(search=""):
    import os
    import base64

    searchlower = search.lower().strip()
    words = searchlower.split(" ")

    # List all previous searches (i.e. all cache files)
    prevsearches = []
    for f in os.listdir(alp.storage()):
        filename, ext = os.path.splitext(f)
        if ext == ".cache":
            prevsearch = base64.urlsafe_b64decode(filename)
            prevsearchlower = prevsearch.lower()
            # Search for the words in the input query
            if searchlower == prevsearchlower:
                continue
            match = True
            for word in words:
                if word not in prevsearchlower:
                    match = False
                    break
            if not match:
                continue
            prevsearches.append(
                alp.Item(title="'" + prevsearch + "'",
                         subtitle="Recall stored search '" + prevsearch + "'",
                         valid="no",
                         autocomplete=prevsearch + alfred_delim))
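    # The first item searches for the query as typed; matching stored searches follow.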
    return [
        alp.Item(title="Search for '" + search + "'",
                 subtitle="Search for the current query",
                 valid="no",
                 autocomplete=search + alfred_delim)
    ] + prevsearches
Example #6
def do_feedback():
    q = alp.args()
    flowPath = os.path.split(alp.local())[0]
    cache = alp.jsonLoad("cache.json", default={})
    day_secs = 24 * 60 * 60
    force = (len(q) > 0 and q[0] == "|force|")
    t = time.time()
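    # A literal '|force|' first argument wipes the cache and storage folders
    # and bypasses the 24-hour workflow cache below.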
    if (force):
        import shutil
        _c = alp.cache()
        _s = alp.storage()
        shutil.rmtree(_c)
        shutil.rmtree(_s)

    if (cache.get("cache_time", 0) + day_secs > t) and not force:
        candidates = cache.get("cached_workflows", [])
    else:
        candidates = []
        for dirpath, dirnames, filenames in os.walk(flowPath, topdown=False, followlinks=True):
            for aFile in filenames:
                if aFile == "update.json":
                    try:
                        fn = os.path.join(dirpath, "Info.plist")
                        if not os.path.exists(fn):
                            fn = os.path.join(dirpath, "info.plist")

                        plist = alp.readPlist(fn)
                    except IOError as e:
                        alp.log("Exception: Info.plist not found ({0}).".format(e))
                        continue
                    else:
                        name = plist["name"]
                        local_description = plist["description"]
                        the_json = os.path.join(dirpath, aFile)
                        the_icon = os.path.join(dirpath, "icon.png")
                        if name != "Alleyoop":
                            candidates.append(dict(name=name, json=the_json,
                                icon=the_icon, path=dirpath,
                                description=local_description))
                        else:
                            downloads_path = os.path.expanduser("~/Downloads/")
                            candidates.append(dict(name=name, json=the_json,
                                icon=the_icon, path=downloads_path,
                                description=local_description))
        new_cache = dict(cache_time=t, cached_workflows=candidates)
        alp.jsonDump(new_cache, "cache.json")

    threads = []
    for candidict in candidates:
        try:
            with codecs.open(candidict["json"]) as f:
                local = json.load(f, encoding="utf-8")
        except Exception as e:
            alp.log("{0} may no longer exist: {1}".format(candidict["name"], e))
            continue

        ot = OopThread(local['remote_json'], force, candidict, local)
        threads.append(ot)
        ot.start()
    manage_threads(threads)
Example #7
def typing_menu(search=""):
    import os
    import base64

    searchlower = search.lower().strip()
    words       = searchlower.split(" ")

    # List all previous searches (i.e. all cache files)
    prevsearches = []
    for f in os.listdir(alp.storage()):
        filename, ext = os.path.splitext(f)
        if ext == ".cache":
            prevsearch      = base64.urlsafe_b64decode(filename)
            prevsearchlower = prevsearch.lower()
            # Search for the words in the input query
            if searchlower == prevsearchlower:
                continue
            match = True
            for word in words:
                if word not in prevsearchlower:
                    match = False
                    break
            if not match:
                continue
            prevsearches.append(alp.Item(
                title        = "'" + prevsearch + "'",
                subtitle     = "Recall stored INSPIRE search '" + prevsearch + "'",
                valid        = "no",
                autocomplete = prevsearch + alfred_delim
            ))
    return [alp.Item(
        title        = "Search INSPIRE for '" + search + "'",
        subtitle     = "Search INSPIRE for the current query",
        valid        = "no",
        autocomplete = search + alfred_delim
    )] + prevsearches
Example #8
def do_feedback():
    q = alp.args()
    flowPath = os.path.split(alp.local())[0]
    cache = alp.jsonLoad("cache.json", default={})
    day_secs = 24 * 60 * 60
    force = (len(q) > 0 and q[0] == "|force|")
    t = time.time()
    if (force):
        import shutil
        _c = alp.cache()
        _s = alp.storage()
        shutil.rmtree(_c)
        shutil.rmtree(_s)

    if (cache.get("cache_time", 0) + day_secs > t) and not force:
        candidates = cache.get("cached_workflows", [])
    else:
        candidates = []
        for dirpath, dirnames, filenames in os.walk(flowPath,
                                                    topdown=False,
                                                    followlinks=True):
            for aFile in filenames:
                if aFile == "update.json":
                    try:
                        fn = os.path.join(dirpath, "Info.plist")
                        if not os.path.exists(fn):
                            fn = os.path.join(dirpath, "info.plist")

                        plist = alp.readPlist(fn)
                    except IOError as e:
                        alp.log(
                            "Exception: Info.plist not found ({0}).".format(e))
                        continue
                    else:
                        name = plist["name"]
                        local_description = plist["description"]
                        the_json = os.path.join(dirpath, aFile)
                        the_icon = os.path.join(dirpath, "icon.png")
                        if name != "Alleyoop":
                            candidates.append(
                                dict(name=name,
                                     json=the_json,
                                     icon=the_icon,
                                     path=dirpath,
                                     description=local_description))
                        else:
                            downloads_path = os.path.expanduser("~/Downloads/")
                            candidates.append(
                                dict(name=name,
                                     json=the_json,
                                     icon=the_icon,
                                     path=downloads_path,
                                     description=local_description))
        new_cache = dict(cache_time=t, cached_workflows=candidates)
        alp.jsonDump(new_cache, "cache.json")

    threads = []
    for candidict in candidates:
        try:
            with codecs.open(candidict["json"]) as f:
                local = json.load(f, encoding="utf-8")
        except Exception as e:
            alp.log("{0} may no longer exist: {1}".format(
                candidict["name"], e))
            continue

        ot = OopThread(local['remote_json'], force, candidict, local)
        threads.append(ot)
        ot.start()
    manage_threads(threads)
Example #9
def inspire_search(search=""):
    """Searches Inspire."""

    import time
    import shutil
    import os
    import json
    import base64

    # Path for the temporary bibtex results.
    tempf = os.path.join(alp.cache(),"results.bib")
    # Path for the temporary latest parsed results.
    lastf = os.path.join(alp.cache(),"lastresults.json")
    # Path for the permanent cache of the query. Note the urlsafe encode.
    savef = os.path.join(alp.storage(),base64.urlsafe_b64encode(search) + ".cache")

    # Check if the cache file exists and if it's not older than the configured number of days.
    try:
        # Get the modification time of the cache file.
        mt = os.path.getmtime(savef)
        # Get the current time.
        ct = time.time()
        # Is the difference in time less than a number of days? Then use it as cache.
        usecache = ct - mt < (get_cache_setting() * 86400)
    except:
        # If the above fails (e.g. when the file doesn't exist), don't use cache.
        usecache = False

    if usecache:
        # Read the cache file and parse the JSON to a dictionary.
        with open(savef,"r") as f:
            bibitems = json.load(f)
    else:
        from bibtexparser.bparser import BibTexParser
        from pyinspire import pyinspire
        # Query Inspire and get the result in form of BibTeX.
        bibtex = pyinspire.get_text_from_inspire(search,"bibtex").encode('utf-8')
        # Write the BibTeX to a file.
        with open(tempf,"w") as f:
            f.write(bibtex)
        # Parse the BibTeX from the same file.
        with open(tempf,"r") as f:
            bp = BibTexParser(f)
        # Get the bibtex as a dictionary and remove any newlines.
        bibitems = map(remove_newlines,bp.get_entry_list())
        # Save the dictionary to the cache file.
        with open(savef,"w") as f:
            json.dump(bibitems,f)

    # Copy the cache file to the file containing the latest results.
    shutil.copy(savef,lastf)

    # Parse the result dictionary to alp items.
    alpitems = []
    for bibitem in bibitems:
        alpitems.append(bibitem_to_alpitem(bibitem,search))

    # No results? Then tell the user, and offer to search the Inspire website.
    if len(alpitems) == 0:
        import urllib
        alpitems.append(alp.Item(
            title       = "No results",
            subtitle    = "Search on the INSPIRE website for '" + search + "'",
            arg=encode_arguments(
                type    = 'url',
                value   = "http://inspirehep.net/search?ln=en&" + urllib.urlencode({'p':search})
            )
        ))

    # And return feedback for Alfred.
    return alpitems
Example #10
def main(q):

    # Decode the arguments passed by Alfred.
    # The result is a dictionary with keys 'type', 'value', and 'notification'.
    args = json.loads(base64.b64decode(q))

    t = args['type']
    v = args['value']
    n = args['notification']

    #
    # Act on the various types.
    #

    # Open a URL in the default browser.
    if t == 'url':
        import webbrowser
        webbrowser.open(v)

#    elif t == 'ads':
#        import scriptfilter.get_token_setting as get_token
#        import ads.sandbox as ads
#
#        ads.config.token = get_token()
#        pp = ads.SearchQuery(q=v)

    # Paste to clipboard.
    elif t == 'clipboard':
        import os
        import alp
        import subprocess
        # Paste to the clipboard via the command line util 'pbcopy'.
        # First, write the data to a file which 'pbcopy' will read.
        cpf = os.path.join(alp.cache(),"clipboard.txt")
        with open(cpf, "w") as f:
            f.write(v)
        # Now call 'pbcopy'.
        subprocess.call('pbcopy < "' + cpf + '"',shell=True)

    # Lookup Inspire record.
    elif t == 'inspirerecord':

        import urllib
        import webbrowser
        import xml.etree.ElementTree as ET

        # First, get the URL for the record by querying Inspire.

        # Get XML data from Inspire.
        url = "http://inspirehep.net/rss?" + urllib.urlencode({'ln':'en','p':v})
        try:
            f = urllib.urlopen(url)
            xml = f.read()
            f.close()
        except:
            return
        # Parse the XML.
        e = ET.fromstring(xml)
        for item in e.iter('item'):
            for link in item.iter('link'):
                recordurl = link.text
                break
            break

        # Now open the URL.
        webbrowser.open(recordurl)

    elif t == 'clearcache':
        import os
        import alp

        # Remove cache files from storage folder.
        for f in os.listdir(alp.storage()):
            file_path = os.path.join(alp.storage(), f)
            try:
                if os.path.isfile(file_path):
                    if os.path.splitext(f)[-1] == ".cache":
                        os.remove(file_path)
            except Exception:
                pass
Example #11
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import alp
import sqlite3

clone_database = alp.storage(join="zotquery.sqlite")
conn = sqlite3.connect(clone_database)
cur = conn.cursor()	

#query = 'sem'
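# Look up Zotero item keys whose title-like fields contain the query string,
# using the cloned zotquery.sqlite database opened above.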

def title(query): 
	title_query = """
	select items.key
	from items, itemData, fields, itemDataValues, itemTypes
	where
		items.itemID = itemData.itemID
		and itemData.fieldID = fields.fieldID
		and itemData.valueID = itemDataValues.valueID
		and items.itemTypeID = itemTypes.itemTypeID
		and itemTypes.typeName != "attachment"
		and (fields.fieldName = "title"
			or fields.fieldName = "publicationTitle"
			or fields.fieldName = "seriesTitle"
			or fields.fieldName = "series")
		and itemDataValues.value LIKE '%{0}%'
	""".format(query)
	title_info = cur.execute(title_query).fetchall()
	
	keys_list = []
	for item in title_info:
		# NOTE: the original snippet is truncated at this point; collecting each
		# returned item key is an assumed completion so the loop has a body.
		keys_list.append(item[0])
	return keys_list
Example #12
def ads_search(search=""):
    """Searches ADS."""

    import time
    import shutil
    import os
    import json
    import base64

    # Path for the temporary bibtex results.
    tempf = os.path.join(alp.cache(), "results.bib")
    # Path for the temporary latest parsed results.
    lastf = os.path.join(alp.cache(), "lastresults.json")
    # Path for the permanent cache of the query. Note the urlsafe encode.
    savef = os.path.join(alp.storage(),
                         base64.urlsafe_b64encode(search) + ".cache")

    # Check if the cache file exists and if it's not older than the configured number of days.
    try:
        # Get the modification time of the cache file.
        mt = os.path.getmtime(savef)
        # Get the current time.
        ct = time.time()
        # Is the difference in time less than a number of days? Then use it as cache.
        usecache = ct - mt < (get_cache_setting() * 86400)
    except:
        # If the above fails (e.g. when the file doesn't exist), don't use cache.
        usecache = False

    if usecache:
        # Read the cache file and parse the JSON to a dictionary.
        with open(savef, "r") as f:
            bibitems = json.load(f)
    else:
        from bibtexparser.bparser import BibTexParser
        #   from pyinspire import pyinspire
        #        import ads.sandbox as ads
        import ads
        import urllib

        ads.config.token = get_token_setting()
        # Query ADS and get the result in form of BibTeX.
        alpitems = []
        alpitems.append(
            alp.Item(
                title="Open ADS for search",
                subtitle="Search on the ADS website for '" + search + "'",
                arg=encode_arguments(
                    type='url',
                    value="http://ui.adsabs.harvard.edu/#search/" +
                    urllib.urlencode({'q': search})
                    #                value   = search
                )))
        # papers in ads
        try:
            ppp = (ads.SearchQuery(q=search, fl=['bibcode', 'bibtex'], rows=8))
            # get the bibtex
            bibtex = ""
            bibcode = []
            for pp in ppp:
                bibcode = bibcode + [pp.bibcode]
                #                bibtex = bibtex + ads.ExportQuery(bibcode).execute()
                bibtex = bibtex + pp.bibtex.encode("utf-8")

            #   bibtex = pyinspire.get_text_from_inspire(search, "bibtex").encode('utf-8')
            # Write the BibTeX to a file.
            with open(tempf, "w") as f:
                f.write(bibtex)
            # Parse the BibTeX from the same file.
            with open(tempf, "r") as f:
                bp = BibTexParser(f)
            # Get the bibtex as a dictionary and remove any newlines.
            bibitems = map(remove_newlines, bp.get_entry_list())
            # Save the dictionary to the cache file.
            with open(savef, "w") as f:
                json.dump(bibitems, f)
        except:
            #            import urllib
            alpitems = []
            alpitems.append(
                alp.Item(
                    title="Rate limit was exceed",
                    subtitle="Search on the ADS website for '" + search + "'",
                    arg=encode_arguments(
                        type='url',
                        value="http://ui.adsabs.harvard.edu/#search/" +
                        urllib.urlencode({'q': search})
                        #                value   = search
                    )))
            return alpitems

    # Copy the cache file to the file containing the latest results.
    shutil.copy(savef, lastf)

    # Parse the result dictionary to alp items.
    alpitems = []
    for bibitem in bibitems:
        alpitems.append(bibitem_to_alpitem(bibitem, search))

    # No results? Then tell the user, and offer to search the ADS website.
    #    if len(alpitems) == 0:

    # And return feedback for Alfred.
    return alpitems
Example #13
def inspire_search(search=""):
    """Searches Inspire."""

    import time
    import shutil
    import os
    import json
    import base64

    # Path for the temporary bibtex results.
    tempf = os.path.join(alp.cache(), "results.bib")
    # Path for the temporary latest parsed results.
    lastf = os.path.join(alp.cache(), "lastresults.json")
    # Path for the permanent cache of the query. Note the urlsafe encode.
    savef = os.path.join(alp.storage(),
                         base64.urlsafe_b64encode(search) + ".cache")

    # Check if the cache file exists and if it's not older than the configured number of days.
    try:
        # Get the modification time of the cache file.
        mt = os.path.getmtime(savef)
        # Get the current time.
        ct = time.time()
        # Is the difference in time less than a number of days? Then use it as cache.
        usecache = ct - mt < (get_cache_setting() * 86400)
    except:
        # If the above fails (e.g. when the file doesn't exist), don't use cache.
        usecache = False

    if usecache:
        # Read the cache file and parse the JSON to a dictionary.
        with open(savef, "r") as f:
            bibitems = json.load(f)
    else:
        from bibtexparser.bparser import BibTexParser
        from pyinspire import pyinspire
        # Query Inspire and get the result in form of BibTeX.
        bibtex = pyinspire.get_text_from_inspire(search,
                                                 "bibtex").encode('utf-8')
        # Write the BibTeX to a file.
        with open(tempf, "w") as f:
            f.write(bibtex)
        # Parse the BibTeX from the same file.
        with open(tempf, "r") as f:
            bp = BibTexParser(f)
        # Get the bibtex as a dictionary and remove any newlines.
        bibitems = map(remove_newlines, bp.get_entry_list())
        # Save the dictionary to the cache file.
        with open(savef, "w") as f:
            json.dump(bibitems, f)

    # Copy the cache file to the file containing the latest results.
    shutil.copy(savef, lastf)

    # Parse the result dictionary to alp items.
    alpitems = []
    for bibitem in bibitems:
        alpitems.append(bibitem_to_alpitem(bibitem, search))

    # No results? Then tell the user, and offer to search the Inspire website.
    if len(alpitems) == 0:
        import urllib
        alpitems.append(
            alp.Item(title="No results",
                     subtitle="Search on the INSPIRE website for '" + search +
                     "'",
                     arg=encode_arguments(
                         type='url',
                         value="http://inspirehep.net/search?ln=en&" +
                         urllib.urlencode({'p': search}))))

    # And return feedback for Alfred.
    return alpitems
Example #14
def save_preset(preset_name):
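    # Create a folder for this preset under the workflow's storage directory
    # and copy the cached light state (lights.json) into it.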
    preset_dir = alp.storage(join='presets/%s/' % preset_name)
    os.makedirs(preset_dir)
    shutil.copy2(alp.cache('lights.json'), preset_dir)
    print 'Preset saved: %s' % preset_name
Example #15
def main(q):

    # Decode the arguments passed by Alfred.
    # The result is a dictionary with keys 'type', 'value', and 'notification'.
    args = json.loads(base64.b64decode(q))

    t = args['type']
    v = args['value']
    n = args['notification']

    #
    # Act on the various types.
    #

    # Open a URL in the default browser.
    if t == 'url':
        import webbrowser
        webbrowser.open(v)

    # Paste to clipboard.
    elif t == 'clipboard':
        import os
        import alp
        import subprocess
        # Paste to the clipboard via the command line util 'pbcopy'.
        # First, write the data to a file which 'pbcopy' will read.
        cpf = os.path.join(alp.cache(),"clipboard.txt")
        with open(cpf, "w") as f:
            f.write(v.encode('utf8'))
        # Now call 'pbcopy'. The following envvar must be set so that
        # pbcopy knows that we're dealing with unicode strings.
        os.environ["LC_CTYPE"] = "UTF-8"
        subprocess.call('pbcopy < "' + cpf + '"',shell=True)

    # Lookup Inspire record.
    elif t == 'inspirerecord':
        import urllib
        import webbrowser
        import xml.etree.ElementTree as ET

        # First, get the URL for the record by querying Inspire.

        # Get XML data from Inspire.
        url = "http://inspirehep.net/rss?" + urllib.urlencode({'ln':'en','p':v})
        try:
            f = urllib.urlopen(url)
            xml = f.read()
            f.close()
        except:
            return
        # Parse the XML.
        e = ET.fromstring(xml)
        for item in e.iter('item'):
            for link in item.iter('link'):
                recordurl = link.text
                break
            break

        # Now open the URL.
        webbrowser.open(recordurl)

    elif t == 'clearcache':
        import os
        import alp

        # Remove cache files from storage folder.
        for f in os.listdir(alp.storage()):
            file_path = os.path.join(alp.storage(), f)
            try:
                if os.path.isfile(file_path):
                    if os.path.splitext(f)[-1] == ".cache":
                        os.remove(file_path)
            except Exception:
                pass
Example #16
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import alp
import sqlite3

clone_database = alp.storage(join="zotquery.sqlite")
conn = sqlite3.connect(clone_database)
cur = conn.cursor()

#query = 'sem'


def title(query):
    title_query = """
	select items.key
	from items, itemData, fields, itemDataValues, itemTypes
	where
		items.itemID = itemData.itemID
		and itemData.fieldID = fields.fieldID
		and itemData.valueID = itemDataValues.valueID
		and items.itemTypeID = itemTypes.itemTypeID
		and itemTypes.typeName != "attachment"
		and (fields.fieldName = "title"
			or fields.fieldName = "publicationTitle"
			or fields.fieldName = "seriesTitle"
			or fields.fieldName = "series")
		and itemDataValues.value LIKE '%{0}%'
	""".format(query)
    title_info = cur.execute(title_query).fetchall()

    keys_list = []