Example no. 1
0
def do_feedback():
    q = alp.args()
    flowPath = os.path.split(alp.local())[0]
    cache = alp.jsonLoad("cache.json", default={})
    day_secs = 24 * 60 * 60
    force = (len(q) > 0 and q[0] == "|force|")
    t = time.time()
    if (force):
        import shutil
        _c = alp.cache()
        _s = alp.storage()
        shutil.rmtree(_c)
        shutil.rmtree(_s)

    if (cache.get("cache_time", 0) + day_secs > t) and not force:
        candidates = cache.get("cached_workflows", [])
    else:
        candidates = []
        for dirpath, dirnames, filenames in os.walk(flowPath, topdown=False, followlinks=True):
            for aFile in filenames:
                if aFile == "update.json":
                    try:
                        fn = os.path.join(dirpath, "Info.plist")
                        if not os.path.exists(fn):
                            fn = os.path.join(dirpath, "info.plist")

                        plist = alp.readPlist(fn)
                    except IOError as e:
                        alp.log("Exception: Info.plist not found ({0}).".format(e))
                        continue
                    else:
                        name = plist["name"]
                        local_description = plist["description"]
                        the_json = os.path.join(dirpath, aFile)
                        the_icon = os.path.join(dirpath, "icon.png")
                        if name != "Alleyoop":
                            candidates.append(dict(name=name, json=the_json,
                                icon=the_icon, path=dirpath,
                                description=local_description))
                        else:
                            downloads_path = os.path.expanduser("~/Downloads/")
                            candidates.append(dict(name=name, json=the_json,
                                icon=the_icon, path=downloads_path,
                                description=local_description))
        new_cache = dict(cache_time=t, cached_workflows=candidates)
        alp.jsonDump(new_cache, "cache.json")

    threads = []
    for candidict in candidates:
        try:
            with codecs.open(candidict["json"]) as f:
                local = json.load(f, encoding="utf-8")
        except Exception as e:
            alp.log("{0} may no longer exist: {1}".format(candidict["name"], e))
            continue

        ot = OopThread(local['remote_json'], force, candidict, local)
        threads.append(ot)
        ot.start()
    manage_threads(threads)
Example no. 2
0
    def _load_lights_data_from_api(self, timeout=6):
        """Downloads lights data and caches it locally."""

        # Requests is an expensive import so we only do it when necessary.
        import requests

        settings = alp.Settings()

        r = requests.get(
            'http://{0}/api/{1}'.format(
                settings.get('bridge_ip'),
                settings.get('username'),
            ),
            timeout=timeout,
        )
        data = r.json()
        lights = data['lights']

        if settings.get('group'):
            lights = {lid: lights[lid] for lid in settings.get('group')}

        # Filter out anything that doesn't have an "xy" key in its state
        # e.g. "Dimmable plug-in unit", see: http://goo.gl/a5P7yN
        lights = {lid: lights[lid] for lid in lights if lights[lid]['state'].get('xy')}

        alp.jsonDump(lights, alp.cache('lights.json'))

        # Create icon for light
        for lid, light_data in lights.iteritems():
            self._create_light_icon(lid, light_data)
Example no. 3
0
def load_cache():
    try:
        cache = json.load(open(alp.cache("cache.json")))
        debug_print("Loaded cache")
    except:
        cache = None
    return cache
Example no. 4
0
def load_cache():
    try:
        cache = json.load(open(alp.cache("cache.json")))
        debug_print("Loaded cache")
    except:
        cache = None
    return cache
Example no. 5
0
def checkCache(delete=False):
    filepath = alp.cache()
    f = '%s_requests_cache.sqlite' % alp.bundle()
    fullpath = os.path.join(filepath,f)
    if os.path.exists(fullpath):
        if ((datetime.now() - datetime.fromtimestamp(os.path.getmtime(fullpath))) > timedelta(hours=6)) or delete:
            try:
                os.remove(fullpath)
                alp.log('Successfully removed requests cache')
            except:
                alp.log('Problem: Could not remove requests cache')
    return
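The example above invalidates the requests cache by comparing the file's modification time against a six-hour window. Below is a distilled, standalone sketch of that invalidation pattern using only the standard library; the function name and the default window are illustrative assumptions, not taken from the workflow above.

import os
import time

def is_stale(path, max_age_hours=6):
    """Hypothetical helper: True if `path` is missing or older than `max_age_hours`."""
    try:
        age_seconds = time.time() - os.path.getmtime(path)
    except OSError:
        # A missing file counts as stale so the caller regenerates it.
        return True
    return age_seconds > max_age_hours * 3600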
Example no. 6
0
def cacheIcon(url):
    iconRequest = alp.Request(url)

    covercache = alp.cache("covercache")
    if not os.path.exists(covercache):
        os.makedirs(covercache)

    (_, filename) = os.path.split(url)
    iconPath = os.path.join(covercache, filename)
    with open(iconPath, "wb") as f:
        f.write(iconRequest.request.content)

    return iconPath
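For context, a hedged usage sketch of cacheIcon: the returned path would typically be handed to an alp.Item as its icon. The URL and item fields below are made up for illustration, and the snippet assumes alp and the cacheIcon function above are importable.

icon_path = cacheIcon("http://example.com/covers/some-album.png")  # hypothetical URL
item = alp.Item(title="Some Album",                                # hypothetical metadata
                subtitle="Cover art cached locally",
                icon=icon_path,
                valid=False)
alp.feedback([item])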
Example no. 7
0
def cacheIcon(url):
    iconRequest = alp.Request(url)

    covercache = alp.cache("covercache")
    if not os.path.exists(covercache):
        os.makedirs(covercache)

    (_, filename) = os.path.split(url)
    iconPath = os.path.join(covercache, filename)
    with open(iconPath, "wb") as f:
        f.write(iconRequest.request.content)

    return iconPath
Example no. 8
0
def artworkCache(url):
    cachePath = alp.cache("artcache")
    if not os.path.exists(cachePath):
        os.makedirs(cachePath)

    (_, filename) = os.path.split(url)
    artPath = os.path.join(cachePath, filename)

    if os.path.exists(artPath):
        return artPath
    else:
        artScraper = alp.Request(url)
        with open(artPath, "wb") as f:
            f.write(artScraper.request.content)
        return artPath
Example no. 9
0
def artworkCache(url):
    cachePath = alp.cache("artcache")
    if not os.path.exists(cachePath):
        os.makedirs(cachePath)

    (_, filename) = os.path.split(url)
    artPath = os.path.join(cachePath, filename)

    if os.path.exists(artPath):
        return artPath
    else:
        artScraper = alp.Request(url)
        with open(artPath, "wb") as f:
            f.write(artScraper.request.content)
        return artPath
Example no. 10
0
    def _get_lights(self, from_cache=False):
        """Returns a dictionary of lid => data, or None if no lights data is in the cache.

        Options:
            from_cache - Read data from cached json files instead of querying the API.
        """
        output = dict()

        if not from_cache:
            from requests.exceptions import RequestException
            try:
                self._load_lights_data_from_api()
            except RequestException:
                return None

        lights = alp.jsonLoad(alp.cache('lights.json'))

        return lights
Example no. 11
0
def artworkCache(url):
    cachePath = alp.cache("artcache")
    if not os.path.exists(cachePath):
        os.makedirs(cachePath)

    (urlFragment, filename) = os.path.split(url)
    (_, prefix) = os.path.split(urlFragment)
    prefix = os.path.basename(os.path.normpath(prefix))
    filename = prefix + filename
    artPath = os.path.join(cachePath, filename)

    if os.path.exists(artPath):
        return artPath
    else:
        artScraper = alp.Request(url)
        with open(artPath, "wb") as f:
            f.write(artScraper.request.content)
        return artPath
Example no. 12
0
def artworkCache(url):
    cachePath = alp.cache("artcache")
    if not os.path.exists(cachePath):
        os.makedirs(cachePath)

    (urlFragment, filename) = os.path.split(url)
    (_, prefix) = os.path.split(urlFragment)
    prefix = os.path.basename(os.path.normpath(prefix))
    filename = prefix + filename
    artPath = os.path.join(cachePath, filename)

    if os.path.exists(artPath):
        return artPath
    else:
        artScraper = alp.Request(url)
        with open(artPath, "wb") as f:
            f.write(artScraper.request.content)
        return artPath
Example no. 13
0
def context_menu(key="",search=""):
    """Returns the context menu for a result item"""

    # This method takes only the key (id) of the actioned item as an argument.
    # So we need to load the last results, and find the item of that key.

    import os
    import json
    import time

    bid = alp.bundle() + str(time.time()) 

    # Load the parsed results from the latest Inspire search.
    lastf = os.path.join(alp.cache(),"lastresults.json")
    with open(lastf,"r") as f:
        items = json.load(f)

    # Lookup the item from the results.
    for i in items:
        if 'id' in i:
            if i['id'] == key:
                item = i
                break

    # Populate the context menu action list.
    actions = []

    # Link to the Inspire record page.
    actions.append(
        alp.Item(
            title    = item['title'],
            subtitle = "Open INSPIRE record page in browser",
            arg      = encode_arguments(type='inspirerecord',value=item['id']),
            uid      = bid+"inspirerecord"
        )
    )

    # Author search.
    authors = item['author'].split(" and ")
    if len(authors) == 1:
        actions.append(
            alp.Item(
                title        = item['author'],
                subtitle     = "Find more papers of author",
                valid        = "no",
                autocomplete = "find a "+ item['author'] + alfred_delim,
                uid          = bid + "authors"
            )
        )
    else:
        actions.append(
            alp.Item(
                title        = authors_to_lastnames(item['author']),
                subtitle     = "Find more papers of authors",
                valid        = "no",
                autocomplete = search + alfred_delim + key + alfred_delim + item['author'] + alfred_delim,
                uid          = bid + "authors"
            )
        )   

    # Link to resolve the DOI.
    if 'doi' in item:
        url = "http://dx.doi.org/" + item['doi']
        actions.append(
            alp.Item(
                title    = bibitem_to_journaltext(item),
                subtitle = "Open DOI in browser",
                arg      = encode_arguments(type='url',value=url),
                uid      = bid + "doi"
            )
        )

    # Next, the option to open the PDF from arXiv.
    if 'eprint' in item:
        if item['archiveprefix'] != 'arXiv':
            urlprefix = item['archiveprefix'] + "/"
            prefix = urlprefix
        else:
            urlprefix = ""
            prefix = 'arXiv:'
        url = "http://arxiv.org/pdf/" + urlprefix + item['eprint']
        filename = os.path.join(
            get_local_dir(),
            (item['eprint'] + " " + authors_to_lastnames(item['author']) + " - " + item['title'] + '.pdf').replace('/','_').replace(':','_')
        )
        actions.append(
            alp.Item(
                title    = prefix + item['eprint'],
                subtitle = "Download and open PDF",
                arg      = encode_arguments(type='getpdf',value=[url,filename]),
                uid      = bid + "arxivpdf"
            )
        )

    # The option to lookup references.
    actions.append(
        alp.Item(
            title        = "References",
            subtitle     = "Find papers that this paper cites",
            valid        = "no",
            autocomplete = "citedby:" + key + alfred_delim,
            uid          = bid + "refs"
        )
    )

    # The option to lookup citations.
    actions.append(
        alp.Item(
            title        = "Citations",
            subtitle     = "Find papers that cite this paper",
            valid        = "no",
            autocomplete = "refersto:" + key + alfred_delim,
            uid          = bid + "cites"
        )
    )

    # The option to copy the bibtex of the current item to the clipboard.
    actions.append(
        alp.Item(
            title       = "BibTeX",
            subtitle    = "Copy BibTeX to clipboard",
            uid         = bid+"bibtex",
            arg         = encode_arguments(
                type         = 'clipboard',
                value        = bibitem_to_bibtex(item),
                notification = {
                    'title':'Copied BibTeX to clipboard',
                    'text':'BibTeX for ' + key + ' has been copied to the clipboard'
                }
            )
        )
    )

    # And return.
    return actions
Example no. 14
0
def inspire_search(search=""):
    """Searches Inspire."""

    import time
    import shutil
    import os
    import json
    import base64

    # Path for the temporary bibtex results.
    tempf = os.path.join(alp.cache(), "results.bib")
    # Path for the temporary latest parsed results.
    lastf = os.path.join(alp.cache(), "lastresults.json")
    # Path for the permanent cache of the query. Note the urlsafe encode.
    savef = os.path.join(alp.storage(),
                         base64.urlsafe_b64encode(search) + ".cache")

    # Check if the cache file exists and is not older than the configured number of days.
    try:
        # Get the modification time of the cache file.
        mt = os.path.getmtime(savef)
        # Get the current time.
        ct = time.time()
        # Is the time difference less than the configured number of days? Then use the cache.
        usecache = ct - mt < (get_cache_setting() * 86400)
    except:
        # If the above fails (e.g. when the file doesn't exist), don't use cache.
        usecache = False

    if usecache:
        # Read the cache file and parse the JSON to a dictionary.
        with open(savef, "r") as f:
            bibitems = json.load(f)
    else:
        from bibtexparser.bparser import BibTexParser
        from pyinspire import pyinspire
        # Query Inspire and get the result in form of BibTeX.
        bibtex = pyinspire.get_text_from_inspire(search,
                                                 "bibtex").encode('utf-8')
        # Write the BibTeX to a file.
        with open(tempf, "w") as f:
            f.write(bibtex)
        # Parse the BibTeX from the same file.
        with open(tempf, "r") as f:
            bp = BibTexParser(f)
        # Get the bibtex as a dictionary and remove any newlines.
        bibitems = map(remove_newlines, bp.get_entry_list())
        # Save the dictionary to the cache file.
        with open(savef, "w") as f:
            json.dump(bibitems, f)

    # Copy the cache file to the file containing the latest results.
    shutil.copy(savef, lastf)

    # Parse the result dictionary to alp items.
    alpitems = []
    for bibitem in bibitems:
        alpitems.append(bibitem_to_alpitem(bibitem, search))

    # No results? Then tell the user, and offer to search the Inspire website.
    if len(alpitems) == 0:
        import urllib
        alpitems.append(
            alp.Item(title="No results",
                     subtitle="Search on the INSPIRE website for '" + search +
                     "'",
                     arg=encode_arguments(
                         type='url',
                         value="http://inspirehep.net/search?ln=en&" +
                         urllib.urlencode({'p': search}))))

    # And return feedback for Alfred.
    return alpitems
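remove_newlines is called above but not shown anywhere in this listing; the following is only a guess at a minimal implementation consistent with how it is used (flatten line breaks inside every string field of one parsed BibTeX entry dictionary).

def remove_newlines(entry):
    """Hypothetical helper: collapse newlines in the string values of a BibTeX entry."""
    cleaned = {}
    for key, value in entry.items():
        if hasattr(value, 'replace'):
            value = value.replace('\r', ' ').replace('\n', ' ')
        cleaned[key] = value
    return cleaned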
Example no. 15
0
                ending_balance = 0
            else:
                ending_balance = round(ending_balance, 2)

            if ending_balance < 0:
                ending_text = "Overspent on %s this month!"
                icon = "icon-no.png"
            elif ending_balance == 0:
                ending_text = "No budget left for %s this month"
                icon = "icon-no.png"
            else:
                ending_text = "Remaining balance for %s this month"
                icon = "icon-yes.png"
            try:
                i = alp.Item(title=locale.currency(ending_balance, True, True).decode("latin1"), subtitle=ending_text % r["name"], uid=entityId, valid=False, icon=icon)
            except Exception, e:
                i = alp.Item(title="%0.2f" % ending_balance, subtitle = ending_text % r["name"], uid = entityId, valid = False, icon = icon)
            items.append(i)

    alp.feedback(items)
    if cache is not None:
        try:
            if "cache_date" not in cache:
                cache["cache_date"] = int(time.time())
            #debug_print(cache)
            json.dump(cache, open(alp.cache("cache.json"), "w"))
        except Exception, e:
            pass


Example no. 16
0
def inspire_search(search=""):
    """Searches Inspire."""

    import time
    import shutil
    import os
    import json
    import base64

    # Path for the temporary bibtex results.
    tempf = os.path.join(alp.cache(),"results.bib")
    # Path for the temporary latest parsed results.
    lastf = os.path.join(alp.cache(),"lastresults.json")
    # Path for the permanent cache of the query. Note the urlsafe encode.
    savef = os.path.join(alp.storage(),base64.urlsafe_b64encode(search) + ".cache")

    # Check if the cache file exists and is not older than the configured number of days.
    try:
        # Get the modification time of the cache file.
        mt = os.path.getmtime(savef)
        # Get the current time.
        ct = time.time()
        # Is the time difference less than the configured number of days? Then use the cache.
        usecache =  ct - mt < ( get_cache_setting() * 86400 )
    except:
        # If the above fails (e.g. when the file doesn't exist), don't use cache.
        usecache = False

    if usecache:
        # Read the cache file and parse the JSON to a dictionary.
        with open(savef,"r") as f:
            bibitems = json.load(f)
    else:
        from bibtexparser.bparser import BibTexParser
        from pyinspire import pyinspire
        # Query Inspire and get the result in form of BibTeX.
        bibtex = pyinspire.get_text_from_inspire(search,"bibtex").encode('utf-8')
        # Write the BibTeX to a file.
        with open(tempf,"w") as f:
            f.write(bibtex)
        # Parse the BibTeX from the same file.
        with open(tempf,"r") as f:
            bp = BibTexParser(f)
        # Get the bibtex as a dictionary and remove any newlines.
        bibitems = map(remove_newlines,bp.get_entry_list())
        # Save the dictionary to the cache file.
        with open(savef,"w") as f:
            json.dump(bibitems,f)

    # Copy the cache file to the file containing the latest results.
    shutil.copy(savef,lastf)

    # Parse the result dictionary to alp items.
    alpitems = []
    for bibitem in bibitems:
        alpitems.append(bibitem_to_alpitem(bibitem,search))

    # No results? Then tell the user, and offer to search the Inspire website.
    if len(alpitems) == 0:
        import urllib
        alpitems.append(alp.Item(
            title       = "No results",
            subtitle    = "Search on the INSPIRE website for '" + search + "'",
            arg=encode_arguments(
                type    = 'url',
                value   = "http://inspirehep.net/search?ln=en&" + urllib.urlencode({'p':search})
            )
        ))

    # And return feedback for Alfred.
    return alpitems
Example no. 17
0
def save_preset(preset_name):
    preset_dir = alp.storage(join='presets/%s/' % preset_name)
    os.makedirs(preset_dir)
    shutil.copy2(alp.cache('lights.json'), preset_dir)
    print 'Preset saved: %s' % preset_name
Example no. 18
0
def main(q):

    # Decode the arguments passed by Alfred.
    # The result is a dictionary with keys 'type', 'value', and 'notification'.
    args = json.loads(base64.b64decode(q))

    t = args['type']
    v = args['value']
    n = args['notification']

    #
    # Act on the various types.
    #

    # Open a URL in the default browser.
    if t == 'url':
        import webbrowser
        webbrowser.open(v)

#    elif t == 'ads':
#        import scriptfilter.get_token_setting as get_token
#        import ads.sandbox as ads
#
#        ads.config.token = get_token()
#        pp = ads.SearchQuery(q=v)

    # Paste to the clipboard.
    elif t == 'clipboard':
        import os
        import alp
        import subprocess
        # Paste to the clipboard via the command line util 'pbcopy'.
        # First, write the data to a file which 'pbcopy' will read.
        cpf = os.path.join(alp.cache(),"clipboard.txt")
        with open(cpf, "w") as f:
            f.write(v)
        # Now call 'pbcopy'.
        subprocess.call('pbcopy < "' + cpf + '"',shell=True)

    # Lookup Inspire record.
    elif t == 'inspirerecord':

        import urllib
        import webbrowser
        import xml.etree.ElementTree as ET

        # First, get the URL for the record by querying Inspire.

        # Get XML data from Inspire.
        url = "http://inspirehep.net/rss?" + urllib.urlencode({'ln':'en','p':v})
        try:
            f = urllib.urlopen(url)
            xml = f.read()
            f.close()
        except:
            return
        # Parse the XML.
        e = ET.fromstring(xml)
        for item in e.iter('item'):
            for link in item.iter('link'):
                recordurl = link.text
                break
            break

        # Now open the URL.
        webbrowser.open(recordurl)

    elif t == 'clearcache':
        import os
        import alp

        # Remove cache files from storage folder.
        for f in os.listdir(alp.storage()):
            file_path = os.path.join(alp.storage(), f)
            try:
                if os.path.isfile(file_path):
                    if os.path.splitext(f)[-1] == ".cache":
                        os.remove(file_path)
            except Exception, e:
                pass
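main() above decodes its argument with json.loads(base64.b64decode(q)) and expects the keys 'type', 'value', and 'notification'; the matching encode_arguments used elsewhere in this listing is not included. Below is a minimal sketch of an encoder consistent with that decoder; the signature and defaults are assumptions.

import base64
import json

def encode_arguments(type='', value='', notification=None):
    # Hypothetical counterpart to main(): pack the argument dictionary and base64-encode it.
    payload = {'type': type, 'value': value, 'notification': notification or {}}
    return base64.b64encode(json.dumps(payload).encode('utf-8')).decode('ascii')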
Example no. 19
0
            elif ending_balance == 0:
                ending_text = "No budget left for %s this month"
                icon = "icon-no.png"
            else:
                ending_text = "Remaining balance for %s this month"
                icon = "icon-yes.png"
            try:
                i = alp.Item(title=locale.currency(ending_balance, True,
                                                   True).decode("latin1"),
                             subtitle=ending_text % r["name"],
                             uid=entityId,
                             valid=False,
                             icon=icon)
            except Exception, e:
                i = alp.Item(title="%0.2f" % ending_balance,
                             subtitle=ending_text % r["name"],
                             uid=entityId,
                             valid=False,
                             icon=icon)
            items.append(i)

    alp.feedback(items)
    if cache is not None:
        try:
            if "cache_date" not in cache:
                cache["cache_date"] = int(time.time())
            #debug_print(cache)
            json.dump(cache, open(alp.cache("cache.json"), "w"))
        except Exception, e:
            pass
Example no. 20
0
 def image_cache(self):
     cache = alp.cache('cards')
     if not os.path.exists(cache):
         os.makedirs(cache)
     return cache
Example no. 21
0
def context_ads_menu(key="", search=""):
    """Returns the context menu for ads result item"""

    # This method takes only the key (id) of the actioned item as an argument.
    # So we need to load the last results, and find the item of that key.
    # ADS should have adsurl

    import os
    import json
    import time

    bid = alp.bundle() + str(time.time())

    # Load the parsed results from the latest ADS search.
    lastf = os.path.join(alp.cache(), "lastresults.json")
    with open(lastf, "r") as f:
        items = json.load(f)

    # Lookup the item from the results.
    for i in items:
        if 'id' in i:
            if i['id'] == key:
                item = i
                break

    # Populate the context menu action list.
    actions = []

    # Link to the ADS record page.
    # if 'adsurl' in item:
    actions.append(
        alp.Item(title=item['title'],
                 subtitle="Open ADS record page in browser",
                 arg=encode_arguments(type='url', value=item['adsurl']),
                 uid=bid + "adsrecord"))

    # Author search.
    authors = item['author'].split(" and ")
    if len(authors) == 1:
        actions.append(
            alp.Item(title=item['author'],
                     subtitle="Find more papers of author",
                     valid="no",
                     autocomplete="author: " + item['author'] + alfred_delim,
                     uid=bid + "authors"))
    else:
        actions.append(
            alp.Item(title=authors_to_lastnames(item['author']),
                     subtitle="Find more papers of authors",
                     valid="no",
                     autocomplete=search + alfred_delim + key + alfred_delim +
                     item['author'] + alfred_delim,
                     uid=bid + "authors"))

    # Link to resolve the DOI.
    if 'doi' in item:
        url = "http://dx.doi.org/" + item['doi']
        actions.append(
            alp.Item(title=bibitem_to_journaltext(item),
                     subtitle="Open DOI in browser",
                     arg=encode_arguments(type='url', value=url),
                     uid=bid + "doi"))

    # Next, the option to open the PDF from arXiv.
    if 'eprint' in item:
        #if item['archiveprefix'] != 'arXiv':
        #    urlprefix = item['archiveprefix'] + "/"
        #    prefix = urlprefix
        #else:
        urlprefix = ""
        prefix = 'arXiv:'
        url = "http://arxiv.org/pdf/" + urlprefix + item['eprint']
        filename = os.path.join(
            get_local_dir(),
            (item['eprint'] + " " + authors_to_lastnames(item['author']) +
             " - " + item['title'] + '.pdf').replace('/',
                                                     '_').replace(':', '_'))
        actions.append(
            alp.Item(title=prefix + item['eprint'],
                     subtitle="Download and open PDF",
                     arg=encode_arguments(type='getpdf', value=[url,
                                                                filename]),
                     uid=bid + "arxivpdf"))

    # The option to lookup references.
    actions.append(
        alp.Item(title="References",
                 subtitle="Find papers that this paper cites",
                 valid="no",
                 autocomplete="references(bibcode:" + key + ")" + alfred_delim,
                 uid=bid + "refs"))

    # The option to lookup citations.
    actions.append(
        alp.Item(title="Citations",
                 subtitle="Find papers that cite this paper",
                 valid="no",
                 autocomplete="citations(bibcode:" + key + ")" + alfred_delim,
                 uid=bid + "cites"))

    # The option to copy the bibtex of the current item to the clipboard.
    actions.append(
        alp.Item(title="BibTeX",
                 subtitle="Copy BibTeX to clipboard",
                 uid=bid + "bibtex",
                 arg=encode_arguments(type='clipboard',
                                      value=bibitem_to_bibtex(item),
                                      notification={
                                          'title':
                                          'Copied BibTeX to clipboard',
                                          'text':
                                          'BibTeX for ' + key +
                                          ' has been copied to the clipboard'
                                      })))

    # And return.
    return actions
Example no. 22
0
def ads_search(search=""):
    """Searches ADS."""

    import time
    import shutil
    import os
    import json
    import base64

    # Path for the temporary bibtex results.
    tempf = os.path.join(alp.cache(), "results.bib")
    # Path for the temporary latest parsed results.
    lastf = os.path.join(alp.cache(), "lastresults.json")
    # Path for the permanent cache of the query. Note the urlsafe encode.
    savef = os.path.join(alp.storage(),
                         base64.urlsafe_b64encode(search) + ".cache")

    # Check if the cache file exists and is not older than the configured number of days.
    try:
        # Get the modification time of the cache file.
        mt = os.path.getmtime(savef)
        # Get the current time.
        ct = time.time()
        # Is the time difference less than the configured number of days? Then use the cache.
        usecache = ct - mt < (get_cache_setting() * 86400)
    except:
        # If the above fails (e.g. when the file doesn't exist), don't use cache.
        usecache = False

    if usecache:
        # Read the cache file and parse the JSON to a dictionary.
        with open(savef, "r") as f:
            bibitems = json.load(f)
    else:
        from bibtexparser.bparser import BibTexParser
        #   from pyinspire import pyinspire
        #        import ads.sandbox as ads
        import ads
        import urllib

        ads.config.token = get_token_setting()
        # Query ADS and get the result in form of BibTeX.
        alpitems = []
        alpitems.append(
            alp.Item(
                title="Open ADS for search",
                subtitle="Search on the ADS website for '" + search + "'",
                arg=encode_arguments(
                    type='url',
                    value="http://ui.adsabs.harvard.edu/#search/" +
                    urllib.urlencode({'q': search})
                    #                value   = search
                )))
        # papers in ads
        try:
            ppp = (ads.SearchQuery(q=search, fl=['bibcode', 'bibtex'], rows=8))
            # get the bibtex
            bibtex = ""
            bibcode = []
            for pp in ppp:
                bibcode = bibcode + [pp.bibcode]
                #                bibtex = bibtex + ads.ExportQuery(bibcode).execute()
                bibtex = bibtex + pp.bibtex.encode("utf-8")

    #        bibtex = pyinspire.get_text_from_inspire(search,"bibtex").encode('utf-8')
    # Write the BibTeX to a file.
            with open(tempf, "w") as f:
                f.write(bibtex)
            # Parse the BibTeX from the same file.
            with open(tempf, "r") as f:
                bp = BibTexParser(f)
            # Get the bibtex as a dictionary and remove any newlines.
            bibitems = map(remove_newlines, bp.get_entry_list())
            # Save the dictionary to the cache file.
            with open(savef, "w") as f:
                json.dump(bibitems, f)
        except:
            #            import urllib
            alpitems = []
            alpitems.append(
                alp.Item(
                    title="Rate limit was exceed",
                    subtitle="Search on the ADS website for '" + search + "'",
                    arg=encode_arguments(
                        type='url',
                        value="http://ui.adsabs.harvard.edu/#search/" +
                        urllib.urlencode({'q': search})
                        #                value   = search
                    )))
            return alpitems

    # Copy the cache file to the file containing the latest results.
    shutil.copy(savef, lastf)

    # Parse the result dictionary to alp items.
    alpitems = []
    for bibitem in bibitems:
        alpitems.append(bibitem_to_alpitem(bibitem, search))

    # No results? Then tell the user, and offer to search the ADS website.


#    if len(alpitems) == 0:
# And return feedback for Alfred.
    return alpitems
Example no. 23
0
#!/usr/bin/env python
import sys
import os
import time
import re
import subprocess
import cPickle
import multiprocessing

sys.path.append(os.path.join(os.path.dirname(__file__), 'alp'))
import alp

settings = alp.Settings()

projects_cache = alp.cache(join='projects.cache')

if settings.get('project_types') is None:
    settings.set(project_types=(
        ".sublime-workspace",
    ))


def get_project_title(project_path):
    return os.path.splitext(os.path.basename(project_path))[0]


def serialize_project(project_path):
    meta = dict(
        title=get_project_title(project_path),
        subtitle=project_path,
        arg=project_path,
Example no. 24
0
def do_feedback():
    q = alp.args()
    flowPath = os.path.split(alp.local())[0]
    cache = alp.jsonLoad("cache.json", default={})
    day_secs = 24 * 60 * 60
    force = (len(q) > 0 and q[0] == "|force|")
    t = time.time()
    if (force):
        import shutil
        _c = alp.cache()
        _s = alp.storage()
        shutil.rmtree(_c)
        shutil.rmtree(_s)

    if (cache.get("cache_time", 0) + day_secs > t) and not force:
        candidates = cache.get("cached_workflows", [])
    else:
        candidates = []
        for dirpath, dirnames, filenames in os.walk(flowPath,
                                                    topdown=False,
                                                    followlinks=True):
            for aFile in filenames:
                if aFile == "update.json":
                    try:
                        fn = os.path.join(dirpath, "Info.plist")
                        if not os.path.exists(fn):
                            fn = os.path.join(dirpath, "info.plist")

                        plist = alp.readPlist(fn)
                    except IOError as e:
                        alp.log(
                            "Exception: Info.plist not found ({0}).".format(e))
                        continue
                    else:
                        name = plist["name"]
                        local_description = plist["description"]
                        the_json = os.path.join(dirpath, aFile)
                        the_icon = os.path.join(dirpath, "icon.png")
                        if name != "Alleyoop":
                            candidates.append(
                                dict(name=name,
                                     json=the_json,
                                     icon=the_icon,
                                     path=dirpath,
                                     description=local_description))
                        else:
                            downloads_path = os.path.expanduser("~/Downloads/")
                            candidates.append(
                                dict(name=name,
                                     json=the_json,
                                     icon=the_icon,
                                     path=downloads_path,
                                     description=local_description))
        new_cache = dict(cache_time=t, cached_workflows=candidates)
        alp.jsonDump(new_cache, "cache.json")

    threads = []
    for candidict in candidates:
        try:
            with codecs.open(candidict["json"]) as f:
                local = json.load(f, encoding="utf-8")
        except Exception as e:
            alp.log("{0} may no longer exist: {1}".format(
                candidict["name"], e))
            continue

        ot = OopThread(local['remote_json'], force, candidict, local)
        threads.append(ot)
        ot.start()
    manage_threads(threads)
Example no. 25
0
def main(q):

    # Decode the arguments passed by Alfred.
    # The result is a dictionary with keys 'type', 'value', and 'notification'.
    args = json.loads(base64.b64decode(q))

    t = args['type']
    v = args['value']
    n = args['notification']

    #
    # Act on the various types.
    #

    # Open a URL in the default browser.
    if t == 'url':
        import webbrowser
        webbrowser.open(v)

    # Paste to clipboard.
    elif t == 'clipboard':
        import os
        import alp
        import subprocess
        # Paste to the clipboard via the command line util 'pbcopy'.
        # First, write the data to a file which 'pbcopy' will read.
        cpf = os.path.join(alp.cache(),"clipboard.txt")
        with open(cpf, "w") as f:
            f.write(v.encode('utf8'))
        # Now call 'pbcopy'. The following envvar must be set so that
        # pbcopy knows that we're dealing with unicode strings.
        os.environ["LC_CTYPE"] = "UTF-8"
        subprocess.call('pbcopy < "' + cpf + '"',shell=True)

    # Lookup Inspire record.
    elif t == 'inspirerecord':
        import urllib
        import webbrowser
        import xml.etree.ElementTree as ET

        # First, get the URL for the record by querying Inspire.

        # Get XML data from Inspire.
        url = "http://inspirehep.net/rss?" + urllib.urlencode({'ln':'en','p':v})
        try:
            f = urllib.urlopen(url)
            xml = f.read()
            f.close()
        except:
            return
        # Parse the XML.
        e = ET.fromstring(xml)
        for item in e.iter('item'):
            for link in item.iter('link'):
                recordurl = link.text
                break
            break

        # Now open the URL.
        webbrowser.open(recordurl)

    elif t == 'clearcache':
        import os
        import alp

        # Remove cache files from storage folder.
        for f in os.listdir(alp.storage()):
            file_path = os.path.join(alp.storage(), f)
            try:
                if os.path.isfile(file_path):
                    if os.path.splitext(f)[-1] == ".cache":
                        os.remove(file_path)
            except Exception, e:
                pass