def query(method, arg=_no_arg, **params):
    """
    Query the Twitter API and parse the response.

    @param method: Twitter API method name.

    @param arg: Optional additional method "argument". A method argument is
        added to the API URL path and not as a query parameter, since some
        methods behave more naturally this way (C{'statuses/show'} is one
        such example).

    @param **params: Additional keyword arguments are passed on as query
        parameters.

    @rtype: C{Deferred} => C{lxml.objectify.ObjectifiedElement}
    """
    cat, name = method.split('/')
    names = [name]
    if arg is not _no_arg:
        names.append(arg)
    # The final path segment carries the '.xml' extension.
    names[-1] += '.xml'

    url = TWITTER_API.child(cat)
    for name in names:
        url = url.child(name)
    for key, value in params.iteritems():
        url = url.add(key, value)

    d = util.PerseverantDownloader(url).go()
    d.addErrback(handleError)
    d.addCallback(
        lambda (data, headers): lxml.objectify.fromstring(data))
    return d
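# Illustrative usage sketch (not part of the module); the status ID below is
# made up. The Deferred fires with an objectified element mirroring the XML
# response, so fields of the payload can be accessed as attributes:
#
#     d = query('statuses/show', '1234567890')
#     d.addCallback(lambda status: status.text)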
def slipgate(quoteID):
    """
    Retrieve a quote, by ID, from the slipgate quote database.
    """
    quoteURL = SLIPGATE_URL.child(quoteID)

    def extractQuote(lines):
        lines = iter(lines)
        # Attach the quote's URL to the first line and pass the rest
        # through unaltered.
        yield '%s -- %s' % (lines.next(), quoteURL)
        for line in lines:
            yield line

    url = quoteURL.child('raw')
    return util.PerseverantDownloader(url).go().addCallback(
        lambda (data, headers): data.splitlines()).addErrback(
        handleBadQuoteID, quoteID).addCallback(extractQuote)
def currencyExchange(currencyFrom, currencyTo):
    """
    Look up the current exchange rate between two currencies.
    """
    def gotCSV((data, headers)):
        row = csv.reader(StringIO(data)).next()
        lastTradeRate, d, t, bid, ask = row
        lastTradeRate = Decimal(lastTradeRate)
        # The service reports a rate of 0 for unknown currency codes.
        if lastTradeRate == 0:
            raise errors.InvalidCurrency(
                u'One of the specified currency codes is invalid')

        tradeTime = u'%s %s' % (d, t)
        return lastTradeRate, tradeTime

    url = CURRENCY_URL.add('s', '%s%s=X' % (currencyFrom, currencyTo))
    return util.PerseverantDownloader(url).go().addCallback(gotCSV)
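# Illustrative usage sketch; 'USD' and 'EUR' are assumed to be codes the
# service understands. The Deferred fires with a (rate, tradeTime) pair:
#
#     currencyExchange(u'USD', u'EUR').addCallback(
#         lambda (rate, when): u'%s as of %s' % (rate, when))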
def qdbUS(quoteID):
    """
    Retrieve a quote, by ID, from qdb.us.
    """
    url = QDB_US_URL.child(quoteID)

    def extractQuote(tree):
        quote = tree.find('//form/table/tbody')
        header = unicode(''.join(quote.find('tr/td').itertext())).strip()
        text = unicode(''.join(quote.find('tr/td/p').itertext())).strip()
        yield u'%s -- %s' % (header, url)
        for line in text.splitlines():
            yield line

    return util.PerseverantDownloader(url).go().addCallback(
        lambda (data, headers): parseHTML(data)).addErrback(
        handleBadQuoteID, quoteID).addCallback(extractQuote)
def getMoreResults(self, start=None):
    """
    Retrieve and parse the next page of results.
    """
    if start is None:
        if self.pages is None:
            # Nothing fetched yet; start from the beginning.
            start = u'0'
        elif self.pages:
            start = self.pages[0][u'start']
        else:
            # No pages remain.
            return defer.succeed([])

    url = self.url.add('start', start)
    return util.PerseverantDownloader(
        url, headers=HEADERS).go().addCallback(self.parseResults)
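# Illustrative pagination sketch; `searcher` stands in for an instance of
# the (unnamed here) class this method belongs to, and the flow assumes
# C{parseResults} replenishes C{self.pages} with page descriptors whose
# u'start' key is the offset token for the following page:
#
#     d = searcher.getMoreResults()
#     d.addCallback(lambda results: searcher.getMoreResults())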
def bash(quoteID):
    """
    Retrieve a quote, by ID, from the bash quote database.
    """
    url = BASH_URL.add(quoteID)

    def extractQuote(tree):
        # Drop the voting widgets ('+', '-', '[X]') surrounding the header.
        header = (t for t in tree.find('//p[@class="quote"]').itertext()
                  if t not in ('+', '-', '[X]'))
        header = unicode(''.join(header), 'ascii').strip()
        text = unicode(''.join(
            tree.find('//p[@class="qt"]').itertext())).strip()
        yield u'%s -- %s' % (header, url)
        for line in text.splitlines():
            yield line

    return util.PerseverantDownloader(url).go().addCallback(
        lambda (data, headers): parseHTML(data)).addErrback(
        handleBadQuoteID, quoteID).addCallback(extractQuote)
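# The quote fetchers above (slipgate, qdbUS, bash) share the same shape:
# each returns a Deferred that fires with an iterable of quote lines, the
# first line carrying the source URL. An illustrative consumer (the quote
# ID is made up):
#
#     bash('244321').addCallback(lambda lines: u'\n'.join(lines))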
def search(term, limit=25):
    """
    Query the Twitter search API and parse the result.

    @rtype: C{Deferred} => C{lxml.objectify.ObjectifiedElement}
    """
    url = TWITTER_SEARCH.child('search.atom'
        ).add('q', term.encode('utf-8')
        ).add('rpp', limit
        ).add('result_type', 'mixed')

    d = util.PerseverantDownloader(url).go()
    d.addErrback(handleError)

    def getResults((data, headers)):
        root = lxml.objectify.fromstring(data)
        # An Atom feed with no entries means the search came up empty.
        if not root.findall('{http://www.w3.org/2005/Atom}entry'):
            raise errors.NoSearchResults(
                u'No results for the search term: ' + term)
        return root

    d.addCallback(getResults)
    return d
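# Illustrative usage sketch: results arrive as an Atom feed, so entries must
# be looked up by their namespace-qualified tag:
#
#     def showTitles(feed):
#         for entry in feed.findall('{http://www.w3.org/2005/Atom}entry'):
#             print entry.title
#
#     search(u'twisted python').addCallback(showTitles)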
def current(cls, query):
    """
    Look up the current weather conditions matching C{query}.
    """
    url = cls.API_ROOT.child('WXCurrentObXML').child('index.xml').add(
        'query', query)
    return util.PerseverantDownloader(url).go().addCallback(
        lambda (data, headers): etree.fromstring(data)).addCallback(
        WundergroundConditions.fromElement)
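# Illustrative usage sketch; the containing class is not shown here, so
# `Wunderground` below is a placeholder name for it. The Deferred fires with
# a C{WundergroundConditions} instance built from the XML response:
#
#     d = Wunderground.current(u'Cape Town, South Africa')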
def _doFetch(headers):
    # C{url} is taken from the enclosing scope.
    return util.PerseverantDownloader(url, headers=headers).go()
def _fetch(self, url):
    """
    Fetch page data.
    """
    return util.PerseverantDownloader(url).go()