def response(self, nick, args, kwargs):
    try:
        feed = rssparser.parse(self.rssurl)

        # get the latest entry and its homepage url
        title = feed['items'][0]['title'].split()
        offer = ' '.join(title[:-2])
        try:
            # the last title token is the price; it arrives as a string,
            # so it must be converted before %f formatting
            price = '$%.2f' % float(title[-1])
        except (ValueError, IndexError):
            price = ''
        longdescription = feed['items'][0]['description']
        page = feed['items'][0]['link']

        # strip out html
        longdescription = stripHTML(longdescription).strip()

        # these can get absurdly long
        if len(longdescription) > self.max:
            longdescription = longdescription[:self.max - 4] + ' ...'

        return '%s: %s\n[%s]\n%s' % (offer, price, page, longdescription)
    except Exception, e:
        log.warn('error in %s: %s' % (self.__module__, e))
        log.exception(e)
        return "%s: Couldn't load the page woot returned D:" % nick
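# A minimal, runnable sketch of the title parsing above. The
# "<offer> <separator> <price>" layout of the feed title is an assumption
# about the Woot RSS format, not something confirmed by this module:
title = 'Sansa 2GB MP3 Player : 24.99'.split()   # hypothetical feed title
offer = ' '.join(title[:-2])                     # -> 'Sansa 2GB MP3 Player'
price = '$%.2f' % float(title[-1])               # -> '$24.99'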
def response(self, nick, args, kwargs):
    try:
        try:
            user = args[0]
        except IndexError:
            user = None
        if not user:
            # no user given: scrape a random journal name off the site
            doc = geturl(self.randomURL)
            user = re.search('"currentJournal": "(.*?)"', doc).group(1)
        url = urljoin(self.baseURL, '/users/%s/data/rss' % user)
        feed = rssparser.parse(url)

        # get the latest entry and the journal's homepage url
        entry = feed['items'][0]['description']
        page = feed['channel']['link']

        # strip out html
        entry = stripHTML(entry)

        # detect unusual amounts of high ascii, probably a russian journal
        if isUTF8(entry):
            return '%s: Russian LJ :(' % nick

        # these can get absurdly long
        entry = entry[:self.max]

        return '%s: [%s] %s' % (nick, page, entry)
    except Exception, e:
        log.warn('error in %s: %s' % (self.__module__, e))
        log.exception(e)
        return "%s: Couldn't load the page LJ returned D:" % nick
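# isUTF8() is not defined in this snippet. A minimal sketch of one possible
# implementation, flagging entries whose characters are largely high-ASCII
# (multi-byte UTF-8, e.g. Cyrillic); the 25% threshold is an arbitrary
# assumption:
def isUTF8(text, threshold=0.25):
    if not text:
        return False
    high = sum(1 for ch in text if ord(ch) > 127)
    return float(high) / len(text) > threshold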
def war(self):
    try:
        rss = rssparser.parse(IraqWar._war_url)
        return rss['items'].pop(0)['title']
    except Exception, e:
        log.warn('error in %s: %s' % (self.__module__, e))
        log.exception(e)
        return 'UNKNOWN'
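# A hedged sketch of the feed shape war() relies on (feedparser-style
# dict access; the headline below is mock data, not a real feed item):
rss = {'items': [{'title': 'IRAQ: mock headline'}]}
print rss['items'].pop(0)['title']   # -> 'IRAQ: mock headline'
# rss['items'][0]['title'] reads the same value without mutating the list.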
def response(self, nick, args, kwargs):
    query = args[0]
    try:
        # no query (or the literal word 'headline') falls back to the
        # world-news feed; anything else hits the search feed
        if not query or query == 'headline':
            url = self._world_url
        else:
            url = self._search_url + urllib.quote(query)
        feed = rssparser.parse(url)
        item = feed['items'][0]
        url = item['link']
        title = stripHTML(item['title'])
        summary = stripHTML(item['description'])
        return '\n'.join((url, title, summary))
    except Exception, e:
        log.warn('error in %s: %s' % (self.__module__, e))
        log.exception(e)
        return '%s: %s' % (nick, self._error)
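# Hedged usage sketch: the bot framework supplies args, so args[0] is '',
# 'headline', or a search term. The constructor below is hypothetical:
#
#     news = Main(madcow=None)                     # hypothetical
#     print news.response('nick', [''], {})        # top world headline
#     print news.response('nick', ['economy'], {}) # first search result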
def forecast(self, location):
    page = geturl(url=self.search, opts={'query': location},
                  referer=self.baseurl)
    soup = BeautifulSoup(page)

    # disambiguation page
    if 'Search Results' in str(soup):
        table = soup.find('table', attrs={'class': 'boxB full'})
        rows = table.findAll('tr')
        results = []
        match = None
        for row in rows:
            cells = row.findAll('td', attrs={'class': 'sortC'})
            for cell in cells:
                link = cell.find('a')
                if link is None or 'addfav' in str(link['href']):
                    continue
                city = str(link.contents[0])
                # href is already absolute after this join
                href = urljoin(self.baseurl, str(link['href']))
                results.append(city)
                if city.lower() == location.lower():
                    match = href
                    break
            if match:
                break
        if match:
            page = geturl(url=match)
            soup = BeautifulSoup(page)
        else:
            return 'Multiple results found: %s' % ', '.join(results)

    rss_url = soup.find('link', attrs=self._rss_link)['href']
    rss = rssparser.parse(rss_url)
    title = str(soup.find('h1').string).strip()

    # current conditions arrive as bar-separated "key: value" fields
    conditions = stripHTML(rss['items'][0]['description'])
    fields = self._bar.split(conditions)
    data = {}
    for field in fields:
        try:
            key, val = self._keyval.search(field).groups()
            data[key] = val
        except AttributeError:
            # field didn't match the "key: value" pattern
            pass

    # colorize the temperature by range
    try:
        temp = float(self._tempF.search(data['Temperature']).group(1))
        blink = False
        if temp < 0:
            color = 6
        elif temp < 40:
            color = 2
        elif temp < 60:
            color = 10
        elif temp < 80:
            color = 3
        elif temp < 90:
            color = 7
        elif temp < 100:
            color = 5
        else:
            color = 5
            blink = True
        # mIRC color code; \x16\x16 toggles reverse twice so the color
        # index isn't merged with leading digits in the temperature
        data['Temperature'] = '\x03%s\x16\x16%s\x0F' % (color,
                                                        data['Temperature'])
        if blink:
            # ANSI blink for terminal clients
            data['Temperature'] = '\x1b[5m' + data['Temperature'] + '\x1b[0m'
    except (KeyError, AttributeError, ValueError):
        pass

    output = ' | '.join('%s: %s' % (key, val) for key, val in data.items())
    return '%s: %s' % (title, output)
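# Hedged usage sketch of forecast(): it either returns a disambiguation
# list or a colorized one-liner. The instantiation and output shown are
# illustrative assumptions, not captured results:
#
#     weather = Weather()                    # hypothetical constructor
#     print weather.forecast('Springfield')
#     # -> 'Multiple results found: Springfield, IL, Springfield, MA, ...'
#     print weather.forecast('Springfield, IL')
#     # -> 'Springfield, IL: Temperature: 54.0F | Humidity: 71% | ...'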