def subtitle_entries_date(self):
    """ Add subtitle (date begin, date end) """
    if len(self.entries) > 3:
        begin = pretty_date(self.entries[0].dt)
        end = pretty_date(self.entries[-1].dt)
        # French: "from {begin} to {end}"
        self.json["subtitle"]["text"] = ' du {0} au {1}'.format(begin, end)
        self.json["subtitle"]["useHTML"] = True
        self.json["subtitle"]["verticalAlign"] = 'top'
        self.json["subtitle"]["y"] = 40
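# All of these snippets lean on a pretty_date()/util.pretty_date() helper that
# is never shown in this collection. Some call it with epoch seconds, others
# with datetime objects. A minimal sketch of what such a relative-time
# formatter could look like -- the thresholds and wording here are assumptions,
# not the original implementation:
import time
from datetime import datetime

def pretty_date(then):
    """Render an epoch timestamp or datetime as a rough relative time."""
    if isinstance(then, datetime):
        then = time.mktime(then.timetuple())
    diff = int(time.time()) - int(then)
    if diff < 60:
        return "just now"
    for unit, seconds in (("day", 86400), ("hour", 3600), ("minute", 60)):
        if diff >= seconds:
            n = diff // seconds
            return "%d %s%s ago" % (n, unit, "s" if n > 1 else "")
    return "just now"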
def cmd_showbet(self, num): if BeautifulSoup is None: return text = ("Bet %(id)d %(win)s %(profit)s. " "%(lucky)s %(gt)s %(target)s. Player: %(user)s [%(ago)s]") data = urllib2.urlopen(ROLL % num).read() if data.lower().startswith('there is no'): # Nothing to see. return soup = BeautifulSoup(data) timestamp = soup.findAll('script')[2].text ts_where = timestamp.find('moment') timestamp = util.pretty_date(int(timestamp[ts_where+8:ts_where+18])) labels = soup.findAll(attrs={'class': 'slabel'}) data = soup.findAll('span') userid = data[2].text.strip() win = 'won' if data[9].text.strip()[0] == 'w' else 'lost' gt = labels[7].text[-1] params = {'id': int(data[0].text.strip()), 'win': win, 'profit': data[5].text.strip(), 'lucky': data[8].text.strip(), 'gt': gt, 'target': data[7].text.strip(), 'user': userid, 'ago': timestamp} print 'show bet %d' % int(num) self.sock.emit('chat', self.csrf, (text % params).encode('utf8'))
def cmd_showbet(self, num):
    if BeautifulSoup is None:
        return
    text = ("Bet %(id)d %(win)s %(profit)s. "
            "%(lucky)s %(gt)s %(target)s. Player: %(user)s [%(ago)s]")
    data = urllib2.urlopen(ROLL % num).read()
    if data.lower().startswith('there is no'):
        # Nothing to see.
        return
    soup = BeautifulSoup(data)
    # The bet timestamp sits in the page's third <script> block inside a
    # moment(...) call; the fixed offsets pull out the 10-digit epoch.
    timestamp = soup.findAll('script')[2].text
    ts_where = timestamp.find('moment')
    timestamp = util.pretty_date(int(timestamp[ts_where + 8:ts_where + 18]))
    labels = soup.findAll(attrs={'class': 'slabel'})
    data = soup.findAll('span')
    userid = data[2].text.strip()
    win = 'won' if data[9].text.strip()[0] == 'w' else 'lost'
    gt = labels[7].text[-1]
    params = {'id': int(data[0].text.strip()),
              'win': win,
              'profit': data[5].text.strip(),
              'lucky': data[8].text.strip(),
              'gt': gt,
              'target': data[7].text.strip(),
              'user': userid,
              'ago': timestamp}
    print 'show bet %d' % int(num)
    self.sock.emit('chat', self.csrf, (text % params).encode('utf8'))
def Send_SMS_Chaudiere_Alert(dt):
    """
    To be called by ext package
    Prepare an SMS to alert all recipients
    """
    date = pretty_date(dt)
    # French: "Alert: low temperature reached. On <date>"
    body = 'Alerte Temperature basse atteinte. le ' + date
    sender = 'Chaudiere Montlevic'
    recipients = app.config['USERS_PHONES']
    send_sms(sender, recipients, body)
def main():
    try:
        q = unicode(sys.argv[1])
        q = unicodedata.normalize('NFC', q).lower().replace(' ', '')
    except (IndexError, UnicodeError):
        q = ""
    rss = rss_data()
    config = config_data()
    max_results = config.get('max_results')  # None means "no limit" for islice
    results = []
    for e in itertools.islice(rss, max_results):
        if not q or q in e['title'].lower().replace(' ', ''):
            results.append(alfred.Item(title=e['title'],
                                       subtitle=e['published'],
                                       attributes={'arg': e['link']},
                                       icon=e['image']))
    last_updated = config.get('last_updated', 0)
    subtitle = "Last updated: " + (last_updated
                                   and util.pretty_date(last_updated)
                                   or "no info")
    diff = int(time.time()) - last_updated
    if diff > RELOAD_ASK_THRESHOLD or len(rss) == 0:
        results.insert(0, alfred.Item(title="BackToTheMac - Reload Data?",
                                      subtitle=subtitle,
                                      attributes={'arg': 'reload',
                                                  'uid': alfred.uid('t')},
                                      icon="icon.png"))
    else:
        results.insert(0, alfred.Item(title="BackToTheMac",
                                      subtitle=subtitle,
                                      attributes={'arg': 'http://macnews.tistory.com',
                                                  'uid': alfred.uid('t')},
                                      icon="icon.png"))
    alfred.write(alfred.xml(results, maxresults=None))
def Send_Mail_Chaudiere_Alert(dt):
    """ Prepare and send an alert e-mail to all recipients """
    date = pretty_date(dt)
    # French: "Alert: boiler stopped on <date>"
    subject = 'Alerte Arret Chaudiere le {0}'.format(date)
    # French: "Low temperature reached (T<65 deg C) /
    #          history of the last 12 hours"
    html_body = '''
Température basse atteinte (T<65°)
<br>
historique des 12 dernières heures :
http://montlevic.hd.free.fr:5007/charts/now/12
'''
    body = '''
Température basse atteinte (T<65°)
http://montlevic.hd.free.fr:5007/charts/now/6
'''
    sender = app.config['MAIL_USERNAME']
    recipients = app.config['USERS_EMAILS']
    send_email(subject, sender, recipients, body, html_body)
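# The two alert helpers above call send_sms() and send_email(), which are not
# part of this collection. Given the app.config['MAIL_USERNAME'] usage, a
# plausible Flask-Mail-based sketch of send_email might look like this; the
# module-level `mail` instance is an assumption, not shown in the source:
from flask_mail import Message

def send_email(subject, sender, recipients, text_body, html_body):
    # Build a multipart message with plain-text and HTML alternatives.
    msg = Message(subject, sender=sender, recipients=recipients)
    msg.body = text_body
    msg.html = html_body
    mail.send(msg)  # `mail` assumed to be a flask_mail.Mail bound to `app`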
def get_movies(order, direction, minvotes, maxvotes, minrating, maxrating,
               total_count=20, cursor=None):
    key = "[movies]%s;%s;%s;%s;%s;%s;%s;%s" % (order, direction, minvotes,
                                               maxvotes, minrating, maxrating,
                                               total_count, cursor)
    movies = memcache.get(key, namespace="movies")
    #if movies:
    #    l.info('Memcache hit in movies')
    #    return movies
    q = Movie.all()
    l.info((order, direction, minvotes, maxvotes, minrating, maxrating))
    # Ratings are stored as integers (7.3 -> 73).
    maxrating = int(maxrating * 10)
    minrating = int(minrating * 10)
    if maxvotes > 200000:
        # Treat very large values as "no upper bound".
        maxvotes = float('inf')
    prefix = ''
    if direction == 'descending':
        prefix = '-'
    #if minvotes > 0: q = q.filter('imdbvotes >=', minvotes)
    #if maxvotes < 300000: q = q.filter('imdbvotes <=', maxvotes)
    #if minrating > 0: q = q.filter('imdbrating >=', minrating)
    #if maxrating < 100: q = q.filter('imdbrating <=', maxrating)
    if order == 'age':
        q = q.order(prefix + 'nzbdate')
    if order == 'name':
        q = q.order(prefix + 'rlsname')
    if order == 'imdbrating':
        q = q.order(prefix + 'imdbrating')
    if order == 'imdbvotes':
        q = q.order(prefix + 'imdbvotes')
    if cursor:
        q = q.with_cursor(cursor)
    tm = []
    i = []
    count = 0
    date = None
    for m in q:
        if count == 20:  # hard cap per page
            break
        if not m.imdbrating:
            m.imdbrating = 0
        if not m.imdbvotes:
            m.imdbvotes = 0
        # How nice, a table scan :( -- the datastore filters above are
        # disabled, so filter in Python instead.
        if not (minrating <= m.imdbrating <= maxrating
                and minvotes <= m.imdbvotes <= maxvotes):
            continue
        count += 1
        if m.imdbid is None:
            m.imdbid = re.search(r'tt\d{7}', m.imdblink).group(0)
            #m.put()
        template_movie = {'type': 'movie'}
        #template_movie['counter'] = count
        template_movie['nzblink'] = m.nzblink
        template_movie['rlsname'] = m.rlsname
        template_movie['prettydate'] = util.pretty_date(m.nzbdate)
        template_movie['imdblink'] = m.imdblink
        template_movie['rtlink'] = ('http://www.rottentomatoes.com/alias?type=imdbid&s=%s'
                                    % m.imdbid[2:])
        if m.imdbinfo:
            template_movie['imdbinfo?'] = True
            template_movie['rating'] = m.imdbinfo.rating / 10.0
            template_movie['votes'] = m.imdbinfo.votes
            template_movie['imdbinfo'] = m.imdbinfo
            if m.imdbinfo.covers.count(1):
                template_movie['cover'] = '/serve/%s' % m.imdbinfo.covers[0].blobkey.key()
            i.append(template_movie)
        else:
            template_movie['imdbinfo?'] = False
        # Insert a date header row whenever the release date changes.
        if not date == m.nzbdate.date():
            date = m.nzbdate.date()
            tm.append({'d': date, 'day': date.strftime('%A, %d %B %Y'),
                       'type': 'date'})
        tm.append(template_movie)
        if count >= total_count:
            break
    movies = (tm, i, q.cursor())
    memcache.set(key, movies, namespace='movies')
    return movies
def process_search(pins, config, deleted_url, starred_url, launch_hist_url,
                   tags_list, q, full_query, category, sort_option):
    results = []
    qs = map(lambda a: a.strip(), q.lower().split('|'))
    for p in pins:
        url = p['href'].lower()
        if url in map(lambda x: x.lower(), deleted_url):
            continue
        title = p['description'].lower()
        extended = p['extended'].lower()
        tags = (p.get('tags') or '').lower()
        toread = p['toread']
        tag_set = set(tags.split(' '))
        if tags_list and tag_set.isdisjoint(tags_list):
            continue
        tagstring = (tags
                     and "(" + ", ".join(map(lambda a: '#' + a, tags.split(' '))) + ")"
                     or "(none)")
        if not q:
            # No query: filter by category only.
            if category == 'toread':
                if toread == 'yes':
                    add_result(results, p['description'], p['href'], tagstring,
                               starred_url, launch_hist_url)
            elif category == 'star':
                if p['href'] in starred_url:
                    add_result(results, p['description'], p['href'], tagstring,
                               starred_url, launch_hist_url)
            elif category == 'log':
                if p['href'] in launch_hist_url:
                    add_result(results, p['description'], p['href'], tagstring,
                               starred_url, launch_hist_url)
            else:
                add_result(results, p['description'], p['href'], tagstring,
                           starred_url, launch_hist_url)
        else:
            # '|'-separated alternatives; within one alternative every
            # space-separated term must match.
            for qi in qs:
                if category == 'all' and all(
                        qsi and pred(qsi, [title, extended, tags])
                        for qsi in qi.split(' ')):
                    add_result(results, p['description'], p['href'], tagstring,
                               starred_url, launch_hist_url)
                    break
                elif category == 'log' and p['href'] in launch_hist_url and all(
                        qsi and pred(qsi, [title, extended, tags])
                        for qsi in qi.split(' ')):
                    add_result(results, p['description'], p['href'], tagstring,
                               starred_url, launch_hist_url)
                    break
                elif category == 'star' and p['href'] in starred_url and all(
                        qsi and pred(qsi, [title, extended, tags])
                        for qsi in qi.split(' ')):
                    add_result(results, p['description'], p['href'], tagstring,
                               starred_url, launch_hist_url)
                    break
                elif category == 'toread' and toread == 'yes' and all(
                        qsi and pred(qsi, [title])
                        for qsi in qi.split(' ')):
                    add_result(results, p['description'], p['href'], tagstring,
                               starred_url, launch_hist_url)
                    break
                elif category == 'link' and any(
                        qsi and pred(qsi, [url])
                        for qsi in qi.split(' ')):
                    add_result(results, p['description'], p['href'], tagstring,
                               starred_url, launch_hist_url)
                    break
        if PIN_MAX_RESULT > 0 and len(results) >= PIN_MAX_RESULT:
            break
    logger.info(category)
    # Sort options (Hangul alternatives accepted for Korean input mode).
    if sort_option == 'a' or sort_option == 'ㅇ':
        results = sorted(results, key=lambda k: k['title'])
    elif sort_option == 'z' or sort_option == 'ㅁ':
        results = sorted(results, key=lambda k: k['title'], reverse=True)
    elif sort_option == 'd' or sort_option == 'ㅣ':
        results.reverse()
    elif sort_option == 'l' or sort_option == 'ㅈ':
        results = sorted(results, key=lambda k: k['last_access'], reverse=True)
    resultData = [alfred.Item(title=f['title'],
                              subtitle=urlparse.urlparse(f['url'])[1] + " "
                                       + f['tags'] + f['click'],
                              attributes={'arg': f['url'],
                                          'uid': alfred.uid(idx)},
                              icon="item.png")
                  for (idx, f) in enumerate(results)]
    last_updated = config.get('last_updated', 0)
    subtitle = "Last updated: " + (last_updated
                                   and util.pretty_date(last_updated)
                                   or "no info")
    diff = int(time.time()) - last_updated
    if diff > RELOAD_ASK_THRESHOLD:
        resultData.insert(0, alfred.Item(
            title="Links: %d items - Reload pinboard data?" % len(results),
            subtitle=subtitle,
            attributes={'arg': 'reload', 'uid': alfred.uid('t')},
            icon="icon.png"))
    else:
        resultData.insert(0, alfred.Item(
            title="Links: %d items" % len(results),
            subtitle=subtitle,
            attributes={'valid': 'no', 'uid': alfred.uid('t')},
            icon="icon.png"))
    pinboard_url = (q
                    and 'https://pinboard.in/search/?query=%s&mine=Search+Mine'
                        % q.replace(' ', '+')
                    or 'https://pinboard.in/')
    pinboard_title = q and "Search '%s' in pinboard.in" % q or 'Goto Pinboard'
    resultData.append(alfred.Item(title=pinboard_title,
                                  subtitle=pinboard_url,
                                  attributes={'arg': pinboard_url},
                                  icon="icon.png"))
    alfred.write(alfred.xml(resultData, maxresults=None))
    update_history(category, full_query, len(results))
    return
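# process_search() relies on an add_result() helper that is not included in
# this collection. Judging from the fields consumed later (title, url, tags,
# click, last_access), a plausible reconstruction could be the following; the
# star marker and the shape of launch_hist_url are assumptions:
def add_result(results, title, url, tagstring, starred_url, launch_hist_url):
    # Assumed: launch_hist_url maps url -> (launch_count, last_access_epoch).
    hist = launch_hist_url.get(url) if isinstance(launch_hist_url, dict) else None
    results.append({
        'title': ('*' if url in starred_url else '') + title,
        'url': url,
        'tags': tagstring,
        'click': (' (%d)' % hist[0]) if hist else '',  # launch count, if logged
        'last_access': hist[1] if hist else 0,         # for the 'l' sort option
    })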
def handleGetAttr(obj):
    u = RUser(obj['u'], 0, r)
    msgs = []
    if not u.exists():
        msgs.append({"a": "sendto", "user": obj['admin'],
                     "msg": "{C_RED}{C_ITALIC}No such user!"})
        return push(obj['sid'], msgs)
    if obj['attr'] == "inf":
        msgs.append({"a": "sendto", "user": obj['admin'],
                     "msg": "{C_RED}Infractions for {C_GRAY}'{C_AQUA}%s{C_GRAY}'" % u.name})
        # The [:30] slice already caps the loop at 30 entries.
        for i, inf in enumerate(u.getInfractions()[:30]):
            post = ""
            # Check the raw type before rewriting it into a colored label,
            # otherwise the expiry test below could never match.
            if inf['type'] == "tban" and datetime.fromtimestamp(inf['expires']) < datetime.now():
                post = "{C_GOLD} -> {C_GRAY}[{C_LPURPLE}Expired{C_GRAY}]"
            elif inf['status'] == 2:
                post = "{C_GOLD} -> {C_GRAY}[{C_GREEN}Disputed{C_GRAY}]"
            if inf['type'] == "ban":
                inf['type'] = "{C_RED}BAN"
            else:
                inf['type'] = "{C_YELLOW}TEMPBAN"
            m = ("{C_AQUA}%s {C_GOLD}-{C_AQUA} %s {C_GOLD}-{C_AQUA} %s "
                 "{C_GOLD}-{C_DAQUA}{C_ITALIC} %s {C_GOLD}- {C_GRAY}[{C_BLUE}%s{C_GRAY}]"
                 % (i + 1, inf['type'], inf['mod'], inf['msg'],
                    pretty_date(datetime.fromtimestamp(inf['time']))))
            msgs.append({"a": "sendto", "user": obj['admin'], "msg": m + post})
        if len(msgs) == 1:
            msgs.append({"a": "sendto", "user": obj['admin'],
                         "msg": "{C_RED}{C_ITALIC} None!"})
    elif obj['attr'] == "act":
        msgs.append({"a": "sendto", "user": obj['admin'],
                     "msg": "{C_RED}History for {C_GRAY}'{C_AQUA}%s{C_GRAY}'" % u.name})
        for i, h in enumerate(u.getHistory()[:30]):
            m = ("{C_GREEN}%s: {C_AQUA}%s {C_GOLD}-> {C_RED}%s {C_GOLD}-> "
                 "{C_DAQUA}%s {C_GRAY}[{C_BLUE}%s{C_GRAY}]"
                 % (i + 1, h['admin'], h['type'].title(), h['msg'],
                    pretty_date(datetime.fromtimestamp(h['time']))))
            msgs.append({"a": "sendto", "user": obj['admin'], "msg": m})
        if len(msgs) == 1:
            msgs.append({"a": "sendto", "user": obj['admin'],
                         "msg": "{C_RED}{C_ITALIC} None!"})
    push(obj['sid'], msgs)
try:
    q = unicode(sys.argv[2].strip())
    q = unicodedata.normalize('NFC', q)
except (IndexError, UnicodeError):
    q = ""
history = main.history_data()
if sys.argv[1] == "search":
    results = []
    history.sort(cmp=compare_key, reverse=False)
    for h in history:
        if q == "" or q in h[1]:
            results.append(alfred.Item(
                title=(h[4] and main.STAR or "") + h[1] + " (%d)" % h[2],
                # subtitle=time.strftime('%Y.%m.%d %H:%M:%S', time.localtime(h[3])),
                subtitle=util.pretty_date(h[3]),
                attributes={'arg': h[1]},
                icon="icon.png"))
    alfred.write(alfred.xml(results, maxresults=20))
elif sys.argv[1] == "delete":
    for h in history:
        if q == h[1]:
            history.remove(h)
            break
    with open(os.path.join(alfred.work(False), 'search-history.json'), 'w+') as myFile:
        myFile.write(json.dumps(history))
elif sys.argv[1] == "star":
    for h in history:
        if q == h[1]:
            h[4] = not h[4]  # toggle the star flag
            break
    with open(os.path.join(alfred.work(False), 'search-history.json'), 'w+') as myFile:
        myFile.write(json.dumps(history))
def get_spoke(bot, args):
    db = BotDB(bot).connect()
    if len(args) > 1:
        channel = bot.remote['receiver']
        nick = bot.remote['nick']
        # peewee's ** operator is a case-insensitive LIKE match.
        quotes = (Quote.select()
                  .where(Quote.channel == channel,
                         Quote.nick ** ("%%%s%%" % args[1]))
                  .order_by(Quote.time.desc())
                  .limit(1))
        ids = [q.id for q in quotes.naive()]
        if not ids:
            util.answer(bot, "I don't know anything about %s, %s..."
                        % (args[1], nick))
        else:
            quote = Quote.get(Quote.id == ids[0])
            time = datetime.datetime.fromtimestamp(int(quote.time))
            output = ("%s, %s last spoke in %s at %s (%s), saying: %s"
                      % (nick, quote.nick, channel, str(time),
                         util.pretty_date(time), quote.message))
            util.answer(bot, output.encode('utf8'))
    else:
        util.give_help(bot, args[0], "<nick>")
def print_training_times(start_sample_time, end_sample_time):
    print("Training time period used: %s to %s"
          % (util.pretty_date(start_sample_time),
             util.pretty_date(end_sample_time)))
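# Example usage; the relative phrasing in the output depends entirely on the
# pretty_date implementation, so the second line is illustrative only:
#   print_training_times(time.time() - 86400, time.time())
#   Training time period used: 1 day ago to just now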
def gh_linkscan(bot, url):
    try:
        if not re.match("http(s)?://github.com", url):
            bot._debug("I don't know what %s is..." % url)
            return None
        gh = gh_inst(bot)
        if isinstance(gh, basestring):
            bot._debug("Error: %s" % gh)
            return gh
        # URL patterns for the GitHub entities we know how to summarize.
        r_repo = r"^https?://github.com/([A-Za-z0-9-]+)/([A-Za-z0-9-\.]+)"
        r_commit = r"^https?://github.com/[A-Za-z0-9-]+/[A-Za-z0-9-\.]+/commit/([A-Za-z0-9]+)"
        r_blob = r"^https?://github.com/[A-Za-z0-9-]+/[A-Za-z0-9-\.]+/blob/([A-Za-z0-9]+)/(.*)"
        r_tree = r"^https?://github.com/[A-Za-z0-9-]+/[A-Za-z0-9-\.]+/tree/([A-Za-z0-9]+)/(.*)"
        r_issue = r"^https?://github.com/[A-Za-z0-9-]+/[A-Za-z0-9-\.]+/(issues|pull)/(\d+)"
        r = re.match(r_repo, url)
        if not r:
            return None
        repo = "%s/%s" % r.group(1, 2)
        ghrepo = gh.get_repo(repo)
        bot._debug("Repo: %s" % repo)
        commit = re.match(r_commit, url)
        blob = re.match(r_blob, url)
        tree = re.match(r_tree, url)
        issue = re.match(r_issue, url)
        if commit:
            bot._debug("Commit SHA: %s" % commit.group(1))
            commit = ghrepo.get_commit(commit.group(1))
            fmt = ("GitHub: \x02%s\x0f commit \x02%s\x0f: %s [/%s] "
                   "[\x033+%s\x0f \x035-%s\x0f]"
                   % (repo, commit.sha[:8], commit.commit.message,
                      commit.author.login,
                      util.metric(commit.stats.additions),
                      util.metric(commit.stats.deletions)))
            return fmt.encode('utf-8')
        elif blob:
            bot._debug("Blob: [%s] %s" % blob.group(1, 2))
            ref = blob.group(1)
            blob = ghrepo.get_contents(path=blob.group(2), ref=ref)
            fmt = ("GitHub: \x02%s\x0f file \x02%s\x0f [%s, branch %s]"
                   % (repo, blob.name, util.metric(blob.size), ref))
            return fmt.encode('utf-8')
        elif tree:
            bot._debug("Tree: [%s] %s" % tree.group(1, 2))
            ref, path = tree.group(1, 2)
            tree = ghrepo.get_dir_contents(path=path, ref=ref)
            fmt = ("GitHub: \x02%s\x0f dir \x02%s\x0f [%s files, branch %s]"
                   % (repo, path, util.metric(len(tree)), ref))
            return fmt.encode('utf-8')
        elif issue:
            issue_id = issue.group(2)
            bot._debug("Issue ID: #%s" % issue_id)
            issue = ghrepo.get_issue(int(issue_id))
            assigned_to = issue.assignee.login if issue.assignee else 'no one'
            if issue.state == "open":
                fmt = ("GitHub: \x02%s\x0f issue \x02#%s\x0f: %s "
                       "[by %s, %s assigned, created %s, updated %s]"
                       % (repo, issue_id, issue.title, issue.user.login,
                          assigned_to, util.pretty_date(issue.created_at),
                          util.pretty_date(issue.updated_at)))
            else:
                fmt = ("GitHub: \x02%s\x0f issue \x02#%s\x0f: %s "
                       "[by %s, \x035closed\x0f by %s %s]"
                       % (repo, issue_id, issue.title, issue.user.login,
                          issue.closed_by.login,
                          util.pretty_date(issue.closed_at)))
            return fmt.encode('utf-8')
        else:
            # Plain repo link: show forks/watchers and last push time.
            forks = str(ghrepo.forks)
            watchers = str(ghrepo.watchers)
            fmt = ("GitHub: \x02%s\x0f [%sf %sw] last updated %s"
                   % (repo, forks, watchers, util.pretty_date(ghrepo.pushed_at)))
            return fmt.encode('utf-8')
    except:
        exc_type, exc_value, exc_traceback = sys.exc_info()
        traceback.print_exception(exc_type, exc_value, exc_traceback,
                                  limit=2, file=sys.stdout)
        raise