def get_randomse():
    """Return a random curated Stack Exchange question/answer pair.

    Picks a question id from a hand-curated list, fetches the question,
    then fetches one random answer of that question.

    Returns:
        dict with keys qtitle/qbody/qlink/abody/alink/ascore/quota,
        None when long fortunes are disabled or the request timed out,
        or a placeholder dict when any other error occurs.
    """
    if not config.enable_long_fortunes():
        return None

    # Hand-picked (question id, site) pairs known to have good answers.
    curated = [
        (22299, "mathoverflow.net"), (1083, "mathoverflow.net"),
        (7155, "mathoverflow.net"), (2144, "mathoverflow.net"),
        (14574, "mathoverflow.net"), (879, "mathoverflow.net"),
        (16829, "mathoverflow.net"), (47214, "mathoverflow.net"),
        (44326, "mathoverflow.net"), (29006, "mathoverflow.net"),
        (38856, "mathoverflow.net"), (7584, "mathoverflow.net"),
        (117668, "mathoverflow.net"), (8846, "mathoverflow.net"),
        (178139, "mathoverflow.net"), (42512, "mathoverflow.net"),
        (4994, "mathoverflow.net"),
        (733754, "math.stackexchange.com"), (323334, "math.stackexchange.com"),
        (178940, "math.stackexchange.com"), (111440, "math.stackexchange.com"),
        (250, "math.stackexchange.com"), (820686, "math.stackexchange.com"),
        (505367, "math.stackexchange.com"), (362446, "math.stackexchange.com"),
        (8814, "math.stackexchange.com"), (260656, "math.stackexchange.com"),
        (2949, "math.stackexchange.com"),
        (4351, "matheducators.stackexchange.com"),
        (1817, "matheducators.stackexchange.com"),
    ]

    try:
        qid, site = random.choice(curated)
        # The &filter= values are pre-registered SE API filters selecting
        # the exact fields read below.
        question_url = ('https://api.stackexchange.com/questions/' + str(qid)
                        + '?site=' + str(site)
                        + '&filter=!gB66oJbwvcXSH(Ni5Ti9FQ4PaxMw.WKlBWC')
        question_resp = requests.get(question_url,
                                     timeout=config.external_request_timeout())
        question_resp.raise_for_status()
        question = question_resp.json()['items'][0]

        # Pick one of the question's answers at random.
        answer_id = random.choice(question['answers'])['answer_id']
        answer_url = ('https://api.stackexchange.com/answers/' + str(answer_id)
                      + '?site=' + str(site)
                      + '&filter=!Fcb(61J.xH8s_mAfP-LmG*7fPe')
        answer_resp = requests.get(answer_url,
                                   timeout=config.external_request_timeout())
        answer_resp.raise_for_status()
        payload = answer_resp.json()
        answer = payload['items'][0]

        return {
            'qtitle': unescape(question['title']),
            'qbody': question['body'],
            'qlink': question['link'],
            'abody': answer['body'],
            'alink': answer['link'],
            'ascore': answer['score'],
            'quota': payload['quota_remaining'],
        }
    except requests.Timeout as ex:
        print(ex)
        print('*** Note: get_randomse() timed out.')
        return None
    except Exception as ex:
        # Best-effort fallback: surface the error as a fake fortune.
        return {
            'qtitle': 'Exception occurred',
            'qbody': str(ex),
            'qlink': 'https://xkcd.com/1084/',
            'abody': '<img src="https://imgs.xkcd.com/comics/error_code.png"/>',
            'alink': 'https://xkcd.com/1024/',
            'ascore': '',
            'quota': '',
        }
def convert_links(self, html, vals, blacklist=None):
    """Replace long URLs in ``html`` with tracked short links.

    :param html: HTML body to rewrite in place (string).
    :param vals: values dict for creating the link record; its 'url'
        key is overwritten for each URL found.
    :param blacklist: optional list of substrings; URLs containing any
        of them are left untouched.
    :return: the rewritten HTML string.
    """
    short_schema = self.env['ir.config_parameter'].sudo().get_param(
        'web.base.url') + '/r/'
    for match in re.findall(URL_REGEX, html):
        href = match[0]
        long_url = match[1]
        vals['url'] = unescape(long_url)
        # BUG FIX: the original condition was
        #   not blacklist or not [...] and not long_url.startswith(...)
        # where `and` binds tighter than `or`, so with a falsy blacklist
        # the startswith() guard was bypassed and links that were already
        # shortened could be shortened again. Parenthesized so the
        # "already short" guard always applies.
        if ((not blacklist or not [s for s in blacklist if s in long_url])
                and not long_url.startswith(short_schema)):
            link = self.create(vals)
            shorten_url = self.browse(link.id)[0].short_url
            if shorten_url:
                new_href = href.replace(long_url, shorten_url)
                html = html.replace(href, new_href)
    return html
def pingback(source_uri, target_uri):
    """Try to notify the server behind `target_uri` that `source_uri`
    points to `target_uri`. If that fails a `PingbackError` is raised.

    :param source_uri: URL of the page containing the link.
    :param target_uri: URL of the page being linked to.
    :raises PingbackError: 32 if the target cannot be fetched, 33 if no
        pingback endpoint is advertised, or the XML-RPC fault code.
    """
    try:
        response = open_url(target_uri)
    except Exception:
        # BUG FIX: was a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit.
        raise PingbackError(32)
    try:
        # Preferred discovery mechanism: the X-Pingback response header.
        pingback_uri = response.headers["X-Pingback"]
    except KeyError:
        # Fall back to scanning the body for a <link rel="pingback">.
        match = _pingback_re.search(response.data)
        if match is None:
            raise PingbackError(33)
        pingback_uri = unescape(match.group(1))
    rpc = ServerProxy(pingback_uri)
    try:
        return rpc.pingback.ping(source_uri, target_uri)
    except Fault as e:
        # Modernized from `except Fault, e` (valid on Python 2.6+ and 3).
        raise PingbackError(e.faultCode)
def pingback(source_uri, target_uri):
    """Try to notify the server behind `target_uri` that `source_uri`
    points to `target_uri`. If that fails a `PingbackError` is raised.

    NOTE(review): this duplicates an identical `pingback` defined
    earlier in the file — consider consolidating.

    :raises PingbackError: 32 if the target cannot be fetched, 33 if no
        pingback endpoint is advertised, or the XML-RPC fault code.
    """
    try:
        response = open_url(target_uri)
    except Exception:
        # BUG FIX: was a bare `except:` (also caught KeyboardInterrupt).
        raise PingbackError(32)
    try:
        # Header-based endpoint discovery first.
        pingback_uri = response.headers['X-Pingback']
    except KeyError:
        # Otherwise look for the pingback <link> element in the body.
        match = _pingback_re.search(response.data)
        if match is None:
            raise PingbackError(33)
        pingback_uri = unescape(match.group(1))
    rpc = ServerProxy(pingback_uri)
    try:
        return rpc.pingback.ping(source_uri, target_uri)
    except Fault as e:
        # Modernized from `except Fault, e` (valid on Python 2.6+ and 3).
        raise PingbackError(e.faultCode)
def import_livejournal(self, username, password, import_what=IMPORT_JOURNAL,
                       community='', security_custom=SECURITY_PROTECTED,
                       categories=[], getcomments=True):
    """Import from LiveJournal using specified parameters.

    Generator: yields HTML progress fragments as it works so the caller
    can stream status to the user.

    NOTE(review): ``categories=[]`` is a mutable default argument; it
    appears to be read-only here, but confirm nothing mutates it.
    NOTE(review): ``getcomments`` is not referenced in this span —
    presumably consumed later in the file; verify.
    """
    yield _(u'<p>Beginning LiveJournal import. Attempting to login...</p>')
    # For community imports we log in as the user but act on the
    # community journal via `usejournal`.
    if import_what != IMPORT_JOURNAL:
        usejournal = community
    else:
        usejournal = None
    lj = LiveJournalConnect(username, password, usejournal)
    result = lj.login(getmoods=0)
    authors = {
        username: Author(username=username, email='',
                         real_name=unicode(result['fullname'], 'utf-8'))
    }
    yield _(u'<p>Your name: <strong>%s</strong></p>') % \
        authors[username].real_name
    # Numeric LJ mood id -> mood name (used for `current_moodid` below).
    moodlist = dict([(int(m['id']), unicode(str(m['name']), 'utf-8'))
                     for m in result['moods']])
    result = lj.getusertags()
    # Tag name -> Tag object; keyed by the unicode tag name.
    tags = dict([(tag, Tag(gen_slug(tag), tag)) for tag in
                 [unicode(t['name'], 'utf-8') for t in result['tags']]])
    yield _(u'<p><strong>Tags:</strong> %s</p>') % _(u', ').join(
        tags.keys())
    ##result = lj.getdaycounts()
    ##daycounts = [(date(*strptime(item['date'], '%Y-%m-%d')[0:3]),
    ##              item['count']) for item in result['daycounts']]
    ##totalposts = sum([x[1] for x in daycounts])
    ##yield _(u'<p>Found <strong>%d</strong> posts on <strong>%d days'\
    ##        u'</strong> between %s and %s.</p>') % (
    ##    totalposts,
    ##    len(daycounts),
    ##    daycounts[0][0].strftime('%Y-%m-%d'),
    ##    daycounts[-1][0].strftime('%Y-%m-%d'))
    posts = {}
    # Process implemented as per
    # http://www.livejournal.com/doc/server/ljp.csp.entry_downloading.html
    yield _(u'<ul>')
    yield _(u'<li>Getting metadata...</li>')
    result = lj.syncitems()
    sync_items = []
    sync_total = int(result['total'])
    yield _(u'<li>%d items...</li>') % sync_total
    sync_items.extend(result['syncitems'])
    # syncitems is paged: keep requesting from the newest timestamp we
    # already hold until we have all `sync_total` items.
    while len(sync_items) < sync_total:
        lastsync = max([parse_lj_date(item['time'])
                        for item in sync_items]).strftime(
                            '%Y-%m-%d %H:%M:%S')
        yield _(u'<li>Got %d items up to %s...</li>') % (len(sync_items),
                                                         lastsync)
        result = lj.syncitems(lastsync=lastsync)
        sync_items.extend(result['syncitems'])
    yield _(u'<li>Got all %d items.</li>') % len(sync_items)
    yield _(u'</ul>')
    #: Discard non-journal items.
    sync_items = [i for i in sync_items if i['item'].startswith('L-')]
    yield _(u'<p>Downloading <strong>%d</strong> entries...</p>') % len(
        sync_items)
    # Track what items we need to get; keyed by numeric item id
    # (the part of the 'L-<id>' string after the prefix).
    sync_data = {}
    for item in sync_items:
        sync_data[int(item['item'][2:])] = {
            'downloaded': False,
            'time': parse_lj_date(item['time'])
        }
    # Start downloading bodies
    sync_left = [sync_data[x] for x in sync_data
                 if sync_data[x]['downloaded'] is False]
    if sync_left:
        # Start one second before the oldest pending item so getevents
        # includes that item in the first batch.
        lastsync = (min([x['time'] for x in sync_left]) -
                    timedelta(seconds=1)).strftime('%Y-%m-%d %H:%M:%S')
    while len(sync_left) > 0:
        yield _(u'<p>Getting a batch...</p>')
        try:
            result = lj.getevents(selecttype='syncitems',
                                  lastsync=lastsync)
        except xmlrpclib.Fault, fault:
            if fault.faultCode == 406:
                # LJ doesn't like us. Go back one second and try again.
                yield _(u'<p>LiveJournal says we are retrying the same '
                        u'date and time too often. Trying again with the '
                        u'time set behind by one second.</p>')
                lastsync = (parse_lj_date(lastsync) -
                            timedelta(seconds=1)).strftime(
                                '%Y-%m-%d %H:%M:%S')
                continue
            else:
                # Any other fault aborts the whole import loop.
                yield _(u'<p>Process failed. LiveJournal says: '
                        u'(%d) %s</p>') % (fault.faultCode,
                                           fault.faultString)
                break
        yield _(u'<ol start="%d">') % (len(posts) + 1)
        for item in result['events']:
            if sync_data[item['itemid']]['downloaded'] is True:
                # Dupe, thanks to our lastsync time manipulation. Skip.
                continue
            sync_data[item['itemid']]['downloaded'] = True
            sync_data[item['itemid']]['item'] = item
            subject = item.get('subject', '')
            # Subjects may arrive as xmlrpclib.Binary blobs.
            if isinstance(subject, xmlrpclib.Binary):
                subject = subject.data
            subject = unicode(str(subject), 'utf-8')
            #: LiveJournal subjects may contain HTML tags. Strip them and
            #: convert HTML entities to Unicode equivalents.
            subject = unescape(
                tag_re.sub('', ljuser_re.sub('\\2', subject)))
            poster = item.get('poster', username)
            if poster != username and import_what != IMPORT_COMMUNITY_ALL:
                # Discard, since we don't want this.
                yield _(
                    u'<li><strong>Discarded:</strong> %s <em>(by %s)</em></li>'
                ) % (subject, poster)
                continue
            if poster not in authors:
                authors[poster] = Author(poster, '', '')
            # Map LiveJournal security codes to Zine status flags
            security = item.get('security', 'public')
            if security == 'usemask' and item['allowmask'] == 1:
                # allowmask 1 is the friends group.
                security = 'friends'
            if security == 'usemask':
                # Custom-masked entries: honor the user's chosen policy.
                status = {
                    SECURITY_DISCARD: None,
                    SECURITY_PUBLIC: STATUS_PUBLISHED,
                    SECURITY_PROTECTED: STATUS_PROTECTED,
                    SECURITY_PRIVATE: STATUS_PRIVATE
                }[security_custom]
                if status is None:
                    yield _(u'<li><strong>Discarded (masked):</strong> '
                            u'%s</li>') % subject
                    continue
            else:
                status = {
                    'public': STATUS_PUBLISHED,
                    'friends': STATUS_PROTECTED,
                    'private': STATUS_PRIVATE,
                }[security]
            #: Read time as local timezone and then convert to UTC. Zine
            #: doesn't seem to like non-UTC timestamps in imports.
            pub_date = get_timezone().localize(
                parse_lj_date(item['eventtime'])).astimezone(UTC)
            itemtags = [t.strip() for t in unicode(
                item['props'].get('taglist', ''), 'utf-8').split(',')]
            # Drop empty tag names produced by the split.
            while '' in itemtags:
                itemtags.remove('')
            itemtags = [tags[t] for t in itemtags]
            # Collect optional LJ metadata properties; each may be a
            # Binary blob or a plain value.
            extras = {}
            if 'current_music' in item['props']:
                if isinstance(item['props']['current_music'],
                              xmlrpclib.Binary):
                    extras['current_music'] = unicode(
                        item['props']['current_music'].data, 'utf-8')
                else:
                    extras['current_music'] = unicode(
                        str(item['props']['current_music']), 'utf-8')
            if 'current_mood' in item['props']:
                if isinstance(item['props']['current_mood'],
                              xmlrpclib.Binary):
                    extras['current_mood'] = unicode(
                        item['props']['current_mood'].data, 'utf-8')
                else:
                    extras['current_mood'] = unicode(
                        str(item['props']['current_mood']), 'utf-8')
            elif 'current_moodid' in item['props']:
                # Numeric mood id: look up the name fetched at login.
                extras['current_mood'] = moodlist[int(
                    item['props']['current_moodid'])]
            if 'current_coords' in item['props']:
                if isinstance(item['props']['current_coords'],
                              xmlrpclib.Binary):
                    extras['current_coords'] = unicode(
                        item['props']['current_coords'].data, 'utf-8')
                else:
                    extras['current_coords'] = unicode(
                        str(item['props']['current_coords']), 'utf-8')
            if 'current_location' in item['props']:
                if isinstance(item['props']['current_location'],
                              xmlrpclib.Binary):
                    extras['current_location'] = unicode(
                        item['props']['current_location'].data, 'utf-8')
                else:
                    extras['current_location'] = unicode(
                        str(item['props']['current_location']), 'utf-8')
            if 'picture_keyword' in item['props']:
                if isinstance(item['props']['picture_keyword'],
                              xmlrpclib.Binary):
                    extras['picture_keyword'] = unicode(
                        item['props']['picture_keyword'].data, 'utf-8')
                else:
                    extras['picture_keyword'] = unicode(
                        str(item['props']['picture_keyword']), 'utf-8')
            extras['lj_post_id'] = item['itemid']
            extras['original_url'] = item['url']
            posts[item['itemid']] = Post(
                #: Generate slug. If there's no subject, use '-'+itemid.
                #: Why the prefix? Because if the user wants %year%/%month%/
                #: for the post url format and we end up creating a slug
                #: like 2003/12/1059, it will conflict with the archive
                #: access path format of %Y/%m/%d and the post will become
                #: inaccessible, since archive paths take higher priority
                #: to slugs in zine's urls.py.
                slug=gen_timestamped_slug(
                    gen_slug(subject) or ('-' + str(item['itemid'])),
                    'entry', pub_date),
                title=subject,
                link=item['url'],
                pub_date=pub_date,
                author=authors[poster],
                intro='',
                body=isinstance(item['event'], xmlrpclib.Binary)
                    and unicode(item['event'].data, 'utf-8')
                    or url_unquote_plus(str(item['event'])),
                tags=itemtags,
                categories=[Category(x) for x in categories],
                comments=[],  # Will be updated later.
                comments_enabled=not item['props'].get(
                    'opt_nocomments', False),
                pings_enabled=False,  # LiveJournal did not support pings
                uid='livejournal;%s;%d' % (usejournal or username,
                                           item['itemid']),
                parser=item['props'].get('opt_preformatted', False)
                    and 'html' or 'livejournal',
                status=status,
                extra=extras)
            yield _(u'<li>%s <em>(by %s on %s)</em></li>') % (
                subject, poster, pub_date.strftime('%Y-%m-%d %H:%M'))
        # Done processing batch.
        yield _(u'</ol>')
        # Recompute what's still pending and rewind one second so the
        # next getevents batch includes the oldest pending item.
        sync_left = [sync_data[x] for x in sync_data
                     if sync_data[x]['downloaded'] is False]
        if sync_left:
            lastsync = (min([x['time'] for x in sync_left]) -
                        timedelta(seconds=1)).strftime('%Y-%m-%d %H:%M:%S')
def math(self):
    """Render all truthy child nodes to HTML (no <br> tags) and return
    the concatenation with HTML entities decoded."""
    rendered = [child.html(br=False) for child in self.children if child]
    return unescape(''.join(rendered))
def get_channel_list():
    """Return the channel list visible to the logged-in user as JSON.

    Reads the user's ban list (a '#'-joined string of channel names),
    filters those channels out, and returns parallel lists of channel
    names, topics and admins. Redirects to /login when there is no
    session or the user row is missing.
    """
    if 'uid' not in session:
        flash("You have not logged in yet. Please log in first", 'error')
        return redirect('/login')

    conn = connect_db()
    cur = conn.cursor()
    cur.execute('SELECT username, banned FROM `user` WHERE id=?',
                (session['uid'], ))
    row_user = cur.fetchone()
    if row_user is None:
        # Session points at a non-existent user: treat as not logged in.
        conn.commit()
        conn.close()
        flash("You have not logged in yet. Please log in first", 'error')
        return redirect('/login')

    # row_user[1] is a '#'-joined string of channels the user is banned
    # from (or None).
    channels_that_banned_user = row_user[1]

    cur.execute('SELECT channelname, topic, admin FROM `channel`')
    all_of_them = cur.fetchall()
    # FIX: the original compared `all_channels_list == None`, which was
    # unreachable (it is always a list), and iterated the result set
    # three times; a single pass over the rows suffices.

    banned_channels = []
    if channels_that_banned_user is not None:
        # split('#') yields a leading '' for strings starting with '#';
        # preserved from the original ('#' + '' == '#').
        banned_channels = ['#' + part
                           for part in channels_that_banned_user.split('#')]

    channel_list = []
    topics = []
    admins_of_channels = []
    for channelname, topic, admin in all_of_them:
        if channelname not in banned_channels:
            channel_list.append(channelname)
            # NOTE(review): modern werkzeug has no top-level `unescape`
            # (it lived in werkzeug.utils and was removed) — confirm the
            # import, or switch to html.unescape.
            topics.append(werkzeug.unescape(topic))
            admins_of_channels.append(admin)

    conn.commit()
    conn.close()
    return jsonify({
        'channel_list': channel_list,
        'topic': topics,
        'admin': admins_of_channels
    })