def to_embed_code(url, width=437, height=246):
    """Turn a YouTube URL into an <iframe> embed snippet.

    Accepts both www.youtube.com/watch and youtu.be short URLs. Input that
    does not start with 'http' is first run through api.extract_urls() and
    the first extracted URL (if any) is used instead.

    :param url: the (possibly free-text) string containing the video URL
    :param width: iframe width in pixels
    :param height: iframe height in pixels
    :returns: the iframe HTML string, or '' when no YouTube URL is recognized
    """
    youtube_embed_code_template = ('<iframe width="%s" height="%s" '
                                   'src="https://www.youtube.com/embed/%s?wmode=opaque" '
                                   'frameborder="0" allowfullscreen></iframe>')
    if not url.startswith('http'):
        urls = api.extract_urls(url)
        if urls:
            url = urls[0]
    if 'www.youtube.com/' in url:
        # The 'v' parameter may appear anywhere in the query string
        # (e.g. watch?feature=share&v=ID), so parse the query properly
        # instead of assuming the URL contains a literal '?v='.
        from urllib.parse import urlparse, parse_qs
        video_id = parse_qs(urlparse(url).query).get('v', [''])[0]
        if not video_id:
            # Fall back to the legacy string parsing so behavior on
            # malformed / v-less URLs is unchanged.
            video_id = url.rsplit('?v=', 1)[-1].split('&', 1)[0]
        embed_code = youtube_embed_code_template % (width, height, video_id)
    elif 'youtu.be/' in url:
        # Strip both '&...' and '?...' suffixes (e.g. youtu.be/ID?t=30).
        video_id = url.rsplit('/', 1)[-1].split('&', 1)[0].split('?', 1)[0]
        embed_code = youtube_embed_code_template % (width, height, video_id)
    else:
        embed_code = ''
    return embed_code
def urls(self):
    """Return URL-info records for every URL found in this message."""
    found = api.extract_urls(self.message)
    return [api.get_url_info(link) for link in found]
def urls(self):
    """Return URL-info records (scoped to this object's database) for
    every URL found in this message."""
    extracted = api.extract_urls(self.message)
    return [api.get_url_info(item, db_name=self.db_name)
            for item in extracted]
def autolink(text):
    """Convert plain text into HTML with clickable links.

    - A bare email address becomes a link to that user's profile page.
    - Every URL in the text becomes an <a> tag (long URLs are truncated
      to 60 characters for display).
    - @-mentions of topics, users, and groups (in the
      '@[name](kind:id)' markup matched by MENTIONS_RE) become links.

    Results are cached (keyed on hash(text)) in the "filters" namespace.

    NOTE(review): the 'elif' user-mention branch below was corrupted in
    the original source ("'](user:'******'"); it has been reconstructed
    from the parallel topic/group branches — confirm against history.

    :param text: the raw message text (may be falsy, returned unchanged)
    :returns: the HTML string
    """
    if not text:
        return text
    key = '%s:autolink' % hash(text)
    out = cache.get(key, namespace="filters")
    if out:
        return out
    # A message that is exactly an email address links to that user.
    if re.match(EMAIL_RE, text):
        email = text
        user_id = api.get_user_id_from_email_address(email)
        user = api.get_user_info(user_id)
        return '<a href="/user/%s" class="async">%s</a>' % (user.id, user.name)
    s = text or ''
    s += ' '
    s = str(s)  # convert unicode to string
    s = s.replace('\r\n', '\n')
    urls = api.extract_urls(s)
    # De-duplicate; process longest first so a short URL that is a
    # prefix of a longer one can't clobber the longer replacement.
    urls = list(set(urls))
    urls.sort(key=len, reverse=True)
    for url in urls:
        # Substitute an md5 placeholder for the URL inside the anchor so
        # later replacements can't match text inside already-built tags;
        # the placeholders are swapped back for the real URLs below.
        hash_string = md5(url).hexdigest()
        info = api.get_url_info(url)
        if not url.startswith('http'):
            s = s.replace(
                url,
                '<a href="http://%s/" target="_blank" title="%s">%s</a>'
                % (hash_string,
                   info.title if info.title else hash_string,
                   hash_string))
        elif len(url) > 60:
            # Long URLs display a 60-char truncated form.
            u = url[:60]
            for template in ['%s ', ' %s', '\n%s', '%s\n', '%s.', '%s,']:
                if template % url in s:
                    s = s.replace(
                        template % url,
                        template % (
                            '<a href="%s" target="_blank" title="%s">%s</a>'
                            % (hash_string,
                               info.title if info.title else hash_string,
                               md5(u + '...').hexdigest())))
                    break
        else:
            for template in ['%s ', ' %s', '\n%s', '%s\n', '%s.', '%s,']:
                if template % url in s:
                    s = s.replace(
                        template % url,
                        template % (
                            '<a href="%s" target="_blank" title="%s">%s</a>'
                            % (hash_string,
                               info.title if info.title else hash_string,
                               hash_string)))
                    break
    # Swap the md5 placeholders back for the real (or truncated) URLs.
    for url in urls:
        s = s.replace(md5(url).hexdigest(), url)
        if len(url) > 60 and url.startswith('http'):
            s = s.replace(md5(url[:60] + '...').hexdigest(),
                          url[:60] + '...')
    mentions = MENTIONS_RE.findall(s)
    if mentions:
        # Hoisted out of the loop: the same pattern parses every mention.
        mention_re = re.compile('@\[(?P<name>.+)\]\((?P<id>.*)\)')
        for mention in mentions:
            if '](topic:' in mention:
                topic = mention_re.match(mention).groupdict()
                topic['id'] = topic['id'].split(':', 1)[-1]
                # TODO: update topic name?
                s = s.replace(
                    mention,
                    '<a href="/chat/topic/%s" class="chat">%s</a>'
                    % (topic.get('id'), topic.get('name')))
            elif '](user:' in mention:
                user = mention_re.match(mention).groupdict()
                user['id'] = user['id'].split(':', 1)[-1]
                s = s.replace(
                    mention,
                    '<a href="/user/%s" class="async"><span class="tag">%s</span></a>'
                    % (user.get('id'), user.get('name')))
            else:
                group = mention_re.match(mention).groupdict()
                group['id'] = group['id'].split(':', 1)[-1]
                s = s.replace(
                    mention,
                    '<a href="/group/%s" class="async"><span class="tag">%s</span></a>'
                    % (group.get('id'), group.get('name')))
    # hashtags = re.compile('(#\[.*?\))').findall(s)
    # if hashtags:
    #     for hashtag in hashtags:
    #         tag = re.compile('#\[(?P<name>.+)\]\((?P<id>.*)\)').match(hashtag).groupdict()
    #         tag['id'] = tag['id'].split(':', 1)[-1]
    #         s = s.replace(hashtag,
    #             '<a href="?hashtag=%s" class="overlay"><span class="tag">%s</span></a>' % (tag.get('id'), tag.get('name')))
    cache.set(key, s, namespace="filters")
    return s
def autolink(text):
    """Convert plain text into HTML with clickable links.

    - A bare email address becomes a link to that user's profile page.
    - Every URL in the text becomes an <a> tag (long URLs are truncated
      to 60 characters for display).
    - @-mentions of topics, users, and groups (in the
      '@[name](kind:id)' markup matched by MENTIONS_RE) become links;
      in this variant user mentions link to the chat user page.

    Results are cached (keyed on hash(text)) in the "filters" namespace.

    NOTE(review): the 'elif' user-mention branch below was corrupted in
    the original source ("'](user:'******'"); it has been reconstructed
    from the parallel topic/group branches — confirm against history.

    :param text: the raw message text (may be falsy, returned unchanged)
    :returns: the HTML string
    """
    if not text:
        return text
    key = '%s:autolink' % hash(text)
    out = cache.get(key, namespace="filters")
    if out:
        return out
    # A message that is exactly an email address links to that user.
    if re.match(EMAIL_RE, text):
        email = text
        user_id = api.get_user_id_from_email_address(email)
        user = api.get_user_info(user_id)
        return '<a href="/user/%s" class="async">%s</a>' % (user.id, user.name)
    s = text or ''
    s += ' '
    s = str(s)  # convert unicode to string
    s = s.replace('\r\n', '\n')
    urls = api.extract_urls(s)
    # De-duplicate; process longest first so a short URL that is a
    # prefix of a longer one can't clobber the longer replacement.
    urls = list(set(urls))
    urls.sort(key=len, reverse=True)
    for url in urls:
        # Substitute an md5 placeholder for the URL inside the anchor so
        # later replacements can't match text inside already-built tags;
        # the placeholders are swapped back for the real URLs below.
        hash_string = md5(url).hexdigest()
        info = api.get_url_info(url)
        if not url.startswith('http'):
            s = s.replace(
                url,
                '<a href="http://%s/" target="_blank" title="%s">%s</a>'
                % (hash_string,
                   info.title if info.title else hash_string,
                   hash_string))
        elif len(url) > 60:
            # Long URLs display a 60-char truncated form.
            u = url[:60]
            for template in ['%s ', ' %s', '\n%s', '%s\n', '%s.', '%s,']:
                if template % url in s:
                    s = s.replace(
                        template % url,
                        template % (
                            '<a href="%s" target="_blank" title="%s">%s</a>'
                            % (hash_string,
                               info.title if info.title else hash_string,
                               md5(u + '...').hexdigest())))
                    break
        else:
            for template in ['%s ', ' %s', '\n%s', '%s\n', '%s.', '%s,']:
                if template % url in s:
                    s = s.replace(
                        template % url,
                        template % (
                            '<a href="%s" target="_blank" title="%s">%s</a>'
                            % (hash_string,
                               info.title if info.title else hash_string,
                               hash_string)))
                    break
    # Swap the md5 placeholders back for the real (or truncated) URLs.
    for url in urls:
        s = s.replace(md5(url).hexdigest(), url)
        if len(url) > 60 and url.startswith('http'):
            s = s.replace(md5(url[:60] + '...').hexdigest(),
                          url[:60] + '...')
    mentions = MENTIONS_RE.findall(s)
    if mentions:
        # Hoisted out of the loop: the same pattern parses every mention.
        mention_re = re.compile('@\[(?P<name>.+)\]\((?P<id>.*)\)')
        for mention in mentions:
            if '](topic:' in mention:
                topic = mention_re.match(mention).groupdict()
                topic['id'] = topic['id'].split(':', 1)[-1]
                # TODO: update topic name?
                s = s.replace(
                    mention,
                    '<a href="/chat/topic/%s" class="chat">%s</a>'
                    % (topic.get('id'), topic.get('name')))
            elif '](user:' in mention:
                user = mention_re.match(mention).groupdict()
                user['id'] = user['id'].split(':', 1)[-1]
                s = s.replace(
                    mention,
                    '<a href="/chat/user/%s" class="chat"><span class="tag">%s</span></a>'
                    % (user.get('id'), user.get('name')))
            else:
                group = mention_re.match(mention).groupdict()
                group['id'] = group['id'].split(':', 1)[-1]
                s = s.replace(
                    mention,
                    '<a href="/group/%s" class="async"><span class="tag">%s</span></a>'
                    % (group.get('id'), group.get('name')))
    # hashtags = re.compile('(#\[.*?\))').findall(s)
    # if hashtags:
    #     for hashtag in hashtags:
    #         tag = re.compile('#\[(?P<name>.+)\]\((?P<id>.*)\)').match(hashtag).groupdict()
    #         tag['id'] = tag['id'].split(':', 1)[-1]
    #         s = s.replace(hashtag,
    #             '<a href="?hashtag=%s" class="overlay"><span class="tag">%s</span></a>' % (tag.get('id'), tag.get('name')))
    cache.set(key, s, namespace="filters")
    return s