Example #1
 def google_contacts(self):
   return [User({'_id': api.get_user_id_from_email_address(email),
                 'email': email}) for email in self.info.get('google_contacts', [])]
Example #2
 def google_contacts(self):
     return [User({'_id': api.get_user_id_from_email_address(email,
                                                             db_name=self.db_name),
                    'email': email})
             for email in self.info.get('google_contacts', [])]
Example #3
def autolink(text):  
  if not text:
    return text
  
  key = '%s:autolink' % hash(text)
  out = cache.get(key, namespace="filters")
  if out:
    return out
  
  if re.match(EMAIL_RE, text):
    email = text 
    user_id = api.get_user_id_from_email_address(email)
    user = api.get_user_info(user_id)
    return '<a href="/user/%s" class="async">%s</a>' % (user.id, user.name)
    
  s = text or ''
  s += ' '
  s = str(s) # convert unicode to string
  s = s.replace('\r\n', '\n')

  
  urls = api.extract_urls(s)
  urls = list(set(urls))
  urls.sort(key=len, reverse=True)
  
  for url in urls:
    hash_string = md5(url).hexdigest()
    info = api.get_url_info(url)
    if not url.startswith('http'):
      s = s.replace(url, '<a href="http://%s/" target="_blank" title="%s">%s</a>' % (hash_string, info.title if info.title else hash_string, hash_string))
    
    elif len(url) > 60:
      u = url[:60]
        
      for template in ['%s ', ' %s', '\n%s', '%s\n', '%s.', '%s,']:
        if template % url in s:
          s = s.replace(template % url, 
                        template % ('<a href="%s" target="_blank" title="%s">%s</a>' % (hash_string, info.title if info.title else hash_string, md5(u + '...').hexdigest())))
          break
    else:
      for template in ['%s ', ' %s', '\n%s', '%s\n', '%s.', '%s,']:
        if template % url in s:
          s = s.replace(template % url, 
                        template % ('<a href="%s" target="_blank" title="%s">%s</a>' % (hash_string, info.title if info.title else hash_string, hash_string)))
          break
        
  for url in urls:
    s = s.replace(md5(url).hexdigest(), url)
    if len(url) > 60 and url.startswith('http'):
      s = s.replace(md5(url[:60] + '...').hexdigest(), url[:60] + '...')
      
  
  mentions = MENTIONS_RE.findall(s)
  if mentions:
    for mention in mentions:
      if '](topic:' in mention:
        topic = re.compile('@\[(?P<name>.+)\]\((?P<id>.*)\)').match(mention).groupdict()
        topic['id'] = topic['id'].split(':', 1)[-1]
        
        #TODO: update topic name?
        s = s.replace(mention, 
             '<a href="/chat/topic/%s" class="chat">%s</a>' % (topic.get('id'), topic.get('name')))
      elif '](user:' in mention:
        user = re.compile('@\[(?P<name>.+)\]\((?P<id>.*)\)').match(mention).groupdict()
        user['id'] = user['id'].split(':', 1)[-1]
        s = s.replace(mention, 
             '<a href="/chat/user/%s" class="chat"><span class="tag">%s</span></a>' % (user.get('id'), user.get('name')))
      else:
        group = re.compile('@\[(?P<name>.+)\]\((?P<id>.*)\)').match(mention).groupdict()
        group['id'] = group['id'].split(':', 1)[-1]
        s = s.replace(mention, 
             '<a href="/group/%s" class="async"><span class="tag">%s</span></a>' % (group.get('id'), group.get('name')))
        
#  hashtags = re.compile('(#\[.*?\))').findall(s)
#  if hashtags:
#    for hashtag in hashtags:
#      tag = re.compile('#\[(?P<name>.+)\]\((?P<id>.*)\)').match(hashtag).groupdict()
#      tag['id'] = tag['id'].split(':', 1)[-1]
#      s = s.replace(hashtag, 
#           '<a href="?hashtag=%s" class="overlay"><span class="tag">%s</span></a>' % (tag.get('id'), tag.get('name')))
  
  cache.set(key, s, namespace="filters")
  return s
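
The snippet above uses EMAIL_RE, MENTIONS_RE, and md5 without showing where they come from. The definitions below are only a plausible sketch reconstructed from how the code uses them (a whole-text email match, @[Name](kind:id) mention tokens, and hashlib's md5); the originating project may define them differently.

import re
from hashlib import md5  # assumed import; the code calls md5(url).hexdigest()

# Hypothetical stand-ins for the module-level names used by autolink().
EMAIL_RE = r'^[^@\s]+@[^@\s]+\.[^@\s]+$'             # the entire text is one email address
MENTIONS_RE = re.compile(r'(@\[[^\]]+\]\([^)]*\))')  # tokens like @[Name](user:42)
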
Example #4
def autolink(text):
    if not text:
        return text

    key = '%s:autolink' % hash(text)
    out = cache.get(key, namespace="filters")
    if out:
        return out

    if re.match(EMAIL_RE, text):
        email = text
        user_id = api.get_user_id_from_email_address(email)
        user = api.get_user_info(user_id)
        return '<a href="/user/%s" class="async">%s</a>' % (user.id, user.name)

    s = text or ''
    s += ' '
    s = str(s)  # convert unicode to string
    s = s.replace('\r\n', '\n')

    urls = api.extract_urls(s)
    urls = list(set(urls))
    urls.sort(key=len, reverse=True)

    for url in urls:
        hash_string = md5(url).hexdigest()
        info = api.get_url_info(url)
        if not url.startswith('http'):
            s = s.replace(
                url, '<a href="http://%s/" target="_blank" title="%s">%s</a>' %
                (hash_string, info.title if info.title else hash_string,
                 hash_string))

        elif len(url) > 60:
            u = url[:60]

            for template in ['%s ', ' %s', '\n%s', '%s\n', '%s.', '%s,']:
                if template % url in s:
                    s = s.replace(
                        template % url, template %
                        ('<a href="%s" target="_blank" title="%s">%s</a>' %
                         (hash_string, info.title if info.title else
                          hash_string, md5(u + '...').hexdigest())))
                    break
        else:
            for template in ['%s ', ' %s', '\n%s', '%s\n', '%s.', '%s,']:
                if template % url in s:
                    s = s.replace(
                        template % url, template %
                        ('<a href="%s" target="_blank" title="%s">%s</a>' %
                         (hash_string, info.title
                          if info.title else hash_string, hash_string)))
                    break

    for url in urls:
        s = s.replace(md5(url).hexdigest(), url)
        if len(url) > 60 and url.startswith('http'):
            s = s.replace(md5(url[:60] + '...').hexdigest(), url[:60] + '...')

    mentions = MENTIONS_RE.findall(s)
    if mentions:
        for mention in mentions:
            if '](topic:' in mention:
                topic = re.compile('@\[(?P<name>.+)\]\((?P<id>.*)\)').match(
                    mention).groupdict()
                topic['id'] = topic['id'].split(':', 1)[-1]

                #TODO: update topic name?
                s = s.replace(
                    mention, '<a href="/chat/topic/%s" class="chat">%s</a>' %
                    (topic.get('id'), topic.get('name')))
            elif '](user:' in mention:
                user = re.compile('@\[(?P<name>.+)\]\((?P<id>.*)\)').match(
                    mention).groupdict()
                user['id'] = user['id'].split(':', 1)[-1]
                s = s.replace(
                    mention,
                    '<a href="/user/%s" class="async"><span class="tag">%s</span></a>'
                    % (user.get('id'), user.get('name')))
            else:
                group = re.compile('@\[(?P<name>.+)\]\((?P<id>.*)\)').match(
                    mention).groupdict()
                group['id'] = group['id'].split(':', 1)[-1]
                s = s.replace(
                    mention,
                    '<a href="/group/%s" class="async"><span class="tag">%s</span></a>'
                    % (group.get('id'), group.get('name')))


#  hashtags = re.compile('(#\[.*?\))').findall(s)
#  if hashtags:
#    for hashtag in hashtags:
#      tag = re.compile('#\[(?P<name>.+)\]\((?P<id>.*)\)').match(hashtag).groupdict()
#      tag['id'] = tag['id'].split(':', 1)[-1]
#      s = s.replace(hashtag,
#           '<a href="?hashtag=%s" class="overlay"><span class="tag">%s</span></a>' % (tag.get('id'), tag.get('name')))

    cache.set(key, s, namespace="filters")
    return s
Example #5
  def process_message(self, peer, mailfrom, rcpttos, data):
    """
    peer is a tuple containing (ipaddr, port) of the client that made the
    socket connection to our smtp port.

    mailfrom is the raw address the client claims the message is coming
    from.

    rcpttos is a list of raw addresses the client wishes to deliver the
    message to.

    data is a string containing the entire full text of the message,
    headers (if supplied) and all.  It has been `de-transparencied'
    according to RFC 821, Section 4.5.2.  In other words, a line
    containing a `.' followed by other text has had the leading dot
    removed.

    This function should return None, for a normal `250 Ok' response;
    otherwise it returns the desired response string in RFC 821 format.

    """
    print peer, mailfrom, rcpttos, len(data)
    user_email = mailfrom.lower().strip()
    
    # Extract reply text from message
    message = get_reply_text(data)
    if not message:
      return None # Can't parse reply text
    
    item_id = rcpttos[0].split('@')[0]
    post_id = user_id = group_id = None
    if item_id.startswith('post'):
      post_id = item_id[4:]
    elif item_id.startswith('user'):
      user_id = item_id[4:]
    elif item_id.startswith('group'):
      group_id = item_id[5:]
    else:
      return None
    
    if post_id:
      post_id = post_id.replace('-', '/')
      while True:
        try:
          post_id = smaz.decompress(base64.b64decode(post_id))
          break
        except TypeError: # Incorrect padding
          post_id = post_id + '='
      post_id, db_name = post_id.split('-')
      if not post_id.isdigit():
        return None
      
      post_id = int(post_id)
      user_id = api.get_user_id_from_email_address(user_email, db_name=db_name)
      if not user_id:
        return None
      session_id = api.get_session_id(user_id, db_name=db_name)
      if not session_id:
        return None
      
      api.new_comment(session_id, message, post_id, db_name=db_name)
      return None
    else:
      return None
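
The docstring above describes the process_message contract of the standard-library smtpd module: the handler receives peer, mailfrom, rcpttos, and the raw message data, and returns None for a normal "250 Ok" reply. Below is a minimal sketch of how such a handler is usually wired up with smtpd and asyncore; the class name, bind address, and port are assumptions, not taken from the project.

import asyncore
import smtpd

class ReplyMailServer(smtpd.SMTPServer):
  def process_message(self, peer, mailfrom, rcpttos, data):
    # The reply-parsing logic from the example above would go here.
    return None  # None tells smtpd to answer "250 Ok"

server = ReplyMailServer(('0.0.0.0', 1025), None)  # (localaddr, remoteaddr)
asyncore.loop()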