Example #1
# Assumed module-level imports, reconstructed from usage: `dirname` and
# `join_path` map onto `os.path`; `json_decode` presumably comes from the
# app's utilities (e.g. `from tornado.escape import json_decode`).
from os.path import dirname
from os.path import join as join_path

def get_data(filename, directory=None):
    """ Read and JSON-decode `filename`, defaulting to this module's
      directory. Returns `None` if the file cannot be opened.
    """
    if directory is None:
        directory = dirname(__file__)
    file_path = join_path(directory, filename)
    try:
        sock = open(file_path)
    except IOError:
        data = None
    else:
        data = json_decode(sock.read())
        sock.close()
    return data
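
A minimal usage sketch (the `config.json` filename is hypothetical):

# Hypothetical call: read `config.json` from this module's directory;
# `config` ends up as `None` if the file is missing or unreadable.
config = get_data('config.json')
if config is None:
    config = {}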
Example #2
# Assumed module-level imports, reconstructed from usage: `urllib` and
# `urllib2` from the standard library, plus the app's `json_decode`
# utility (e.g. `from tornado.escape import json_decode`).
import urllib
import urllib2

def set_avatar(self, size=80):
    """ Try to scrape the profile image from Google+ using YQL, falling back
      on `Gravatar`_ if this fails.

      The scrape hack relies on either `https://profiles.google.com/${username}`
      or `http://gplus.to/${username}` redirecting to the right Google+ URL.

      .. _Gravatar: http://en.gravatar.com/site/implement/images/
    """

    avatar = None
    has_real_avatar = False

    # If we can get a Google username from the email (quite likely) then try
    # scraping the profile image from Google+ (nasty).
    google_username = None
    parts = self.nickname.split('@')
    if len(parts) == 1:
        google_username = self.nickname
    elif parts[1] in ('gmail.com', 'googlemail.com'):
        google_username = parts[0]

    if google_username is not None:
        urls = [
            'https://profiles.google.com/%s' % google_username,
            'http://gplus.to/%s' % google_username
        ]
        for url in urls:
            # Build the YQL query.
            table = 'http://yqlblog.net/samples/data.html.cssselect.xml'
            what = 'data.html.cssselect'
            query = 'use "%s" as %s; select * from %s where url="%s" and css=".photo"' % (
                table,
                what,
                what,
                url
            )
            # Make the YQL query.
            sock = urllib.urlopen(
                'https://query.yahooapis.com/v1/public/yql',
                data=urllib.urlencode({'q': query, 'format': 'json'})
            )
            text = sock.read()
            sock.close()
            # Parse the response.
            data = json_decode(text)
            results = data['query']['results']['results']
            if results is not None:
                src = results['img']['src']
                avatar = src.replace('?sz=200', '?sz=%s' % size)
                has_real_avatar = True
                break

    # Fall back on Gravatar.
    if avatar is None:
        avatar = self.get_gravatar(size=size)
        # Call the Gravatar URL with `d=404` to determine whether the avatar
        # is real or not.
        try:
            sock = urllib2.urlopen(avatar.replace('&d=mm', '&d=404'))
        except urllib2.URLError:
            has_real_avatar = False
        else:
            sock.close()
            has_real_avatar = True
    # (The listing is truncated here; presumably the original goes on to
    # store `avatar` and `has_real_avatar` on `self`.)
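
For reference, the loop above formats YQL statements of this shape (shown
here for a hypothetical username `alice`, before URL-encoding):

use "http://yqlblog.net/samples/data.html.cssselect.xml" as data.html.cssselect;
select * from data.html.cssselect where url="https://profiles.google.com/alice" and css=".photo"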
Example #3
# Assumed module-level imports, reconstructed from usage: `import random`,
# `from google.appengine.api import memcache, urlfetch`, plus the app's
# `model` module and `json_decode` utility.
def get(self):
    """ Render the activity page: a shuffled wall of design previews and
      avatar images.
    """

    KEY_NAME = 'v1'
    avatars = model.Avatars.get_by_key_name(KEY_NAME)
    if avatars is None:
        avatars = model.Avatars(key_name=KEY_NAME)

    # Only refresh the avatar lists once every 30 minutes.
    CACHE_KEY = 'activity'
    CACHE_TIME = 60 * 30

    flag = memcache.get(CACHE_KEY)
    if flag is None:

        # Make an async request to Twitter.
        url = 'https://api.twitter.com/1/followers/ids.json?cursor=-1&screen_name=wikihouse'
        twitter = urlfetch.create_rpc()
        urlfetch.make_fetch_call(twitter, url)

        # Make an async request to Disqus.
        url = 'http://disqus.com/api/3.0/forums/listUsers.json?forum=wikihouse'
        public_key = '9PVKAMMxgF4Hn3q4G3XnqxNToQNGkMc6qEU8Zs0K79KZE1IkTg5kZioZSOjdZjIJ'
        disqus = urlfetch.create_rpc()
        urlfetch.make_fetch_call(disqus, '%s&api_key=%s' % (url, public_key))

        # Get the Twitter follower ids.
        try:
            result = twitter.get_result()
            if result.status_code == 200:
                text = result.content
                data = json_decode(text)
                ids = ','.join([str(follower_id) for follower_id in data['ids']])
                # Get the Twitter follower profile image urls via another
                # async request.
                url = 'https://api.twitter.com/1/users/lookup.json?cursor=-1&user_id='
                twitter = urlfetch.create_rpc()
                urlfetch.make_fetch_call(twitter, '%s%s' % (url, ids))
                result = twitter.get_result()
                if result.status_code == 200:
                    text = result.content
                    data = json_decode(text)
                    for item in data:
                        avatar_url = item['profile_image_url'].replace('_normal.', '.')
                        if avatar_url not in avatars.twitter_followers:
                            avatars.twitter_followers.append(avatar_url)
        except urlfetch.DownloadError:
            pass

        # Get the Disqus avatar permalinks.
        try:
            result = disqus.get_result()
            if result.status_code == 200:
                text = result.content
                data = json_decode(text)
                for item in data['response']:
                    avatar_url = item['avatar']['permalink']
                    if avatar_url not in avatars.disqus_commenters:
                        avatars.disqus_commenters.append(avatar_url)
        except urlfetch.DownloadError:
            pass

        # Save the avatars.
        avatars.put()

        # Flag that we don't need to do that again for a bit.
        memcache.set(CACHE_KEY, True, time=CACHE_TIME)

    # Get the images to mix into the projection.
    designs = model.Design.all_listings()
    images = [u'/blob/%s' % item.model_preview.key() for item in designs]
    users_with_avatars = model.User.get_with_real_avatars()
    images += [item.avatar for item in users_with_avatars]
    images += avatars.twitter_followers
    images += avatars.disqus_commenters

    # Rattle the tin.
    random.shuffle(images)

    # Render the page.
    return self.render('activity.tmpl', items=images[:300])
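
Both the Twitter and Disqus calls follow App Engine's asynchronous
`urlfetch` pattern: start the fetch, carry on working, then block on the
result. A minimal sketch of that pattern (the URL is a placeholder):

from google.appengine.api import urlfetch

rpc = urlfetch.create_rpc()
urlfetch.make_fetch_call(rpc, 'https://example.com/api')  # the request starts now
# ... start other fetches or do other work while this one is in flight ...
result = rpc.get_result()  # blocks until the response (or an error) arrives
if result.status_code == 200:
    body = result.content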