Example #1
def index(request):
    """
        Load everything we need for the frontpage
        And that's a lot of stuff

    """
    # load news items (internal) and blog posts (members' blogs, fetched via rss)
    news = News.objects.select_related().order_by('-date')[:4]
    posts = Post.objects.order_by('-date_modified')[:4]
    videos = Video.objects.order_by('-pub_date')[:5]

    # splice both of them together into one list, and sort them by date
    # but the most recent news item always comes first, even if older
    # tiny technicality - their respective models use a different date field
    both = []
    both.extend(news[1:])
    for post in posts:
        post.date = post.date_modified
        both.append(post)
    both2 = sorted(both, cmp=sort_news, reverse=True)
    if news:
        both2.insert(0, news[0])

    # load some tweets
    api = twitter.Api()
    try:
        tweets = api.GetSearch(term='kiberpipa OR cyberpipe', query_users=False, per_page=20)
    except (urllib2.URLError, socket.timeout, twitter.TwitterError):
        client.captureException()
        tweets = []

    # recent flickr uploads
    try:
        pictures = []
        # http://www.flickr.com/services/api/flickr.photosets.getList.html
        json = flickr_api.flickr_call(
            method='flickr.photosets.getList',
            user_id='40437802@N07',  # http://idgettr.com/
            per_page=5,
            pages=1,
            format="json",
            nojsoncallback=1)
    except (urllib2.URLError, socket.timeout):
        client.captureException()
    else:
        r = simplejson.loads(json)
        if r.get('stat', 'error') == 'ok':
            photosets = r['photosets']['photoset']
            photosets = sorted(photosets, key=lambda x: x['date_create'], reverse=True)
            for image in photosets[:7]:
                if int(image['photos']) == 0:
                    continue
                image['thumb_url'] = settings.PHOTOS_FLICKR_SET_IMAGE_URL_N % image
                image['url'] = 'http://www.flickr.com/photos/kiberpipa/sets/%(id)s/' % image
                image['title'] = image['title']['_content']
                pictures.append(image)

    return render_to_response('www/index.html', {
        'news': news,
        'planet': posts,
        'both': both2,
        'videos': videos,
        'tweets': tweets,
        'pictures': pictures,
    }, context_instance=RequestContext(request))
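
The snippet sorts with cmp=sort_news, a comparator defined elsewhere in the project. A minimal sketch of what it presumably looks like (the body is an assumption; the merge loop above guarantees every item exposes a date attribute):

def sort_news(a, b):
    # Hypothetical comparator (not shown in the project excerpt):
    # order mixed News/Post items by the unified `date` attribute set
    # in the merge loop above. Python 2 cmp-style; combined with
    # sorted(..., reverse=True) this puts newest items first.
    return cmp(a.date, b.date)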
Example #2
File: views.py Project: Rhenan/Intranet
def index(request):
    """
        Load everything we need for the frontpage
        And that's a lot of stuff

    """
    # load news items (internal) and blog posts (members' blogs, fetched via rss)
    news = News.objects.select_related().order_by('-date')[:4]
    posts = Post.objects.order_by('-date_modified')[:4]
    videos = Video.objects.order_by('-pub_date')[:5]

    # splice both of them together into one list, and sort them by date
    # but the most recent news item always comes first, even if older
    # tiny technicality - their respective models use a different date field
    both = []
    both.extend(news[1:])
    for post in posts:
        post.date = post.date_modified
        both.append(post)
    both2 = sorted(both, cmp=sort_news, reverse=True)
    if news:
        both2.insert(0, news[0])

    # load some tweets
    # TODO: https://github.com/bear/python-twitter/issues/21
    tweets = []
    originals = []
    if hasattr(settings, "TWITTER_CONSUMER_KEY"):
        api = twitter.Api(
            consumer_key=settings.TWITTER_CONSUMER_KEY,
            consumer_secret=settings.TWITTER_CONSUMER_SECRET,
            access_token_secret=settings.TWITTER_ACCESS_TOKEN_SECRET,
            access_token_key=settings.TWITTER_ACCESS_TOKEN_KEY,
        )

        try:
            tweets = api.GetSearch(term='kiberpipa OR cyberpipe', count=50, include_entities=True)
            tweets_map = dict((tweet.id, tweet) for tweet in tweets)  # for faster lookup by id
            originals = [t for t in tweets if not t.retweeted_status]  # original tweets, aka not RTs

            # unshorten urls
            for tweet in tweets:
                if tweet.retweeted_status is not None:
                    tweet.text = tweet.retweeted_status.text
                    # unshorten urls for retweets (yes twitter api sux)
                    for url in tweet.retweeted_status.urls:
                        tweet.text = tweet.text.replace(url.url, url.expanded_url)
                for url in tweet.urls:
                    tweet.text = tweet.text.replace(url.url, url.expanded_url)

                # resolve mentions, hashtags
                for mention in tweet.user_mentions:
                    name = mention.screen_name
                    # sometimes people will write mentions by hand in the wrong case (e.g. "@gittip" when the account is really @Gittip);
                    # if we used the vanilla (case-sensitive) .replace(), we wouldn't be able to catch these buggers
                    tweet.text = re.sub(re.compile("@" + name, re.I),
                        """<a rel="nofollow" target="_blank" href="https://twitter.com/%s">@%s</a>""" % (mention.screen_name, mention.screen_name),
                        tweet.text) 
                for hashtag in tweet.hashtags:
                    tweet.text = tweet.text.replace("#" + hashtag.text, 
                        """<a rel="nofollow" target="_blank" href="https://twitter.com/search?q=%s&amp;src=hash">#%s</a>""" % (hashtag.text, hashtag.text))

                # merge all retweets under the original tweet
                if tweet.retweeted_status:
                    rt_id = tweet.retweeted_status.id
                    # the odd case that the original tweet was not among the most recent ones
                    if rt_id not in tweets_map:
                        fake_rt = tweet.retweeted_status
                        fake_rt.rts = []
                        tweets_map[rt_id] = fake_rt

                    # add the retweet to the "rts" list on the original tweet
                    rt = tweets_map[rt_id]
                    if not hasattr(rt, "rts"):
                        rt.rts = []
                    rt.rts.append(tweet)
        except (urllib2.URLError, socket.timeout, twitter.TwitterError, ssl.SSLError):
            client.captureException()

    # recent flickr uploads
    try:
        pictures = []
        # http://www.flickr.com/services/api/flickr.photosets.getList.html
        json = flickr_api.flickr_call(
            method='flickr.photosets.getList',
            user_id='40437802@N07',  # http://idgettr.com/
            per_page=5,
            pages=1,
            format="json",
            nojsoncallback=1)
    except (urllib2.URLError, socket.timeout, ssl.SSLError):
        client.captureException()
    else:
        r = simplejson.loads(json)
        if r.get('stat', 'error') == 'ok':
            photosets = r['photosets']['photoset']
            photosets = sorted(photosets, key=lambda x: x['date_create'], reverse=True)
            for image in photosets[:7]:
                if int(image['photos']) == 0:
                    continue
                image['thumb_url'] = settings.PHOTOS_FLICKR_SET_IMAGE_URL_N % image
                image['url'] = 'http://www.flickr.com/photos/kiberpipa/sets/%(id)s/' % image
                image['title'] = image['title']['_content']
                pictures.append(image)

    return render_to_response('www/index.html', {
        'news': news,
        'planet': posts,
        'both': both2,
        'videos': videos,
        'tweets': tweets,
        'original_tweets': originals[:20],
        'pictures': pictures,
    }, context_instance=RequestContext(request))
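
Note that the cmp= argument to sorted() was removed in Python 3, so a port of either view would need a key function instead. A hedged equivalent of the call above, assuming the unified date attribute set in the merge loop:

# Python 3 equivalent of sorted(both, cmp=sort_news, reverse=True),
# assuming sort_news compares items by their `date` attribute.
both2 = sorted(both, key=lambda item: item.date, reverse=True)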