Example No. 1
def test_search_show(self):
    print(self.twitter.search_show(Listing("The Dark Knight|tv")))
Example No. 2
    def search_show(self, tv_listing):
        # Look up the show's Facebook page and return its engagement stats.
        result_list = self.graph_api.request(
            '/search', {
                'q': tv_listing.show_name,
                'type': 'page',
                'fields': 'likes,id,name,talking_about_count',
                'limit': 1
            })
        data_ = result_list['data']
        if data_:
            result = data_[0]
            result['fans'] = result.pop('likes')
            return result
        # No matching page was found: pause briefly and return an empty result.
        time.sleep(2)
        return {}


if __name__ == '__main__':
    search = FbSearch()
    listing_file = open(sys.argv[1], 'r')
    scores = open(sys.argv[2], 'a')
    for line in listing_file:
        value = line.strip()
        print(value)
        listing = Listing(value)
        try:
            score = search.search_show(listing)
            scores.write(value + '\t' + ujson.dumps(score) + "\n")
        except Exception as e:
            print(value, e)
            # Back off for 30 minutes (e.g. after hitting an API rate limit)
            # before moving on to the next listing.
            time.sleep(30 * 60)
    scores.close()
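
Every example on this page builds a Listing from a "show name|type" string, and Example No. 2 reads its show_name attribute. A minimal sketch of such a class, assuming only what the snippets actually use (the attribute name listing_type is a guess, not taken from the project), could look like this:

class Listing:
    # Minimal sketch of the Listing value object these examples assume.
    # Only show_name is visible in the snippets; listing_type is a guessed name.
    def __init__(self, raw):
        # Input looks like "The Dark Knight|tv" or "Escape From Planet Earth|movie".
        name, _, kind = raw.partition('|')
        self.show_name = name.strip()
        self.listing_type = kind.strip() or 'tv'

    def __repr__(self):
        return 'Listing(%r|%r)' % (self.show_name, self.listing_type)

With this sketch, Listing("The Dark Knight|tv").show_name evaluates to "The Dark Knight".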
Example No. 3
def test_search_show(self):
    print(
        self.fbSearch.search_show(
            Listing("Escape From Planet Earth|movie")))
Example No. 4
                reviews = count
            elif href == 'externalreviews':
                external_reviews = count

        return {
            'popularity': popularity,
            'popularity_change': popularity_change,
            'rating': rating,
            'best_rating': best_rating,
            'users': users,
            'reviews': reviews,
            'external_reviews': external_reviews
        }


if __name__ == '__main__':
    imdb_scrape = IMDBScrape()
    listing_file = open(sys.argv[1], 'r')
    imdb_scores = open(sys.argv[2], 'a')
    imdb_missed = open(sys.argv[3], 'a')
    for line in listing_file:
        try:
            print(line.strip())
            listing = Listing(line.strip())
            page_url = imdb_scrape.page_url(listing)
            # Python 3: urllib.request.urlopen replaces the old urllib.urlopen
            # (assumes "import urllib.request" at the top of the file, not shown here).
            with contextlib.closing(urllib.request.urlopen(page_url)) as response:
                score = imdb_scrape.get_scores(response.read())
            imdb_scores.write(line.strip() + '\t' + ujson.dumps(score) + "\n")
        except AttributeError:
            # Page did not have the expected elements; record the listing as missed.
            imdb_missed.write(line.strip() + '\n')
    imdb_scores.close()
    imdb_missed.close()
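
The first lines of Example No. 4 are the tail of a loop that the excerpt cuts off. A hedged reconstruction of that loop's shape, assuming the scraper has already pulled (href, count) pairs out of the IMDb title page (the extraction code is not part of this snippet), might be:

def tally_review_counts(link_counts):
    # link_counts: iterable of (href, count) pairs, e.g.
    # [("reviews", 1234), ("externalreviews", 56)]; other hrefs are ignored.
    reviews = external_reviews = 0
    for href, count in link_counts:
        if href == 'reviews':
            reviews = count
        elif href == 'externalreviews':
            external_reviews = count
    return {'reviews': reviews, 'external_reviews': external_reviews}

The real get_scores additionally collects popularity, rating and user counts, as the dict it returns above shows.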
Example No. 5
def test_finder_url(self):
    listing = Listing("Law & Order: Special Victims Unit|tv")
    self.assertEqual(
        self.rt.finder_url(listing),
        "http://www.rottentomatoes.com/search/?search=Law+%26+Order%3A+Special+Victims+Unit#results_tv_tab"
    )
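
The expected string in this assertion is just the show title percent-encoded into Rotten Tomatoes' search path. The project's real finder_url is not shown in these excerpts, but a sketch with the standard library reproduces the same encoding:

from urllib.parse import quote_plus

def rt_finder_url(show_name, listing_type):
    # Sketch only; rt_finder_url is not the project's function name.
    # "Law & Order: Special Victims Unit" -> "Law+%26+Order%3A+Special+Victims+Unit"
    return ("http://www.rottentomatoes.com/search/?search=%s#results_%s_tab"
            % (quote_plus(show_name), listing_type))

rt_finder_url("Law & Order: Special Victims Unit", "tv") yields exactly the string asserted above.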
Example No. 6
def test_page_url(self):
    listing = Listing("CNN Newsroom|tv")
    print(self.imdb.page_url(listing))
Example No. 7
def test_fetch(self):
    listing = Listing("Law & Order: Special Victims Unit|tv")
    self.assertEqual(
        self.imdb.finder_url(listing, titles=True),
        'http://www.imdb.com/find?q=Law+%26+Order%3A+Special+Victims+Unit&type=ep&s=tt&&exact=true'
    )
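
The IMDb URL asserted here applies the same encoding idea to the /find endpoint; the doubled '&&' before exact=true suggests the original builds the query by string concatenation. A sketch with urllib.parse.urlencode, which emits a single '&' at that spot and whose parameter names are copied from the expected URL rather than from the project's code, would be:

from urllib.parse import urlencode

def imdb_finder_url(show_name):
    # Sketch only; imdb_finder_url is not the project's function name.
    params = {'q': show_name, 'type': 'ep', 's': 'tt', 'exact': 'true'}
    return 'http://www.imdb.com/find?' + urlencode(params)

imdb_finder_url("Law & Order: Special Victims Unit") returns
'http://www.imdb.com/find?q=Law+%26+Order%3A+Special+Victims+Unit&type=ep&s=tt&exact=true'.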