def getTorrent(self, episode_descriptor):
    """Scrape the EZTV series page for *episode_descriptor* and return the
    magnet URI of the best-scoring candidate torrent.

    Returns None when no candidate survives the descriptor's filter.
    """
    series_path = self.getEZTV_series_page(episode_descriptor)
    page = urllib2.urlopen("http://eztv.it" + series_path)
    soup = BeautifulSoup(page)
    candidates = []
    for result in soup.findAll('tr', {"class": "forum_header_border"}):
        parts = result.findAll('td', {"class": "forum_thread_post"})
        # EZTV listing rows have exactly four cells; anything else is a
        # header/spacer row we can skip outright.
        if len(parts) != 4:
            continue
        try:
            title = parts[1].a.string
            magnet = parts[2].findAll('a', {"class": "magnet"})[0]['href']
        except (AttributeError, IndexError, KeyError):
            # A malformed row (missing title link or magnet anchor) used to
            # abort the whole search; skip just that row instead, matching
            # the per-row guard the sibling scrapers use.
            continue
        candidates.append((title, magnet))
    candidates = episode_descriptor.filter(candidates)
    if not candidates:
        log.info("no torrent candidates found")
        return None
    log.info("candidate torrents %s" % (candidates, ))
    episode = score(candidates)
    log.info("chosen torrent %s" % (episode, ))
    return episode[1]
def getTorrent(self, episode_descriptor):
    """Look the episode up on EZTV and hand back the magnet link of the
    highest-scoring torrent, or None when nothing matches."""
    path = self.getEZTV_series_page(episode_descriptor)
    soup = BeautifulSoup(urllib2.urlopen("http://eztv.it" + path))

    candidates = []
    for row in soup.findAll("tr", {"class": "forum_header_border"}):
        cells = row.findAll("td", {"class": "forum_thread_post"})
        # Only genuine listing rows carry exactly four cells.
        if len(cells) != 4:
            continue
        name = cells[1].a.string
        link = cells[2].findAll("a", {"class": "magnet"})[0]["href"]
        candidates.append((name, link))

    candidates = episode_descriptor.filter(candidates)
    if len(candidates) == 0:
        log.info("no torrent candidates found")
        return None
    log.info("candidate torrents %s" % (candidates,))
    best = score(candidates)
    log.info("chosen torrent %s" % (best,))
    return best[1]
def getTorrent(self, episode_descriptor):
    """Parse a saved Kickass Torrents results page and return the magnet
    URI of the best-scoring torrent for *episode_descriptor*.

    Raises RuntimeError when no candidate passes the descriptor's filter.
    """
    log.info("searching for torrent: %s", episode_descriptor)
    query = episode_descriptor.get_query_string()
    query = urllib.quote(query)
    log.info("searching: " + query)
    # NOTE(review): debug scaffolding -- parses a locally saved results page
    # instead of fetching live results.  The commented-out URL below is The
    # Pirate Bay, whose markup does NOT use 'torrentnameCell' (that is KAT
    # markup), so simply restoring it would break the selectors.
    # TODO: wire up the real KAT search URL before shipping.
    #page = urllib2.urlopen("http://thepiratebay.org/search/%s/0/7/0" %(s,))
    page = open("kat_tidy.html")
    soup = BeautifulSoup(page)
    candidates = []
    name_cells = soup.findAll('td', {'class': 'torrentnameCell'})
    for row in name_cells[0].findAll('tr'):
        try:
            tds = row.findAll('td')
            # Collapse internal runs of whitespace, then strip the ends.
            title = re.sub(r'\s+', ' ', tds[1].div.a.string).strip()
            magnet = tds[1].findAll(
                'a', {'href': re.compile('^magnet')})[0]['href']
            candidates.append((title, magnet))
        except (AttributeError, IndexError, KeyError, TypeError) as ex:
            # Header/ad rows lack the expected anchors; log and move on
            # rather than silently swallowing every exception via print.
            log.debug("skipping result row: %s", ex)
    candidates = episode_descriptor.filter(candidates)
    if not candidates:
        raise RuntimeError("no torrent for episode '"
                           + str(episode_descriptor) + "' found")
    episode = score(candidates)
    log.info("chosen torrent: %s", episode)
    return episode[1]
def getTorrent(self, episode_descriptor):
    """Search The Pirate Bay for *episode_descriptor* and return the magnet
    URI of the best-scoring result.

    Returns None when the search yields nothing or no candidate survives
    the descriptor's filter.
    """
    log.info("searching for torrent: %s", episode_descriptor)
    query = episode_descriptor.get_query_string()
    query = urllib.quote(query)
    log.info("searching: " + query)
    page = urllib2.urlopen(
        "http://thepiratebay.org/search/%s/0/7/0" % (query, ))
    soup = BeautifulSoup(page)
    results = soup.findAll(id='searchResult')
    if not results:
        log.info("no torrents found for query")
        return None
    candidates = []
    for row in results[0].findAll('tr'):
        try:
            tds = row.findAll('td')
            # Collapse internal whitespace runs, then strip the ends.
            title = re.sub(r'\s+', ' ', tds[1].div.a.string).strip()
            magnet = tds[1].findAll(
                'a', {'href': re.compile('^magnet')})[0]['href']
            candidates.append((title, magnet))
        except (AttributeError, IndexError, KeyError, TypeError) as ex:
            # Header/spacer rows lack the expected anchors; skip them and
            # record why, instead of printing to stdout and passing.
            log.debug("skipping result row: %s", ex)
    candidates = episode_descriptor.filter(candidates)
    if not candidates:
        log.info("no torrent candidates found")
        return None
    # (typo fix: was "candidates torrents")
    log.info("candidate torrents: %s" % (candidates, ))
    episode = score(candidates)
    log.info("chosen torrent: %s" % (episode, ))
    return episode[1]
def getTorrent(self, episode_descriptor): log.info("searching for torrent: %s", episode_descriptor) s = episode_descriptor.get_query_string() s = urllib.quote(s) log.info("searching: " + s) page = urllib2.urlopen("http://thepiratebay.org/search/%s/0/7/0" % (s,)) soup = BeautifulSoup(page) candidates = [] r = soup.findAll(id="searchResult") if len(r) == 0: log.info("no torrents found for query") return None for i in r[0].findAll("tr"): try: tds = i.findAll("td") td = tds[1] title = re.sub(r"\s+", " ", td.div.a.string) title = re.sub(r"^\s*", "", title) title = re.sub(r"\s*$", "", title) magnet = tds[1].findAll("a", {"href": re.compile("^magnet")})[0]["href"] candidates.append((title, magnet)) except Exception as ex: print ex pass candidates = episode_descriptor.filter(candidates) if len(candidates) == 0: log.info("no torrent candidates found") return None log.info("candidates torrents: %s" % (candidates,)) episode = score(candidates) log.info("chosen torrent: %s" % (episode,)) return episode[1]
def getTorrent(self, episode_descriptor): log.info("searching for torrent: %s", episode_descriptor) s = episode_descriptor.get_query_string() s = urllib.quote(s) log.info("searching: " + s) #page = urllib2.urlopen("http://thepiratebay.org/search/%s/0/7/0" %(s,)) page = open("kat_tidy.html") soup = BeautifulSoup(page) candidates = [] r = soup.findAll('td', {'class':'torrentnameCell'}) print r for i in r[0].findAll('tr'): print "++++" print i try: tds = i.findAll('td') td = tds[1] title = re.sub(r'\s+', ' ', td.div.a.string) title = re.sub(r'^\s*', '', title) title = re.sub(r'\s*$', '', title) print "title:", title magnet = tds[1].findAll('a', {'href' : re.compile('^magnet')})[0]['href'] candidates.append( (title, magnet) ) except Exception as ex: print ex pass candidates = episode_descriptor.filter(candidates) if len(candidates) == 0: raise RuntimeError("no torrent for episode '" + str(episode_descriptor) + "' found") episode = score(candidates) print episode return episode[1]