Example #1
def parse_torrents(self, html):
    # Scrape EZTV's HTML listing with the class-level regexes and yield one
    # Torrent per download link found for each matching release.
    for (name, links, age) in EZTV.name_re.findall(html):
        quality = parse_quality(name)
        # Skip releases that do not match the configured quality filter.
        if self.quality and self.quality != quality:
            continue
        # EZTV only reports a relative age, so derive an absolute
        # publication timestamp from it.
        pub_date = (datetime.now() - Age.parse(age)).strftime("%Y%m%d%H%M%S")
        for link in EZTV.link_re.findall(links):
            yield Torrent(name=name,
                          source='eztv',
                          seeds=-1, leechs=-1,   # peer counts are not exposed
                          size=-1,
                          files=-1,
                          quality=quality,
                          url=link,
                          date=datetime.now().strftime("%Y%m%d%H%M%S"),
                          pub_date=pub_date)
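All of these providers lean on a shared parse_quality(name) helper that is not shown in the snippets. A minimal sketch of what such a helper might look like, assuming the quality is inferred from common release tags in the torrent name (the tag list and return values here are illustrative, not the project's actual ones):

import re

# Hypothetical stand-in for the parse_quality() helper used in these examples:
# infer a quality label from common release tags found in the torrent name.
_QUALITY_TAGS = [
    ("1080p", re.compile(r"1080p", re.I)),
    ("720p",  re.compile(r"720p", re.I)),
    ("hdtv",  re.compile(r"hdtv", re.I)),
]

def parse_quality(name):
    for label, pattern in _QUALITY_TAGS:
        if pattern.search(name):
            return label
    return None  # unknown quality

# Example: parse_quality("Some.Show.S01E02.720p.HDTV.x264") -> "720p"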
Example #2
def search(self, show):
    # Query the ezrss.it RSS search feed and yield a Torrent per <item>.
    url = "http://www.ezrss.it/search/index.php?show_name=%s%s&date=&quality=%s&release_group=&mode=rss" % (
        urllib2.quote(self.show_name),
        "&show_name_exact=true" if self.show_name_exact else '',
        self.quality)
    rss = urllib2.urlopen(url).read()
    for item in BeautifulSoup(rss).rss.channel.findAll('item'):
        yield Torrent(name=item.title.string,
                      source='eztv',
                      seeds=-1, leechs=-1,          # the feed carries no peer counts
                      size=int(item.enclosure['length']),
                      files=-1,
                      quality=parse_quality(item.title.string),
                      url=item.enclosure['url'],
                      date=datetime.now().strftime("%Y%m%d%H%M%S"),
                      # pubDate is RFC 822; slice out the "DD Mon YYYY HH:MM:SS" core.
                      pub_date=datetime.strptime(item.pubdate.string[5:25], "%d %b %Y %H:%M:%S").strftime("%Y%m%d%H%M%S"))
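The pub_date conversion above relies on the RSS pubDate field following RFC 822 ("Wed, 02 Oct 2002 15:00:00 +0200"): slicing characters 5 through 24 drops the weekday prefix and the timezone suffix, leaving a fixed-width core that strptime can parse directly (Example #3 uses the same trick on isoHunt's JSON pubDate). A standalone illustration; the sample date string is made up:

from datetime import datetime

# RFC 822 date as found in RSS feeds (sample value, not real feed output).
pubdate = "Mon, 01 Jan 2024 12:34:56 +0000"

# Characters 5..24 are the "DD Mon YYYY HH:MM:SS" core (assumes a two-digit day).
core = pubdate[5:25]                          # "01 Jan 2024 12:34:56"
parsed = datetime.strptime(core, "%d %b %Y %H:%M:%S")
print(parsed.strftime("%Y%m%d%H%M%S"))        # "20240101123456"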
Example #3
def search(self, show):
    # Build the isoHunt JSON API query from the configured keywords,
    # sorted by seed count, and yield a Torrent per result.
    keywords = [show.expand(w) for w in self.keywords]
    url = "http://isohunt.com/js/json.php?ihq=%s&sort=seeds" % urllib2.quote(" ".join(keywords))
    res = json.loads(urllib2.urlopen(url).read())
    for t in res['items']['list']:
        if t['title']:
            # isoHunt wraps matched terms in <b>...</b>; strip the markup.
            name = unicode(t['title']).replace('<b>', '').replace('</b>', '')
            yield Torrent(name=name,
                          source='isohunt',
                          seeds=int(t['Seeds']) if t['Seeds'] else -1,
                          leechs=int(t['leechers']) if t['leechers'] else -1,
                          size=t['length'],
                          files=int(t['files']) if t['files'] else -1,
                          quality=parse_quality(name),
                          url=t['enclosure_url'],
                          date=datetime.now().strftime("%Y%m%d%H%M%S"),
                          # pubDate is RFC 822; slice out the "DD Mon YYYY HH:MM:SS" core.
                          pub_date=datetime.strptime(t['pubDate'][5:25], "%d %b %Y %H:%M:%S").strftime("%Y%m%d%H%M%S"))
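Every provider yields Torrent objects with the same field set, so results from several sources can be merged and ranked uniformly. A consumption sketch, assuming each provider exposes a search(show) generator like Examples #2 and #3; the namedtuple below is a stand-in for the project's actual Torrent class:

from collections import namedtuple

# Stand-in Torrent record with the fields the providers fill in above.
Torrent = namedtuple("Torrent",
                     "name source seeds leechs size files quality url date pub_date")

def best_torrents(providers, show, limit=10):
    # Merge the generators from every provider and rank by seed count;
    # -1 (unknown) naturally sorts last.
    results = []
    for provider in providers:
        results.extend(provider.search(show))
    return sorted(results, key=lambda t: t.seeds, reverse=True)[:limit]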
Example #4
def parse_torrents(self, keywords, page):
    # Scrape one page of The Pirate Bay search results with a single regex
    # over the raw HTML.
    name_re = re.compile(r"""<div class="detName"><a .*?>(.+?)</a></div>\s*<a href="(.+?)".+?Uploaded (.+?), Size (.+?), ULed by.+?>(.+?)</a>.+?<td align="right">(.+?)</td>\s+<td align="right">(.+?)</td>\s+</tr>""", re.MULTILINE | re.DOTALL)
    url = "http://thepiratebay.org/search/%s/%d/3" % (urllib2.quote(keywords), page)
    # Each match is (name, link, uploaded, size, uploader, seeds, leechers).
    for a in name_re.findall(urllib2.urlopen(url).read()):
        quality = parse_quality(a[0])
        if self.quality and self.quality != quality:
            continue
        # Only keep results uploaded by one of the configured trusted users.
        if self.users and a[4] in self.users:
            yield Torrent(name=a[0],
                          source='thepiratebay',
                          seeds=int(a[5]),
                          leechs=int(a[6]),
                          size=convert_size(a[3]),
                          files=-1,
                          quality=quality,
                          url=a[1],
                          date=datetime.now().strftime("%Y%m%d%H%M%S"),
                          pub_date=convert_date(a[2]).strftime("%Y%m%d%H%M%S"))
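The Pirate Bay reports sizes as human-readable strings such as "1.37 GiB", which the convert_size() helper turns into a byte count before it is stored on the Torrent. A plausible sketch of such a helper; the unit table, the &nbsp; handling, and the -1 fallback are assumptions, not the project's actual code:

import re

# Hypothetical stand-in for convert_size(): turn "1.37 GiB" / "680.5 MiB"
# style strings into a byte count.
_UNITS = {"B": 1, "KiB": 1024, "MiB": 1024**2, "GiB": 1024**3, "TiB": 1024**4}

def convert_size(text):
    match = re.match(r"([\d.]+)\s*(\w+)", text.replace("&nbsp;", " "))
    if not match:
        return -1                       # unknown size, mirroring the -1 convention above
    value, unit = match.groups()
    return int(float(value) * _UNITS.get(unit, 1))

# Example: convert_size("1.37 GiB") -> 1471026298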