Example no. 1
 def _get_magnet(self,url):
    i=len(url)-1
    while url[i]!='/':
       i-=1
    url=url[:i+1]+urllib.quote_plus(url[i+1:])
    c=httplib2.Http()
    resp,content=c.request(url)
    if "set-cookie" in resp:
       cookie=resp['set-cookie']
    else:
       cookie=None
    tree=libxml2.htmlParseDoc(content,"utf-8")
    form=htmltools.find_elements(tree.getRootElement(),"form",id="frmAdultDisclaimer")
    if form:
       form=form[0]
       inputs=htmltools.find_elements(form,"input")
       body={}
       for i in inputs:
          body[i.prop('name')]=i.prop('value')
       del body['btn_Decline']
       body=urllib.urlencode(body)
       headers={'Content-type':"application/x-www-form-urlencoded"}
       if cookie:
          headers['Cookie']=cookie
       url=urllib.basejoin(url,form.prop('action'))
       resp,content=c.request(url,"POST",body,headers)
       if "set-cookie" in resp:
          cookie=resp['set-cookie']
       if cookie:
          headers['Cookie']=cookie
       url=urllib.basejoin(url,resp["location"])
       resp,content=c.request(url,headers=headers)
       tree=libxml2.htmlParseDoc(content,"utf-8")
    return htmltools.find_elements(tree.getRootElement(),"a",**{'class':'dwld_links'})[0].prop('href')
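The backwards scan finds the last '/' so that only the final path segment gets percent-encoded. A shorter sketch of the same idiom (quote_last_segment is a hypothetical helper, not part of the plugin; it assumes the URL contains a '/'):

import urllib

def quote_last_segment(url):
    # Percent-encode only the part after the last '/', leaving the
    # scheme, host and directory path untouched.
    base, sep, name = url.rpartition('/')
    return base + sep + urllib.quote_plus(name)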
Example no. 2
 def _do_load_filelist(self):
    res = TorrentSearch.Plugin.FileList()
    http=httplib2.Http()
    headers={'Cookie':self.plugin.login_cookie}
    resp,content=http.request("http://www2.frenchtorrentdb.com/?section=INFOS&id="+self._get_site_id()+"&type=1",headers=headers)
    tree=libxml2.htmlParseDoc(content,"utf-8")
    div = htmltools.find_elements(tree.getRootElement(), "div", id="mod_infos")[0]
    pre = htmltools.find_elements(div, "pre")[0]
    files = htmltools.find_elements(pre, "p")
    cur_folder = ""
    for i in files:
       if htmltools.find_elements(i, "img")[0].prop("src")=="/themes/images/files/folder.gif":
          cur_folder = i.getContent().strip().lstrip()
          continue
       data = i.getContent().strip().lstrip()
       j=len(data)-1
       while data[j]!='(':
          j-=1
       filename,size=data[:j],data[j+1:-1]
       filename = filename.strip().lstrip()
       if cur_folder:
          filename = cur_folder+"/"+filename
       size = size.strip().lstrip()
       res.append(filename, size)
    return res
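The loop above walks backwards to the last '(' to split a "filename (size)" string; str.rfind does the same in one call. A minimal sketch (hypothetical helper, assuming the size is always the final parenthesised group):

def split_name_and_size(data):
    # "movie.avi (700 MB)" -> ("movie.avi", "700 MB")
    j = data.rfind('(')
    return data[:j].strip(), data[j+1:-1].strip()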
Example no. 3
 def _do_load_filelist(self):
    res = TorrentSearch.Plugin.FileList()
    url = "http://www.torrenthound.com/hash/%s/files"%self.hashvalue
    c=httplib2.Http()
    resp,content=c.request(url)
    tree=libxml2.htmlParseDoc(content,"utf-8")
    for i in htmltools.find_elements(htmltools.find_elements(tree.getRootElement(), "div", id="pcontent")[0], "tr", **{'class':'filename'}):
       filename,size=htmltools.find_elements(i,"td")
       filename=filename.getContent()
       size=size.getContent()
       res.append(filename,size.upper())
    return res
Example no. 4
 def _run_search(self,pattern,href=None,page=0):
    if href==None:
       href="http://mononoke-bt.org/browse2.php?search="+urllib.quote_plus(pattern)
    resp,content=self.http_queue_request(href,headers={'Cookie':self._app.parse_cookie(self.login_cookie)})
    tree=libxml2.htmlParseDoc(content,"utf-8")
    pager=htmltools.find_elements(tree.getRootElement(),"div",**{'class':'animecoversfan'})[0].parent.next
    try:
       data=htmltools.find_elements(pager,"b")[-1].getContent()
       i=len(data)-1
       while data[i] in "0123456789":
          i-=1
       self.results_count=eval(data[i+1:])
    except:
       pass
    restable=pager.next.next
    lines=htmltools.find_elements(restable,"tr",1)[1:-2]
    for i in lines:
       try:
          cells=htmltools.find_elements(i,"td")
          team, show, stype, name, torrent_link, nbfiles, nbcmt, rate, date, size, views, dl, seeders, leechers, ratio=cells
          link=htmltools.find_elements(name,"a")[0]
          label=link.getContent()
          link=urllib.basejoin(href,link.prop('href'))
          torrent_link=urllib.basejoin(href,htmltools.find_elements(torrent_link,"a")[0].prop('href'))+"&r=1"
          date=htmltools.find_elements(date,"nobr")[0].children.getContent()
          date=time.strptime(date,"%Y-%m-%d")
          date=datetime.date(date.tm_year,date.tm_mon,date.tm_mday)
          strsize=""
          cell=size.children
          while cell:
             if cell.name=="text":
                if strsize:
                   strsize+=" "
                strsize+=cell.getContent().upper()
             cell=cell.next
          size=strsize.replace('O','B')
          seeders=eval(seeders.getContent())
          leechers=eval(leechers.getContent())
          resp,content=self.http_queue_request(link,headers={'Cookie':self._app.parse_cookie(self.login_cookie)})
          itemtree=libxml2.htmlParseDoc(content,"utf-8")
          tds=htmltools.find_elements(itemtree.getRootElement(),"td")
          hashvalue=None
          for j in tds:
             if j.getContent()=="Info hash":
                hashvalue=j.next.next.getContent()
          self.add_result(MononokeBTPluginResult(label,date,size,seeders,leechers,torrent_link,hashvalue))
       except:
          pass
       if self.stop_search:
          return
    if not self.stop_search:
       try:
          b=htmltools.find_elements(pager,"b")[-1]
          if b.parent.name=="a":
             url="http://mononoke-bt.org/browse2.php?search=%s&page=%d"%(urllib.quote_plus(pattern),page+1)
             self._run_search(pattern,url,page+1)
       except:
          pass
Example no. 5
 def _do_load_comments(self):
    res = TorrentSearch.Plugin.CommentsList()
    url = "http://www.torrenthound.com/hash/%s/comments"%self.hashvalue
    c=httplib2.Http()
    resp,content=c.request(url)
    tree=libxml2.htmlParseDoc(content,"utf-8")
    comments_list = []
    for i in htmltools.find_elements(htmltools.find_elements(tree.getRootElement(), "div", id="pcontent")[0], "div", **{'class':'c'})[:-1]:
       comments_list.insert(0,i)
    for i in comments_list:
       content = htmltools.find_elements(i, "div", **{'class':'middle'})[0].getContent()
       date=self._parseCommentDate(htmltools.find_elements(htmltools.find_elements(i, "div", **{'class':'top'})[0], "p")[0].children.getContent()[7:-6])
       res.append(TorrentSearch.Plugin.TorrentResultComment(content,date))
    return res
Example no. 6
 def _parseLinks(self,url):
    c=httplib2.Http()
    resp,content=c.request(url)
    tree=libxml2.htmlParseDoc(content,"utf-8")
    div=htmltools.find_elements(tree.getRootElement(),"div",id="buttons")[0]
    links=htmltools.find_elements(div,"a")
    reflink=urllib.basejoin(url,links[0].prop('href'))
    try:
       magnet=htmltools.find_elements(htmltools.find_elements(tree.getRootElement(),"span",id="magnet")[0], "a")[0].prop('href')
       if "&" in magnet:
          magnet=magnet[:magnet.index("&")]
    except:
       magnet=None
    return reflink,magnet
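Cutting the magnet URI at the first '&' keeps only the leading xt (info-hash) parameter and drops the display-name and tracker parameters. The same trimming as a one-liner (sketch):

def bare_magnet(magnet):
    # Keep only "magnet:?xt=urn:btih:<hash>"; drop &dn=, &tr=, ...
    return magnet.split('&', 1)[0]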
Example no. 7
 def _run_search(self,pattern, page_url=''):
    http=httplib2.Http()
    headers={'Cookie':self.login_cookie}
    if page_url=="":
       page_url="http://www.bakabt.com/browse.php?q="+urllib.quote(pattern)
    resp,content=http.request(page_url,headers=headers)
    tree=libxml2.htmlParseDoc(content,"utf-8")
    try:
       data=htmltools.find_elements(htmltools.find_elements(tree.getRootElement(), "div", **{'class':'pager'})[0], "a")[-2].getContent()
       i=len(data)-1
       while i>=0 and data[i] in "0123456789":
          i-=1
       self.results_count=eval(data[i+1:])
    except:
       pass
    results_table=htmltools.find_elements(tree.getRootElement(),"table",**{'class':'torrents'})[0]
    lines=htmltools.find_elements(results_table,"tr")[1:]
    is_alt=False
    for i in range(len(lines)):
       try:
          line=lines[i]
          if "torrent_alt" in line.prop('class') and not is_alt:
             is_alt=True
             continue
          if not "torrent_alt" in line.prop('class'):
             is_alt=False
          
          cells=htmltools.find_elements(line,"td")
          if len(cells)==6:
             category, details, comments, date, size, transfers = cells
          else:
             details, comments, date, size, transfers = cells
          day,month,year=date.getContent().replace("'","").split(" ")
          day=eval(day)
          year=eval("20"+year)
          month=['Jan','Feb','Mar','Apr','May','Jun','Jul','Aug','Sep','Oct','Nov','Dec'].index(month)+1
          date=datetime.date(year,month,day)
          seeders,leechers=htmltools.find_elements(transfers,"a")
          seeders=eval(seeders.getContent())
          leechers=eval(leechers.getContent())
          size=size.getContent()
          link=htmltools.find_elements(details,"a")[0]
          label=link.getContent()
          link=urllib.basejoin(page_url,link.prop('href'))
          self.add_result(BakaBTPluginResult(label,date,size,seeders,leechers,link))
       except:
          pass
       if self.stop_search:
          return
    if not self.stop_search:
       link=htmltools.find_elements(htmltools.find_elements(tree.getRootElement(), "div", **{'class':'pager'})[0], "a")[-1]
       if link.prop('class')!='selected':
          self._run_search(pattern, urllib.basejoin(page_url, link.prop('href')))
Example no. 8
 def _parseLinks(self,url):
    c=httplib2.Http()
    resp,content=c.request(url)
    tree=libxml2.htmlParseDoc(content,"utf-8")
    links=htmltools.find_elements(tree.getRootElement(),"a")
    reflink=""
    magnet=None
    for i in links:
       if i.getContent().lstrip().rstrip()=="Download torrent":
          reflink=urllib.basejoin(url,i.prop('href'))
       if i.getContent().lstrip().rstrip()=="magnet link":
          magnet=urllib.basejoin(url,i.prop('href'))
          if "&" in magnet:
             j=magnet.index("&")
             magnet=magnet[:j]
    return reflink,magnet
Example no. 9
 def _run_search(self,pattern,href=None):
    http=httplib2.Http()
    if href==None:
       href="http://xtremespeeds.net/browse.php"
       headers={'Content-type':'application/x-www-form-urlencoded','Cookie':self.login_cookie,"User-Agent":"Python-httplib2/$Rev$"}
       data=urllib.urlencode({'do':'search','keywords':pattern,'search_type':'t_name','category':'0'})
       resp,content=http.request(href,'POST',data,headers)
    else:
       headers={'Cookie':self.login_cookie,"User-Agent":"Python-httplib2/$Rev$"}
       resp,content=http.request(href,'POST',headers=headers)
    tree=libxml2.htmlParseDoc(content,"utf-8")
    try:
       a=htmltools.find_elements(tree.getRootElement(),"a",**{'class':'current'})[0]
       data=a.prop('title')
       i=len(data)-1
       while data[i] in "0123456789":
          i-=1
       self.results_count=eval(data[i+1:])
    except:
       pass
    restable=htmltools.find_elements(tree.getRootElement(),"table",id="sortabletable")[0]
    lines=htmltools.find_elements(restable,"tr")[1:]
    for i in lines:
       try:
          category,name,torrent_link,comments,size,snatched,seeders,leechers,uploader=htmltools.find_elements(i,"td")
          label=htmltools.find_elements(name,"a")[0].getContent()
          date=htmltools.find_elements(name,"div")[0].getContent().rstrip().lstrip().split(' ')[0]
          date=time.strptime(date,"%m-%d-%Y")
          date=datetime.date(date.tm_year,date.tm_mon,date.tm_mday)
          torrent_link=htmltools.find_elements(torrent_link,"a")[0].prop('href')
          size=size.getContent().rstrip().lstrip()
          seeders=eval(seeders.getContent().rstrip().lstrip())
          leechers=eval(leechers.getContent().rstrip().lstrip())
          self.add_result(xtremespeedsPluginResult(label,date,size,seeders,leechers,torrent_link))
       except:
          pass
       if self.stop_search:
          return
    if not self.stop_search:
       try:
          next_link=None
          pager=htmltools.find_elements(tree.getRootElement(),"div",id="navcontainer_f")[0]
          links=htmltools.find_elements(pager,"a")
          for i in links:
             if i.getContent()==">":
                next_link=i
                break
          if next_link:
             self._run_search(pattern, urllib.basejoin(href,next_link.prop('href')))
       except:
          pass
Example no. 10
 def _run_search(self,pattern):
    url="http://www.monova.org/rss.php?search="+urllib.quote(pattern)+"&order=added"
    resp,content=self.http_queue_request(url)
    tree=libxml2.parseDoc(content)
    results=htmltools.find_elements(tree.getRootElement(), "item")
    self.results_count=len(results)
    for i in results:
       title=htmltools.find_elements(i, "title")[0].getContent()
       date=htmltools.find_elements(i, "pubDate")[0].getContent()
       day,month,year=date.split(" ")[1:4]
       while day[0]=="0":
          day=day[1:]
       day=eval(day)
       year=eval(year)
       month=['Jan','Feb','Mar','Apr','May','Jun','Jul','Aug','Sep','Oct','Nov','Dec'].index(month)+1
       date=datetime.date(year,month,day)
       link=htmltools.find_elements(i,"enclosure")[0]
       size=self._formatSize(link.prop('length'))
       torrent_link=link.prop('url')
       self.add_result(MonovaPluginResult(title, date, size, torrent_link))
       if self.stop_search:
          return
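The while loop strips leading zeros from the day because eval('08') is a SyntaxError in Python 2 (a leading 0 introduces an octal literal). int() has no such problem, so the date handling can be sketched more safely (parse_pubdate is a hypothetical helper for the RSS pubDate format used above):

import datetime

MONTHS = ['Jan','Feb','Mar','Apr','May','Jun','Jul','Aug','Sep','Oct','Nov','Dec']

def parse_pubdate(pubdate):
    # "Tue, 08 Jun 2010 12:00:00 +0000" -> datetime.date(2010, 6, 8)
    day, month, year = pubdate.split(" ")[1:4]
    return datetime.date(int(year), MONTHS.index(month) + 1, int(day))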
Example no. 11
 def _do_load_comments(self):
    i=len(self.details_url)-1
    while self.details_url[i]!='/':
       i-=1
    url=self.details_url[:i+1]+urllib.quote_plus(self.details_url[i+1:])
    res = TorrentSearch.Plugin.CommentsList()
    c=httplib2.Http()
    resp,content=c.request(url)
    tree=libxml2.htmlParseDoc(content,"utf-8")
    try:
       comments_div = htmltools.find_elements(tree.getRootElement(), "div", id="comments")[0]
       comments_table = htmltools.find_elements(htmltools.find_elements(comments_div, "table", **{'class':'commentsTable'})[0], "table", width="750px")[0]
       lines = htmltools.find_elements(comments_table, "tr", 1)
       for i in range(len(lines)/3):
          try:
             title_line = lines[3*i]
             content_line = lines[3*i+1]
             username_node = htmltools.find_elements(title_line, "strong")[0]
             username = username_node.getContent()
             date_data = username_node.next.getContent().rstrip().lstrip()[3:]
             try:
                date = self._parseCommentDate(date_data)
             except:
                date = ""
             content = content_line.getContent()
             res.append(TorrentSearch.Plugin.TorrentResultComment(content, date, username))
          except:
             pass
    except:
       pass
    try:
       filelist = TorrentSearch.Plugin.FileList()
       table = htmltools.find_elements(htmltools.find_elements(tree.getRootElement(), "div", **{'class':'torrentFiles'})[0], "tbody")[0]
       for i in htmltools.find_elements(table, "tr", 1)[1:]:
          filename,size = htmltools.find_elements(i, "td")
          filename = filename.getContent()
          size = size.getContent().replace('Bytes', 'B')
          filelist.append(filename,size)
       self.filelist = filelist
       self.filelist_loaded = True
    except:
       pass
    return res
Example no. 12
 def _do_get_link(self):
    i=len(self.reflink)-1
    while self.reflink[i]!='/':
       i-=1
    url=self.reflink[:i+1]+urllib.quote_plus(self.reflink[i+1:])
    utype,path=urllib.splittype(url)
    host,path=urllib.splithost(path)
    c=httplib.HTTPConnection(host)
    c.request('GET',path)
    resp=c.getresponse()
    content=resp.read()
    tree=libxml2.htmlParseDoc(content,"utf-8")
    link=htmltools.find_elements(tree.getRootElement(),id="downloadLink")[0]
    return link.prop('href')
Example no. 13
 def _try_login(self):
    c=httplib2.Http()
    username,password=self.credentials
    resp,content=c.request('http://forum.tntvillage.scambioetico.org/tntforum/index.php?act=Login&CODE=00')
    data=urllib.urlencode({'UserName':username,'PassWord':password,'CookieDate':'1','referer':''})
    headers={'Content-type':'application/x-www-form-urlencoded', 'Cookie':resp['set-cookie']}
    resp,content=c.request("http://forum.tntvillage.scambioetico.org/tntforum/index.php?act=Login&CODE=01","POST",data,headers)
    if 'set-cookie' in resp and 'member_id' in resp['set-cookie']:
       cookie=self._app.parse_cookie(resp['set-cookie'])
    else:
       return None
    tree=libxml2.htmlParseDoc(content,"utf-8")
    url=htmltools.find_elements(tree.getRootElement(), "a")[0].prop('href')
    headers={'Cookie':cookie}
    resp,content=c.request(url, 'GET',headers=headers)
    return cookie
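The login flow is: GET once to obtain a session cookie, POST the credentials with that cookie, and treat a 'member_id' in the returned set-cookie as success. A stripped-down sketch of the same httplib2 pattern (URLs and field names are placeholders, not the real endpoints):

import urllib
import httplib2

def login(login_url, post_url, username, password):
    c = httplib2.Http()
    resp, content = c.request(login_url)  # pick up the session cookie
    body = urllib.urlencode({'UserName': username, 'PassWord': password})
    headers = {'Content-type': 'application/x-www-form-urlencoded',
               'Cookie': resp.get('set-cookie', '')}
    resp, content = c.request(post_url, "POST", body, headers)
    return resp.get('set-cookie')  # None when the site set no cookie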
Example no. 14
 def _run_search(self,pattern):
    #TODO: Retrieve number of seeders and leechers when available
    href="http://eztv.it/search/"
    headers={'Content-type':'application/x-www-form-urlencoded'}
    data=urllib.urlencode({'SearchString1':pattern,'SearchString':'',"search":"Search"})
    resp,content=self.http_queue_request(href,"POST",data,headers)
    tree=libxml2.htmlParseDoc(content,"utf-8")
    div=htmltools.find_elements(tree.getRootElement(),"div",id="tooltip")[0]
    restable=div.nextElementSibling()
    try:
       self.results_count=len(htmltools.find_elements(restable,"tr",1,**{'class':'forum_header_border'}))
    except:
       pass
    lines=htmltools.find_elements(restable,"tr",1,**{'class':'forum_header_border'})
    for i in lines:
       try:
          link=htmltools.find_elements(htmltools.find_elements(i,"td")[1],"a")[0]
          label=link.getContent()
          link=urllib.basejoin(href,link.prop('href'))
          resp,content=self.http_queue_request(link)
          itemtree=libxml2.htmlParseDoc(content,"utf-8")
          torrent_link=htmltools.find_elements(itemtree.getRootElement(),"a",**{'class':'download_1'})[0].prop('href')
          magnet_link=htmltools.find_elements(itemtree.getRootElement(),"a",**{'class':'magnet'})[0].prop('href')
          data=str(itemtree)
          j=data.index("Filesize:")
          data=data[j:]
          j=data.index(" ")+1
          data=data[j:]
          j=data.index("B")+1
          size=data[:j]
          data=str(itemtree)
          j=data.index("Released:")
          data=data[j:]
          j=data.index(" ")+1
          data=data[j:]
          j=data.index("<")
          date=data[:j]
          day,month,year=date.split(" ")
          day=eval(day[:-2])
          month=["Jan","Feb","Mar","Apr","May","Jun","Jul","Aug","Sep","Oct","Nov","Dec"].index(month)+1
          year=eval(year)
          date=datetime.date(year,month,day)
          self.add_result(EZTVPluginResult(label,date,size,torrent_link,magnet_link))
       except:
          pass
       if self.stop_search:
          return
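The repeated index/slice dance on str(itemtree) extracts the text between a marker ("Filesize:", "Released:") and a terminator. A regex can do the same in one step; a generalised sketch (between is a hypothetical helper, assuming the marker occurs once):

import re

def between(html, start, end):
    # between(html, "Released:", "<") -> the raw date string, or None.
    m = re.search(re.escape(start) + r'\s*(.*?)' + re.escape(end), html, re.S)
    return m.group(1).strip() if m else None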
Example no. 15
 def _run_search(self,pattern,page=1,href=None):
    if href==None:
       href="http://www.torrentzap.com/search.php?q="+urllib.quote_plus(pattern)
    resp,content=self.http_queue_request(href)
    tree=libxml2.htmlParseDoc(content,"utf-8")
    try:
       td=htmltools.find_elements(tree.getRootElement(),"td",id="content_cap_tab_cont")[0]
       data=td.getContent()
       i=data.index('(')
       data=data[i+1:]
       i=data.index(' ')
       self.results_count=max(eval(data[:i])-5, 0)
    except:
       pass
    restable = htmltools.find_elements(htmltools.find_elements(tree.getRootElement(), "div", **{'class':'listing'})[0], "table")[0]
    lines=htmltools.find_elements(restable,"tr")[1:]
    for i in lines:
       try:
          date,name,links,size,seeders,leechers,health=htmltools.find_elements(i,"td",1)
          date=self._parseDate(date.getContent())
          link=htmltools.find_elements(name,"a")[0]
          label=link.getContent()
          link=urllib.basejoin(href,link.prop('href'))
          size=size.getContent().upper()
          j=0
          while j<len(size) and size[j] in "0123456789.":
             j+=1
          if j<len(size):
             size=size[:j]+" "+size[j:]
          seeders=eval(seeders.getContent())
          leechers=eval(leechers.getContent())
          self.add_result(TorrentzapPluginResult(label,date,size,seeders,leechers,link))
       except:
          pass
       if self.stop_search:
          return
    if not self.stop_search:
       try:
          div=htmltools.find_elements(tree.getRootElement(),"div",**{'class':'search_stat'})[0]
          link=div.lastElementChild()
          if link.name=="a":
             url=urllib.basejoin(href,link.prop('href'))
             self._run_search(pattern,0,url)
       except:
          pass
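The inner scan inserts a space between the numeric part and the unit when the size cell reads like "700.5MB". The same normalisation as a regex (sketch):

import re

def space_size(size):
    # "700.5MB" -> "700.5 MB"; "700.5 MB" is left unchanged.
    return re.sub(r'^([0-9.]+)(?=[A-Za-z])', r'\1 ', size)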
Example no. 16
 def _run_search(self,pattern,page=1,href=None):
    if href==None:
       href="http://en.kickasstorrents.com/search/%s/"%urllib.quote(pattern)
    resp,content=self.http_queue_request(href)
    tree=libxml2.htmlParseDoc(content,"utf-8")
    div=htmltools.find_elements(tree.getRootElement(),"div",**{'class':'mainpart'})[0]
    try:
       span = htmltools.find_elements(htmltools.find_elements(tree.getRootElement(), "div", **{'class':'tabs'})[1].next.next, "h2")[0]
       restable = span.next
       data=span.getContent().rstrip()
       i=len(data)-1
       while data[i] in "0123456789":
          i-=1
       self.results_count=eval(data[i+1:])
    except:
       pass
    lines=htmltools.find_elements(restable,"tr")[1:]
    for i in lines:
       try:
          links,size,nbfiles,date,seeders,leechers=htmltools.find_elements(i,"td")
          size=size.getContent()
          seeders=eval(seeders.getContent())
          leechers=eval(leechers.getContent())
          div=htmltools.find_elements(links,"div",**{'class':'torrentname'})[0]
          link=htmltools.find_elements(div,"a")[1]
          label=""
          for j in link.getContent().splitlines():
             label+=j
          link=urllib.basejoin(href,link.prop('href'))
          resp,content=self.http_queue_request(link, headers={'Cookie': 'country_code=en'})
          itemtree=libxml2.htmlParseDoc(content,"utf-8")
          try:
             div=htmltools.find_elements(itemtree.getRootElement(),"div",id="threeButs")[0]
          except:
             div=htmltools.find_elements(itemtree.getRootElement(),"div",**{"class":"buttonsline downloadButtonGroup"})[0]
          torrent,magnet=htmltools.find_elements(div,"a")[:2]
          torrent=urllib.basejoin(link,torrent.prop('href'))
          magnet=magnet.prop('href')
          if "&" in magnet:
             i=magnet.index('&')
             magnet=magnet[:i]
          data=div.next.next.next.next.children.getContent().rstrip().lstrip()[9:][:-2].rstrip()
          data=data.split(" ")
          j=0
          while j<len(data):
             if data[j]=="":
                del data[j]
             else:
                j+=1
          month,day,year=data
          day=eval(day[:-1])
          month=['Jan','Feb','Mar','Apr','May','Jun','Jul','Aug','Sep','Oct','Nov','Dec'].index(month)+1
          year=eval(year)
          date=datetime.date(year,month,day)
          self.add_result(KickassTorrentsPluginResult(label,date,size,seeders,leechers,torrent,magnet))
       except:
          pass
       if self.stop_search:
          return
    if not self.stop_search:
       try:
          try:
             pager=htmltools.find_elements(tree.getRootElement(),"div",**{'class':'pages'})[0]
          except:
             pager=None
          if pager:
             pages=htmltools.find_elements(pager,"a")
             i=0
             must_continue=False
             while i<len(pages) and not must_continue:
                p=pages[i]
                try:
                   pn=eval(pages[i].getContent())
                   if pn>page:
                      must_continue=True
                   else:
                      i+=1
                except:
                   i+=1
             if must_continue:
                self._run_search(pattern,pn,urllib.basejoin(href,pages[i].prop('href')))
       except:
          pass
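The closing block is a pagination pattern that recurs in several examples below (nos. 20, 23, 25 and 30): scan the pager's links, take the first one whose text is a page number greater than the current page, and recurse into it. The scan isolated as a sketch (hypothetical helper over the same htmltools node API):

def next_page_link(pages, page):
    # pages: the <a> nodes of the pager. Returns (number, node) for the
    # first link labelled with a page number greater than `page`, else None.
    for a in pages:
        try:
            pn = int(a.getContent())
        except ValueError:
            continue
        if pn > page:
            return pn, a
    return None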
Example no. 17
 def _run_search(self,pattern,page=1,href=None):
    if href==None:
       href="http://www.torrentbit.net/search/?torrent="+urllib.quote_plus(pattern)
    resp,content=self.http_queue_request(href)
    tree=libxml2.htmlParseDoc(content,"utf-8")
    td=htmltools.find_elements(tree.getRootElement(),"td",id="main")[0]
    try:
       h=htmltools.find_elements(td,"h1")[0]
       data=h.getContent().rstrip().lstrip()
       i=len(data)-1
       while i>=0 and not data[i] in "0123456789":
          i-=1
       j=i
       while j>=0 and data[j] in "0123456789":
          j-=1
       self.results_count=eval(data[j+1:i+1])
    except:
       pass
    div=htmltools.find_elements(htmltools.find_elements(td,"div",**{'class':'t_list'})[0],"tbody")[0]
    lines=htmltools.find_elements(div,"tr")
    for i in lines:
       try:
          date,descr,title,size,rts,seeders,leechers,dl,cat=htmltools.find_elements(i,"td")
          date=date.getContent().replace(chr(194)+chr(160)," ")
          day,month,year=date.split(" ")
          while day[0]=="0":
             day=day[1:]
          day=eval(day)
          month=["Jan","Feb","Mar","Apr","May","Jun","Jul","Aug","Sep","Oct","Nov","Dec"].index(month)+1
          year=eval(year)
          date=datetime.date(year,month,day)
          link=htmltools.find_elements(title,"a")[0]
          label=link.getContent()
          link=urllib.basejoin(href,urllib.quote(link.prop('href')))
          size=size.getContent().replace(chr(194)+chr(160)," ")
          seeders=eval(seeders.getContent())
          leechers=eval(leechers.getContent())
          resp,content=self.http_queue_request(link)
          itemtree=libxml2.htmlParseDoc(content,"utf-8")
          table=htmltools.find_elements(itemtree.getRootElement(),"table",**{'class':'tor_item'})[0]
          thislines=htmltools.find_elements(table,"tr")
          for j in thislines:
             if htmltools.find_elements(j, "th")[0].getContent()=="Download torrent:":
                itemlink=urllib.basejoin(link,htmltools.find_elements(j,"a")[0].prop('href'))
                break
          hashvalue=htmltools.find_elements(thislines[4],"td")[0].getContent()
          magnet="magnet:?xt=urn:btih:"+hashvalue
          self.add_result(TorrentbitPluginResult(label,date,size,seeders,leechers,itemlink,magnet))
       except:
          pass
       if self.stop_search:
          return
    if not self.stop_search:
       try:
          try:
             pager=htmltools.find_elements(tree.getRootElement(),"div",id="pagination")[0]
          except:
             pager=None
          if pager:
             nextlink=htmltools.find_elements(pager,"a",title="Next page")
             if nextlink:
                nextlink=urllib.basejoin(href,nextlink[0].prop('href'))
                self._run_search(pattern,1,nextlink)
       except:
          pass
Example no. 18
 def _run_search(self,pattern,href=None):
    if href==None:
       href="http://www.torrent411.com/search/"+urllib.quote_plus(pattern)
    resp,content=self.http_queue_request(href)
    content=_codecs.utf_8_encode(_codecs.latin_1_decode(content)[0])[0]
    tree=libxml2.htmlParseDoc(content,"utf-8")
    pager=htmltools.find_elements(htmltools.find_elements(tree.getRootElement(),"table",**{'class':'NB-frame'})[1],"p")[0]
    try:
       b=htmltools.find_elements(pager,"b")[-1]
       data=b.getContent()
       i=len(data)-1
       while data[i] in "0123456789":
          i-=1
       self.results_count=eval(data[i+1:])
    except:
       pass
    restable=htmltools.find_elements(pager.next.next,"table")[0]
    restable=htmltools.find_elements(restable,"table")[1]
    body=htmltools.find_elements(restable,"tbody")[0]
    lines=htmltools.find_elements(body,"tr",1)
    for i in lines:
       try:
          cat,link,a,date,b,c,d,e,f,g,h,i,size,j,seeders,leechers=htmltools.find_elements(i,"td")
          date=date.getContent().replace(chr(194)+chr(160)+"at"+chr(194)+chr(160)," ")
          date=time.strptime(date,"%Y-%m-%d %H:%M:%S")
          date=datetime.date(date.tm_year,date.tm_mon,date.tm_mday)
          size=size.getContent().replace(chr(194)+chr(160)," ")
          seeders=eval(seeders.getContent())
          leechers=eval(leechers.getContent())
          link=htmltools.find_elements(link,"a")[0]
          label=link.prop('title')
          link=urllib.basejoin("http://www.torrent411.com",link.prop('href'))
          resp,content=self.http_queue_request(link)
          content=_codecs.utf_8_encode(_codecs.latin_1_decode(content)[0])[0]
          itemtree=libxml2.htmlParseDoc(content,"utf-8")
          table=htmltools.find_elements(itemtree.getRootElement(),"table",**{'cellpadding':'3'})[1]
          desc,name,torrent,cat,siz,hashvalue=htmltools.find_elements(table,"tr")[:6]
          torrent=htmltools.find_elements(torrent,"a")[0].prop('href')
          hashvalue=htmltools.find_elements(hashvalue,"td")[1].getContent()
          self.add_result(Torrent411PluginResult(label,date,size,seeders,leechers,torrent,hashvalue))
       except:
          pass
       if self.stop_search:
          return
    if not self.stop_search:
       try:
          links=htmltools.find_elements(pager,"a")
          next_link=None
          for i in links:
             if i.getContent()=="Next"+chr(194)+chr(160)+">>":
                next_link=i
          if next_link:
             link=urllib.basejoin("http://www.torrent411.com",next_link.prop('href'))
             self._run_search(pattern,link)
       except:
          pass
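chr(194)+chr(160) is the UTF-8 byte pair for U+00A0, the non-breaking space, which is why it turns up inside dates and sizes scraped from these pages. Written directly:

NBSP = '\xc2\xa0'  # UTF-8 encoding of U+00A0 (non-breaking space)

def normalize_nbsp(text):
    # Same effect as .replace(chr(194)+chr(160), " ") in the examples above.
    return text.replace(NBSP, ' ')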
Example no. 19
 def _run_search(self,pattern,href=None):
    if href==None:
       href="http://linuxtracker.org/index.php?page=torrents&search="+urllib.quote_plus(pattern)
    resp,content=self.http_queue_request(href)
    tree=libxml2.htmlParseDoc(content,"utf-8")
    try:
       pager=htmltools.find_elements(tree.getRootElement(),"form",name="change_page")[0]
       options=htmltools.find_elements(pager,"option")
       self.results_count=50*len(options)
    except:
       pager=None
       self.results_count=50
    restable=htmltools.find_elements(tree.getRootElement(),"table",**{'class':'lista'})[1]
    lines=htmltools.find_elements(restable,"tr")[1:]
    for i in lines:
       try:
          cat,link,torrent_link,date,seeders,leechers,a,b=htmltools.find_elements(i,"td")
          label=link.getContent()
          link=urllib.basejoin(href,htmltools.find_elements(link,"a")[0].prop('href'))
          torrent_link=urllib.basejoin(href,htmltools.find_elements(torrent_link,"a")[0].prop('href'))
          date=time.strptime(date.getContent(),"%d/%m/%Y")
          date=datetime.date(date.tm_year,date.tm_mon,date.tm_mday)
          seeders=eval(seeders.getContent())
          leechers=eval(leechers.getContent())
          resp,content=self.http_queue_request(link)
          itemtree=libxml2.htmlParseDoc(content,"utf-8")
          table=htmltools.find_elements(itemtree.getRootElement(),"table",**{'class':'coltable'})[0]
          size=None
          hashvalue=None
          for td in htmltools.find_elements(table,"td"):
             if td.getContent()=="Size" and size==None:
                size=td.next.next.getContent()
             if td.getContent()=="Info Hash" and hashvalue==None:
                hashvalue=td.next.next.getContent()
          self.add_result(linuxTRACKERPluginResult(label,date,size,seeders,leechers,torrent_link,hashvalue))
       except:
          pass
       if self.stop_search:
          return
    if not self.stop_search:
       try:
          if pager:
             spans=htmltools.find_elements(pager,"span")
             i=0
             while i<len(spans) and spans[i].prop('class')!='pagercurrent':
                i+=1
             i+=1
             if i<len(spans):
                link=htmltools.find_elements(spans[i],"a")[0]
                link=urllib.basejoin(href,link.prop('href'))
                self._run_search(pattern,link)
       except:
          pass
Example no. 20
 def _run_search(self,pattern,page=1,href=None):
    if href==None:
       href="http://www.torrenthound.com/search/"+urllib.quote_plus(pattern)
    resp,content=self.http_queue_request(href)
    tree=libxml2.htmlParseDoc(content,"utf-8")
    try:
       count_div=htmltools.find_elements(tree.getRootElement(),"span",id="subsearch")[0]
       data=count_div.getContent()
       i=data.index(' ')
       self.results_count=eval(data[:i])
    except:
       pass
    restable=htmltools.find_elements(htmltools.find_elements(tree.getRootElement(),"div",id="maindiv")[0],"table")[1]
    lines=htmltools.find_elements(restable,"tr")[1:]
    for i in lines:
       try:
          link,date,size,seeders,leechers,health=htmltools.find_elements(i,"td")
          link=htmltools.find_elements(link,"a")[0]
          label=""
          j=link.children
          while j and j.name!="br":
             label+=j.getContent()
             j=j.next
          label_lines=label.splitlines()
          label=""
          for j in label_lines:
             if j:
                label+=j
          label=label.rstrip().lstrip()
          link=urllib.basejoin(href,link.prop('href'))
          date=self._parseDate(htmltools.find_elements(date,"span")[0].children.getContent().rstrip().lstrip())
          size=size.getContent().upper()
          data=htmltools.find_elements(seeders,"span")[0].getContent().lstrip().rstrip()
          j=0
          while j<len(data) and data[j] in "0123456789":
             j+=1
          seeders=eval(data[:j])
          data=htmltools.find_elements(leechers,"span")[0].getContent().lstrip().rstrip()
          j=0
          while j<len(data) and data[j] in "0123456789":
             j+=1
          leechers=eval(data[:j])
          resp,content=self.http_queue_request(link)
          itemtree=libxml2.htmlParseDoc(content,"utf-8")
          div=htmltools.find_elements(itemtree.getRootElement(),"div",id="torrent")[0]
          link=urllib.basejoin(href,htmltools.find_elements(div,"a")[0].prop('href'))
          try:
             infotable=htmltools.find_elements(itemtree.getRootElement(),"table",**{'class':'infotable'})[0]
             hashline=htmltools.find_elements(itemtree.getRootElement(),"tr")[8]
             hashvalue=htmltools.find_elements(hashline,"td")[1].getContent()
          except:
             hashvalue=None
          try:
             tmenu = htmltools.find_elements(itemtree.getRootElement(), "ul", id="tmenu")[0]
             nb_comments_cell = htmltools.find_elements(tmenu, "li")[2]
             data = nb_comments_cell.getContent()
             j=data.index("(")+1
             data=data[j:]
             j=data.index(")")
             nb_comments=int(data[:j])
          except:
             nb_comments=0
          self.add_result(TorrentHoundTorrentPluginResult(label,date,size,seeders,leechers,link,hashvalue,nb_comments))
       except:
          pass
       if self.stop_search:
          return
    if not self.stop_search:
       try:
          try:
             pager=restable.parent.next.next
          except:
             pager=None
          if pager:
             pages=htmltools.find_elements(pager,"a")
             i=0
             must_continue=False
             while i<len(pages) and not must_continue:
                p=pages[i]
                try:
                   pn=eval(pages[i].getContent())
                   if pn>page:
                      must_continue=True
                   else:
                      i+=1
                except:
                   i+=1
             if must_continue:
                self._run_search(pattern,pn,urllib.basejoin(href,pages[i].prop('href')))
       except:
          pass
Example no. 21
 def _do_get_link(self):
    c=httplib2.Http()
    headers={'Cookie':self.plugin.login_cookie}
    resp,content=c.request(self.reflink,headers=headers)
    tree=libxml2.htmlParseDoc(content,"utf-8")
    return urllib.basejoin(self.reflink,htmltools.find_elements(tree.getRootElement(),"a",**{'class':'download_link'})[0].prop('href'))
Example no. 22
 def _do_get_details(self):
    c=httplib2.Http()
    resp,content=c.request(self.reflink)
    tree=libxml2.htmlParseDoc(content,"utf-8")
    self._link=urllib.basejoin(self.reflink, htmltools.find_elements(tree.getRootElement(),"a",onmouseover="return overlib('Click here to download torrent')")[0].prop('href'))
    img=htmltools.find_elements(tree.getRootElement(),"img",alt=self.label)
    if img:
       self.poster=img[0].prop('src')
    else:
       self.poster=None
    self.poster_loaded = True
    files_div = htmltools.find_elements(tree.getRootElement(), "div", id="files")
    filelist = TorrentSearch.Plugin.FileList()
    if len(files_div)==1:
       files_div = files_div[0]
       for i in htmltools.find_elements(files_div, "tr")[1:]:
          filename,size = htmltools.find_elements(i,"td")
          filename=filename.getContent()
          size=size.getContent()
          filelist.append(filename,size)
    self.filelist = filelist
    self.filelist_loaded = True
    comments_link = htmltools.find_elements(tree.getRootElement(), "a", name="comments")
    comments = TorrentSearch.Plugin.CommentsList()
    try:
       if len(comments_link)==1:
          node = comments_link[0]
          while node.name!="table":
             node=node.next
          comments_lines = htmltools.find_elements(node, "tr")
          for i in range(len(comments_lines)/2):
             username,date = htmltools.find_elements(comments_lines[2*i], "td")
             username=username.getContent()
             try:
                date_str=date.getContent()
                date,hour=date_str.split(" ")
                day,month,year=date.split("/")
                hour,minute,second=hour.split(":")
                # int() copes with leading zeros ("08" -> 8), so there is
                # no need to strip them beforehand.
                date = datetime.datetime(int(year),int(month),int(day),int(hour),int(minute),int(second))
             except:
                date = None
             content = htmltools.find_elements(comments_lines[2*i+1], "td")[1].getContent()
             comments.append(TorrentSearch.Plugin.TorrentResultComment(content,date,username))
    except:
       pass
    self.comments=comments
    self.comments_loaded=True
Example no. 23
 def _run_search(self,pattern,page=1,href=None):
    if href==None:
       href="http://www.sumotorrent.com/searchResult.php?search="+urllib.quote_plus(pattern)
    try:
       resp,content=self.http_queue_request(href)
    except httplib2.FailedToDecompressContent:
       if not self.stop_search:
          self._run_search(pattern,page,href)
       return
    tree=libxml2.htmlParseDoc(content,"utf-8")
    try:
       count_div=htmltools.find_elements(htmltools.find_elements(tree.getRootElement(),id="trait")[0].parent,"div")[0]
       data=count_div.getContent()
       i=data.index("(")+1
       data=data[i:]
       i=data.index(" ")
       data=data[:i]
       self.results_count=eval(data)
    except:
       pass
    restable=htmltools.find_elements(tree.getRootElement(),id="panel")[0].next
    while restable and restable.type!="element":
       restable=restable.next
    lines=htmltools.find_elements(restable,"tr",1)
    for i in lines[1:]:
       try:
          if i.hasProp('class') and not i.hasProp('id'):
             cells=htmltools.find_elements(i,"td",1)
             date,typ,name,comments,links,size,seeds,leeches,more=cells
             date=self._parseDate(date.getContent().lstrip().rstrip())
             refmagnet=urllib.basejoin(href,htmltools.find_elements(name,"a")[0].prop('href'))
             name_link = htmltools.find_elements(name,"a")[0]
             details_url = urllib.basejoin(href, name_link.prop("href"))
             name=name_link.getContent().lstrip().rstrip()
             nb_comments_zone = htmltools.find_elements(comments, "strong")
             nb_comments = 0
             try:
                if len(nb_comments_zone)==1:
                   nb_comments = int(nb_comments_zone[0].getContent().rstrip().lstrip())
             except:
                pass
             size=size.getContent().lstrip().rstrip()
             seeds=eval(seeds.getContent().lstrip().rstrip())
             leeches=eval(leeches.getContent().lstrip().rstrip())
             result=SUMOTorrentPluginResult(name,date,size,seeds,leeches,htmltools.find_elements(links,"a")[0].prop('href'),refmagnet,nb_comments,details_url)
             self.add_result(result)
       except:
          pass
       if self.stop_search:
          return
    if not self.stop_search:
       try:
          pager=htmltools.find_elements(tree.getRootElement(),id="pager")
          if pager:
             pages=htmltools.find_elements(pager[0],"a")
             i=0
             must_continue=False
             while i<len(pages) and not must_continue:
                p=pages[i]
                try:
                   pn=eval(pages[i].getContent())
                   if pn>page:
                      must_continue=True
                   else:
                      i+=1
                except:
                   i+=1
             if must_continue:
                self._run_search(pattern,pn,pages[i].prop('href'))
       except:
          pass
Example no. 24
 def _run_search(self,pattern,page=1,href=None):
    if href==None:
       href="http://rarbg.com/torrents.php?search="+urllib.quote_plus(pattern)
    resp,content=self.http_queue_request(href)
    tree=libxml2.htmlParseDoc(content,"utf-8")
    try:
       div=htmltools.find_elements(tree.getRootElement(),"div",**{'class':'wp-pagenavi'})[0]
       data=htmltools.find_elements(div,"a")[-1].getContent()
       i=len(data)-1
       while data[i] in "0123456789":
          i-=1
       self.results_count=eval(data[i+1:])
    except:
       pass
    cats=htmltools.find_elements(tree.getRootElement(),"select",name="category")[0]
    categories={}
    for i in htmltools.find_elements(cats,"option"):
       categories[i.prop('value')]=i.getContent()
    lines=htmltools.find_elements(tree.getRootElement(),"tr",**{'class':'lista2'})
    for i in lines:
       try:
          cat,link,date,size,seeders,leechers,comments,c=htmltools.find_elements(i,"td")
          cat=htmltools.find_elements(cat,"a")[0].prop('href')
          j=cat.index('=')
          cat=cat[j+1:]
          if cat in categories:
             cat=categories[cat]
          else:
             cat=""
          cat=self._parseCat(cat)
          link=htmltools.find_elements(link,"a")[0]
          label=link.getContent()
          link=urllib.basejoin(href,link.prop('href'))
          hashvalue=link.split('/')[-2]
          date=self._parseDate(date.getContent())
          size=size.getContent()
          seeders=eval(seeders.getContent())
          leechers=eval(leechers.getContent())
          nb_comments=eval(comments.getContent())
          self.add_result(RARBGTorrentPluginResult(label,date,size,seeders,leechers,link,hashvalue,cat,nb_comments))
       except:
          pass
       if self.stop_search:
          return
    if not self.stop_search:
       try:
          div=htmltools.find_elements(tree.getRootElement(),"div",**{'class':'wp-pagenavi'})[0]
          cspan=htmltools.find_elements(div,"span",**{"class":"current"})[0]
          a=cspan.next.next
          if a.name=="a":
             self._run_search(pattern,0,urllib.basejoin(href,a.prop('href')))
       except:
          pass
    del tree
Example no. 25
 def _run_search(self,pattern,page=1,href=None):
    if href==None:
       href="http://www.btscene.eu/search/term/%s/cat/0/"%urllib.quote_plus(pattern)
    resp, content=self.http_queue_request(href)
    tree=libxml2.htmlParseDoc(content,"utf-8")
    try:
       data = htmltools.find_elements(tree.getRootElement(), "div", **{'class':'srch'})[0].getContent().rstrip().lstrip()
       i = data.index(' ')
       self.results_count=eval(data[:i])
    except:
       pass
    restable=htmltools.find_elements(tree.getRootElement(), "table", **{'class':'tor'})[0]
    lines=htmltools.find_elements(restable,"tr")[1:]
    for i in lines:
       try:
          if i.prop('id') and i.prop('id')[0]=="_":
             link,size,seeders,leechers=htmltools.find_elements(i,"td")
             date = htmltools.find_elements(link, "div", **{'class':'subinfo'})[0].children.getContent().rstrip().lstrip().split(" ", 3)
             date=self._parseDate(date[0]+" "+date[1])
             details_link = htmltools.find_elements(link,"a")[0]
             label=details_link.prop('title')
             link=urllib.basejoin(href,htmltools.find_elements(link,"a")[0].prop('href'))
             size=size.getContent()
             resp,content = self.http_queue_request(urllib.basejoin(href, details_link.prop('href')))
             itemtree=libxml2.htmlParseDoc(content,"utf-8")
             try:
                data = htmltools.find_elements(htmltools.find_elements(itemtree.getRootElement(), "ul", **{'class':'tabbernav'})[0], "li")[3].getContent()
                j = data.index('(')+1
                data = data[j:]
                j = data.index(')')
                data = data[:j]
                nb_comments = int(data)
             except:
                nb_comments = 0
             filelist = TorrentSearch.Plugin.FileList()
             try:
                ul = htmltools.find_elements(htmltools.find_elements(itemtree.getRootElement(), "div", **{'class':'files_view'})[0], "ul")[0]
                for item in htmltools.find_elements(ul, "li"):
                   if item.prop("class")!="folder":
                      data = item.getContent()
                      j = len(data)-1
                      while data[j]!=" ":
                         j-=1
                      j-=1
                      while data[j]!=" ":
                         j-=1
                      filename = data[:j].rstrip().lstrip()
                      filesize = data[j:].rstrip().lstrip()
                      filelist.append(filename,filesize)
             except:
                pass
             try:
                seeders=eval(seeders.getContent())
             except:
                seeders=0
             try:
                leechers=eval(leechers.getContent())
             except:
                leechers=0
             self.add_result(BTSCENETorrentPluginResult(label,date,size,seeders,leechers,link,nb_comments,filelist))
       except:
          pass
       if self.stop_search:
          return
    if not self.stop_search:
       try:
          try:
             pager=htmltools.find_elements(tree.getRootElement(),id="f")[0].parent
          except:
             pager=None
          if pager:
             pages=htmltools.find_elements(pager,"a")
             i=0
             must_continue=False
             while i<len(pages) and not must_continue:
                p=pages[i]
                try:
                   pn=eval(pages[i].getContent())
                   if pn>page:
                      must_continue=True
                   else:
                      i+=1
                except:
                   i+=1
             if must_continue:
                self._run_search(pattern,pn,urllib.basejoin(href,pages[i].prop('href')))
       except:
          pass
Example no. 26
 def _run_search(self,pattern,href=None):
    if href==None:
       href="http://www.bitenova.org/search.php"
       headers={'Content-type':'application/x-www-form-urlencoded'}
       data={'search':pattern}
       c=httplib2.Http()
       resp,content=c.request(href,"POST",urllib.urlencode(data),headers)
       if not "location" in resp:
          resp,content=c.request(href,"POST",urllib.urlencode(data),headers)
       href=urllib.basejoin(href,resp['location'])
    c=httplib2.Http()
    resp,content=c.request(href)
    tree=libxml2.htmlParseDoc(content,"utf-8")
    try:
       count_div=htmltools.find_elements(tree.getRootElement(),"div",id="pag")[0]
       li=htmltools.find_elements(count_div,"li")[1]
       data=li.getContent()[2:]
       i=data.index(" ")
       self.results_count=eval(data[:i])
    except:
       pass
    restable=htmltools.find_elements(tree.getRootElement(),"table",id="main_tt")[0]
    lines=htmltools.find_elements(restable,"tr")[1:]
    if len(lines)==0:
       return
    for i in lines:
       try:
          date,category,links,size,seeders,leechers=htmltools.find_elements(i,"td")
          date=date.getContent()
          date=date.replace(chr(194)+chr(160)," ")
          day,month,year=date.split(" ")
          while day[0] in "0 ":
             day=day[1:]
          day=eval(day)
          month=["Jan","Feb","Mar","Apr","May","Jun","Jul","Aug","Sep","Oct","Nov","Dec"].index(month)+1
          year=time.strptime(year,"%y").tm_year
          date=datetime.date(year,month,day)
          size=size.getContent().replace(chr(194)+chr(160)," ")
          seeders=eval(seeders.getContent())
          leechers=eval(leechers.getContent())
          label=htmltools.find_elements(links,"a",**{'class':None})[0].getContent()
          link=htmltools.find_elements(links,"a")[0].prop('href')
          hashvalue=None
          try:
             url,query=urllib.splitquery(link)
             queries=query.split('&')
             for q in queries:
                key,value=q.split('=')
                if key=="id":
                   hashvalue=value
          except:
             pass
          link=urllib.basejoin(href,link)
          self.add_result(BiteNovaPluginResult(label,date,size,seeders,leechers,link,hashvalue))
       except:
          pass
       if self.stop_search:
          return
    if not self.stop_search:
       try:
          try:
             pager=htmltools.find_elements(tree.getRootElement(),"div",id="pag")[0]
          except:
             pager=None
          if pager:
             link=htmltools.find_elements(pager,"a",title="Next page")[0]
             link=urllib.basejoin(href,link.prop('href'))
             self._run_search(pattern,link)
       except:
          pass
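The manual split of the query string into key=value pairs is what the standard library's urlparse.parse_qs already does (urllib.parse.parse_qs in Python 3). A sketch of the same id extraction:

import urlparse

def hash_from_link(link):
    # Pull the "id" parameter out of the download link's query string, if any.
    query = urlparse.urlparse(link).query
    values = urlparse.parse_qs(query).get('id')
    return values[0] if values else None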
Example no. 27
 def _run_search(self,pattern,stp=0,stn=20,first_page=True):
    headers={'Content-type':'application/x-www-form-urlencoded','Cookie':self.login_cookie}
    data={'sb':'0', 'sd':'0', 'cat':'0', 'stn':str(stn), 'filter':pattern}
    if first_page:
       data['set']='Imposta filtro'
    else:
       data['next']="Pagine successive >>"
       data['stp']=str(stp)
    data=urllib.urlencode(data)
    resp,content=self.http_queue_request("http://forum.tntvillage.scambioetico.org/tntforum/index.php?act=allreleases",'POST',data,headers)
    tree=libxml2.htmlParseDoc(content,"utf-8")
    ucpcontent=htmltools.find_elements(tree.getRootElement(), "div", id="ucpcontent")[0]
    try:
       data=htmltools.find_elements(htmltools.find_elements(ucpcontent, "table")[1], "td")[1].getContent()
       i=0
       while not data[i] in "0123456789":
          i+=1
       j=i
       while data[j] in "0123456789":
          j+=1
       self.results_count=eval(data[i:j])
    except:
       pass
    restable=htmltools.find_elements(ucpcontent, "table")[3]
    lines=htmltools.find_elements(restable,"tr",**{'class':'row4'})
    for i in lines:
       try:
          category_link,title,releaser,group,leechers,seeders,complete,dim,peers=htmltools.find_elements(i,"td")
          link=htmltools.find_elements(category_link, "a")[0]
          label=link.getContent()
          link=link.prop('href')
          leechers=eval(leechers.getContent()[1:-1].rstrip().lstrip())
          seeders=eval(seeders.getContent()[1:-1].rstrip().lstrip())
          resp,content=self.http_queue_request(link,headers=headers)
          itemtree=libxml2.htmlParseDoc(content,"utf-8")
          date=htmltools.find_elements(htmltools.find_elements(itemtree.getRootElement(), "span", **{'class':'postdetails'})[0],"b")[0].next.getContent()
          j=date.index(',')
          date=date[:j].rstrip().lstrip()
          month,day,year=date.split(' ')
          month=['Jan','Feb','Mar','Apr','May','Jun','Jul','Aug','Sep','Oct','Nov','Dec'].index(month)+1
          day=eval(day)
          year=eval(year)
          date=datetime.date(year,month,day)
          torrent_link=htmltools.find_elements(itemtree.getRootElement(),"a",title="Scarica allegato")[0]
          details_table=torrent_link.parent.parent.parent
          details=htmltools.find_elements(details_table,"tr")[1:]
          hashvalue=None
          for i in details:
             try:
                key=htmltools.find_elements(i,"td")[0].getContent().rstrip().lstrip()
                value=htmltools.find_elements(i,"td")[1].getContent().rstrip().lstrip()
                if key=="Dimensione:":
                   size=value.upper()
                if key=="info_hash:":
                   hashvalue=value
             except:
                pass
          self.add_result(TNTVillagePluginResult(label,date,size,seeders,leechers,torrent_link.prop('href'),hashvalue))
       except:
          pass
       if self.stop_search:
          return
    if not self.stop_search:
       if htmltools.find_elements(tree.getRootElement(),"input",name="next"):
          stn=eval(htmltools.find_elements(tree.getRootElement(),"input", name="stn")[0].prop('value'))
          try:
             stp=eval(htmltools.find_elements(tree.getRootElement(),"input", name="stp")[0].prop('value'))
          except:
             stp=0
          self._run_search(pattern, stp, stn, False)
Example no. 28
 def _run_search(self,pattern,href=None):
    if href==None:
       href="http://www.nyaatorrents.org/?page=search&term="+urllib.quote_plus(pattern)
    resp,content=self.http_queue_request(href)
    tree=libxml2.htmlParseDoc(content,"utf-8")
    try:
       span=htmltools.find_elements(tree.getRootElement(),"span",**{'class':'notice'})[0]
       data=span.getContent()
       i=data.index(" ")
       self.results_count=eval(data[:i])
    except:
       pass
    restable=htmltools.find_elements(tree.getRootElement(),"table",**{'class':'tlist'})[0]
    lines=htmltools.find_elements(restable,"tr")[1:]
    for i in lines:
       try:
          cells=htmltools.find_elements(i,"td")
          name=cells[1]
          torrent_link=cells[2]
          size=cells[3]
          link=htmltools.find_elements(name,"a")[0]
          label=link.getContent().rstrip().lstrip()
          link=link.prop('href')
          torrent_link=htmltools.find_elements(torrent_link,"a")[0].prop('href')
          size=size.getContent().replace('i','')
          try:
             seeders=eval(cells[4].getContent())
          except:
             seeders=-1
          try:
             leechers=eval(cells[5].getContent())
          except:
             leechers=-1
          resp,content=self.http_queue_request(link)
          itemtree=libxml2.htmlParseDoc(content,"utf-8")
          tds=htmltools.find_elements(itemtree.getRootElement(),"td")
          date=""
          for j in tds:
             if j.getContent()=="Date:":
                date=j.next.getContent()
          j=date.index(",")
          date=date[:j]
          month,day,year=date.split(" ")
          month=["Jan","Feb","Mar","Apr","May","Jun","Jul","Aug","Sep","Oct","Nov","Dec"].index(month)+1
          while day[0]=="0":
             day=day[1:]
          day=eval(day)
          year=eval(year)
          date=datetime.date(year,month,day)
          self.add_result(NyaaTorrentsPluginResult(label,date,size,seeders,leechers,torrent_link))
       except:
          pass
       if self.stop_search:
          return
    if not self.stop_search:
       try:
          pager=htmltools.find_elements(tree.getRootElement(),"table",**{'class':'tlistpages'})[0]
          links=htmltools.find_elements(pager,"a",**{'class':'page'})
          next_link=None
          for i in links:
             if i.getContent()==">":
                next_link=i
                break
          url=next_link.prop('href')
          self._run_search(pattern,url)
       except:
          pass
Example no. 29
 def _run_search(self,pattern,href=None):
    if href==None:
       href="http://isohunt.com/torrents/?ihq="+urllib.quote_plus(pattern)
    resp,content=self.http_queue_request(href)
    tree=libxml2.htmlParseDoc(content,"utf-8")
    pager=htmltools.find_elements(tree.getRootElement(),"table",**{'class':'pager'})[0]
    try:
       b=htmltools.find_elements(pager,"b")[0]
       self.results_count=eval(b.getContent())
    except:
       pass
    restable=htmltools.find_elements(tree.getRootElement(),"table",id="serps")[0]
    lines=htmltools.find_elements(restable,"tr")[1:]
    for i in lines:
       try:
          category,age,links,size,seeders,leechers=htmltools.find_elements(i,"td")
          size=size.getContent()
          try:
             seeders=eval(seeders.getContent())
          except:
             seeders=0
          try:
             leechers=eval(leechers.getContent())
          except:
             leechers=0
          links=htmltools.find_elements(links,"a")
          link=links[1]
          br=htmltools.find_elements(link,"br")
          if br:
             label=""
             node=br[0].next
             while node:
                label+=node.getContent()
                node=node.next
          else:
             label=link.getContent()
          link=urllib.basejoin(href,link.prop('href'))
          if len(links)==3:
             link=link.replace('/download/','/torrent_details/')
          age=age.getContent()
          unit=age[-1]
          value=eval(age[:-1])
          if unit=="w":
             value*=7
          date=datetime.date.today()-datetime.timedelta(value)
          resp,content=self.http_queue_request(link)
          itemtree=libxml2.htmlParseDoc(content,"utf-8")
          torrent_link=None
          for i in htmltools.find_elements(itemtree.getRootElement(),"a"):
             if i.getContent()==" Download .torrent":
                torrent_link=i
          torrent_link=urllib.basejoin(link,torrent_link.prop('href'))
          span=htmltools.find_elements(itemtree.getRootElement(),"span",id="SL_desc")[0]
          data=span.getContent()[11:]
          j=data.index(" ")
          hashvalue=data[:j]
          self.add_result(isoHuntPluginResult(label,date,size,seeders,leechers,torrent_link,hashvalue))
       except:
          pass
       if self.stop_search:
          return
    if not self.stop_search:
       try:
          link=htmltools.find_elements(pager,"a",title="Next page")
          if link:
             self._run_search(pattern,urllib.basejoin(href,link[0].prop('href')))
       except:
          pass
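The age column ("3d", "2w", ...) is turned into an absolute date by subtracting it from today. A sketch covering the two units handled above, days and weeks:

import datetime

def age_to_date(age):
    # "3d" -> today minus 3 days; "2w" -> today minus 14 days.
    value, unit = int(age[:-1]), age[-1]
    if unit == "w":
        value *= 7
    return datetime.date.today() - datetime.timedelta(days=value)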
Example no. 30
 def _run_search(self,pattern,page=1,href=None):
    if href==None:
       href="http://www.yourbittorrent.com/?q="+urllib.quote_plus(pattern)
    c=httplib2.Http()
    resp,content=c.request(href)
    tree=libxml2.htmlParseDoc(content,"utf-8")
    try:
       self.results_count = int(htmltools.find_elements(htmltools.find_elements(tree.getRootElement(), "div", style="float:right;margin-top:15px")[0], "b")[2].getContent().rstrip().lstrip())
    except:
       pass
    lines = []
    for i in htmltools.find_elements(tree.getRootElement(), "td", id="n"):
       lines.append(i.parent)
    for i in lines:
       try:
          links,date,size,seeders,leechers,health=htmltools.find_elements(i,"td")
          dlink,ulink=htmltools.find_elements(links,"a")
          filelist = TorrentSearch.Plugin.FileList()
          poster = None
          try:
             c=httplib2.Http()
             resp,content=c.request(urllib.basejoin(href,ulink.prop('href')))
             itemtree=libxml2.htmlParseDoc(content,"utf-8")
             table=htmltools.find_elements(htmltools.find_elements(itemtree.getRootElement(),"div",id="content")[0],"table")[0]
             line=htmltools.find_elements(table,"tr")[1]
             cell=htmltools.find_elements(line,"td")[3]
             hashvalue=cell.getContent()
             h3s = htmltools.find_elements(itemtree.getRootElement(), "h3")
             files_h3 = None
             for h3 in h3s:
                if h3.getContent()=="Files":
                   files_h3 = h3
             if files_h3:
                for file_line in htmltools.find_elements(files_h3.next, "tr")[1:]:
                   try:
                      filepix,filename,filesize = htmltools.find_elements(file_line,"td")
                      filename=filename.getContent()
                      filesize=filesize.getContent()
                      filelist.append(filename,filesize)
                   except:
                      pass
             h1s = htmltools.find_elements(itemtree.getRootElement(), "h1")
             cover_h1 = None
             for h1 in h1s:
                if h1.getContent()=="Cover Art":
                   cover_h1 = h1
             if cover_h1:
                try:
                   poster = htmltools.find_elements(cover_h1.parent, "img")[0].prop("src")
                except:
                   pass
          except:
             hashvalue=None
          label=ulink.getContent()
          link=urllib.basejoin(href,dlink.prop('href'))
          size=size.getContent().upper()
          seeders=eval(seeders.getContent())
          leechers=eval(leechers.getContent())
          date=self._parseDate(date.getContent())
          self.add_result(yourBITTORRENTTorrentPluginResult(label,date,size,seeders,leechers,link,hashvalue,filelist,poster))
       except:
          pass
       if self.stop_search:
          return
    if not self.stop_search:
       try:
          try:
             pager=htmltools.find_elements(tree.getRootElement(),"div",**{"class":"pagnation_l"})[0]
          except:
             pager=None
          if pager:
             pages=htmltools.find_elements(pager,"a")
             i=0
             must_continue=False
             while i<len(pages) and not must_continue:
                p=pages[i]
                try:
                   pn=eval(pages[i].getContent())
                   if pn>page:
                      must_continue=True
                   else:
                      i+=1
                except:
                   i+=1
             if must_continue:
                url="http://www.yourbittorrent.com/?q=%s&page=%d"%(urllib.quote_plus(pattern),pn)
                self._run_search(pattern,pn,url)
       except:
          pass