Example 1
    def search(self, title):
        sources = []
        print("FREEDOC SEARCH", title)
        query = self.search_link % title
        query = urlparse.urljoin(self.base_link, query)
        r = client.request(query)
        r = BeautifulSoup(r)
        r = r.findAll('div', attrs={'class': 'film'})
        for items in r:

            url = items.findAll('a')[0]['href'].encode('utf-8')
            img = items.findAll('img')[0]['src'].encode('utf-8')
            title = url.split('/')[-1]
            title = cleantitle.get2(title)
            url = urlparse.urljoin(self.base_link, url)
            img = urlparse.urljoin(self.base_link, img)
            meta = {"poster": img, "title": title}
            meta = urllib.quote_plus(json.dumps(meta))

            sources.append({
                'title': title,
                'provider': self.name,
                'url': url,
                'poster': img,
                'meta': meta,
                'action': 'freedoc_resolve'
            })
        return sources
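
The `meta` field is built by JSON-encoding the poster/title pair and then URL-quoting the result, so whatever handles the `freedoc_resolve` action has to reverse both steps. A minimal round-trip sketch (Python 2 standard library only, sample values assumed, not taken from the provider code):

import json
import urllib

meta = {"poster": "http://example.com/poster.jpg", "title": "some title"}  # assumed sample values
encoded = urllib.quote_plus(json.dumps(meta))       # what search() stores in each source dict
decoded = json.loads(urllib.unquote_plus(encoded))  # what a resolve action would undo
assert decoded == meta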
Example 2
    def search(self, title):
        sources = []

        query = self.search_link % title
        query = urlparse.urljoin(self.base_link, query)
        r = client.request(query)
        posts = client.parseDOM(r, 'item')

        for items in posts:

            url = client.parseDOM(items, 'link')[0].encode('utf-8')
            title = client.parseDOM(items, 'title')[0].encode('utf-8')

            img = client.parseDOM(items, 'img', ret='src')[0].encode('utf-8')
            print("DOCHEAVEN SEARCH", url, title, img)

            title = cleantitle.get2(title)
            url = urlparse.urljoin(self.base_link, url)
            img = urlparse.urljoin(self.base_link, img)
            meta = {"poster": img, "title": title}
            meta = urllib.quote_plus(json.dumps(meta))

            sources.append({
                'title': title,
                'provider': self.name,
                'url': url,
                'poster': img,
                'meta': meta,
                'action': 'topdocs_resolve'
            })
        return sources
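
The providers above target Python 2: `urlparse.urljoin`, `urllib.quote_plus`, and byte-string `.encode('utf-8')` calls. Under Python 3 the same helpers live in `urllib.parse` and the explicit encoding is unnecessary. A hedged sketch of the equivalent imports and meta construction (placeholder values, not from the provider code):

import json
from urllib.parse import urljoin, quote_plus  # Python 3 homes of urlparse.urljoin / urllib.quote_plus

base_link = 'http://example.com'              # assumed placeholder
img = urljoin(base_link, '/img/poster.jpg')
meta = quote_plus(json.dumps({"poster": img, "title": "some title"}))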
Example 3
    def search(self, title):
        sources = []

        query = self.search_link % title
        query = urlparse.urljoin(self.base_link, query)
        r = client.request(query)
        query = BeautifulSoup(r)
        r = query.findAll('div', attrs={'class': 'item'})
        for items in r:
            href = items.findAll('a')[0]['href'].encode('utf-8')
            img = items.findAll('img')[0]['src'].encode('utf-8')
            title = items.findAll('img')[0]['alt'].encode('utf-8')
            title = cleantitle.get2(title)
            href = urlparse.urljoin(self.base_link, href)
            img = urlparse.urljoin(self.base_link, img)
            meta = {"poster": img, "title": title}
            meta = urllib.quote_plus(json.dumps(meta))

            sources.append({
                'title': title,
                'provider': self.name,
                'url': href,
                'poster': img,
                'meta': meta,
                'action': 'docstorm_resolve'
            })
        return sources
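
For reference, the same `div.item` extraction written against BeautifulSoup 4, with an explicit parser and the snake_case API; the sample markup and values are assumed, not from the target site:

from bs4 import BeautifulSoup

# Assumed sample markup in the shape Example 3 expects.
html = '<div class="item"><a href="/doc/some-title/"><img src="/img/poster.jpg" alt="Some Title"></a></div>'
soup = BeautifulSoup(html, 'html.parser')
for item in soup.find_all('div', class_='item'):
    href = item.find('a')['href']
    img = item.find('img')['src']
    title = item.find('img')['alt']
    print(href, img, title)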
Example 4
    def cat(self, url):

        try:

            r = client.request(url)
            query = BeautifulSoup(r)
            r = query.findAll('div', attrs={'class': 'post-thumbnail'})
            for items in r:
                href = items.findAll('a')[0]['href'].encode('utf-8')
                img = items.findAll('img')[0]['src'].encode('utf-8')
                title = items.findAll('a')[0]['title'].encode('utf-8')
                title = cleantitle.get2(title)
                href = urlparse.urljoin(self.base_link, href)
                img = urlparse.urljoin(self.base_link, img)
                meta = {"poster": img, "title": title}
                meta = urllib.quote_plus(json.dumps(meta))
                control.addDirMeta(title, href, 'docheaven_resolve', img,
                                   control.fanart, meta)

        except:
            pass

        try:
            n = query.findAll('div', attrs={'class': 'numeric-nav'})
            for x in n:
                pages = x.findAll('a')
                for p in pages:
                    page = p['href'].encode('utf-8')
                    page = urlparse.urljoin(self.base_link, page)
                    page_title = p.string
                    if not page == url:
                        control.addDir("[COLOR yellow]PAGE:[/COLOR] " + page_title,
                                       page, 'docheaven_cat', control.fanart,
                                       control.fanart)
        except:
            pass
Example 5
    def cat(self, url):

        try:

            r = client.request(url)
            query = BeautifulSoup(r)
            r = query.findAll('div', attrs={'class': 'item'})
            for items in r:
                href = items.findAll('a')[0]['href'].encode('utf-8')
                img = items.findAll('img')[0]['src'].encode('utf-8')
                title = items.findAll('img')[0]['alt'].encode('utf-8')
                title = cleantitle.get2(title)
                href = urlparse.urljoin(self.base_link, href)
                img = urlparse.urljoin(self.base_link, img)
                meta = {"poster": img, "title": title}
                meta = urllib.quote_plus(json.dumps(meta))
                control.addDirMeta(title, href, 'docstorm_resolve', img,
                                   control.fanart, meta)

        except:
            pass

        try:
            n = query.findAll('link', attrs={'rel': 'next'})
            for p in n:
                page = p['href'].encode('utf-8')
                page_title = page.split('/')[-1]
                if page_title == '': page_title = '1'
                page = urlparse.urljoin(self.base_link, page)
                if not page == url:
                    control.addDir("[COLOR yellow]PAGE:[/COLOR] " + page_title,
                                   page, 'docstorm_cat', control.fanart,
                                   control.fanart)
        except:
            pass

        try:
            n = query.findAll('link', attrs={'rel': 'prev'})
            for p in n:
                page = p['href'].encode('utf-8')
                page_title = page.split('/')[-1]
                if page_title == '': page_title = '1'
                page = urlparse.urljoin(self.base_link, page)
                if not page == url:
                    control.addDir("[COLOR yellow]PAGE:[/COLOR] " + page_title,
                                   page, 'docstorm_cat', control.fanart,
                                   control.fanart)
        except:
            pass
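
Pagination in this provider follows the page's `<link rel="next">` / `<link rel="prev">` tags rather than a visible navigation block. A small self-contained sketch of that lookup with BeautifulSoup 4 (the markup is invented; note that bs4 returns `rel` as a list of values):

from bs4 import BeautifulSoup

html = '''<head>
<link rel="prev" href="/documentaries/page/1/">
<link rel="next" href="/documentaries/page/3/">
</head>'''

soup = BeautifulSoup(html, 'html.parser')
for tag in soup.find_all('link'):
    rel = tag.get('rel') or []            # multi-valued attribute, e.g. ['next']
    if 'next' in rel or 'prev' in rel:
        print(rel, tag.get('href'))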
Example 6
    def cat(self, url):

        r = client.request(url)
        r = BeautifulSoup(r)
        r = r.findAll('div', attrs={'class': 'film'})
        for items in r:
            print("FREEDOC 1", items)
            url = items.findAll('a')[0]['href'].encode('utf-8')
            img = items.findAll('img')[0]['src'].encode('utf-8')
            title = url.split('/')[-1]
            title = cleantitle.get2(title)
            url = urlparse.urljoin(self.base_link, url)
            img = urlparse.urljoin(self.base_link, img)
            meta = {"poster": img, "title": title}
            meta = urllib.quote_plus(json.dumps(meta))
            control.addDirMeta(title, url, 'freedoc_resolve', img, control.fanart, meta)