Example #1
    def sources(self, url, hostDict, hostprDict):
        sources = []

        try:
            if not url:
                return sources

            r = client.request(urlparse.urljoin(self.base_link, url))

            links = dom_parser.parse_dom(r, 'table')
            links = [
                i.content for i in links if dom_parser.parse_dom(
                    i, 'span', attrs={'class': re.compile('linkSearch(-a)?')})
            ]
            links = re.compile('(<a.+?/a>)', re.DOTALL).findall(''.join(links))
            links = [
                dom_parser.parse_dom(i, 'a', req='href') for i in links
                if re.findall('(.+?)\s*\(\d+\)\s*<', i)
            ]
            links = [i[0].attrs['href'] for i in links if i]

            url = re.sub('/streams-\d+', '', url)

            for link in links:
                if '/englisch/' in link: continue
                control.sleep(3000)
                if link != url:
                    r = client.request(urlparse.urljoin(self.base_link, link))

                quality = 'SD'
                info = []

                detail = dom_parser.parse_dom(r,
                                              'th',
                                              attrs={'class': 'thlink'})
                detail = [
                    dom_parser.parse_dom(i, 'a', req='href') for i in detail
                ]
                detail = [(i[0].attrs['href'],
                           i[0].content.replace('&#9654;', '').strip())
                          for i in detail if i]

                if detail:
                    quality, info = source_utils.get_release_quality(
                        detail[0][1])
                    r = client.request(
                        urlparse.urljoin(self.base_link, detail[0][0]))

                r = dom_parser.parse_dom(r, 'table')
                r = [
                    dom_parser.parse_dom(i, 'a', req=['href', 'title'])
                    for i in r if not dom_parser.parse_dom(i, 'table')
                ]
                r = [(l.attrs['href'], l.attrs['title']) for i in r for l in i
                     if l.attrs['title']]

                info = ' | '.join(info)

                for stream_link, hoster in r:
                    valid, hoster = source_utils.is_host_valid(
                        hoster, hostDict)
                    if not valid: continue

                    direct = False

                    if hoster.lower() == 'gvideo':
                        direct = True

                    sources.append({
                        'source': hoster,
                        'quality': quality,
                        'language': 'de',
                        'url': stream_link,
                        'info': info,
                        'direct': direct,
                        'debridonly': False,
                        'checkquality': True
                    })

            return sources
        except:
            return sources
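
Most examples on this page implement the same scraper interface: sources(url, hostDict, hostprDict) returns a list of dicts with 'source', 'quality', 'language', 'url', 'direct' and 'debridonly' keys. A minimal sketch of a caller follows; it assumes one of the scraper classes above or below has been imported (conventionally named source in these addons), and the hoster lists and url argument are illustrative, not taken from the examples.

# Hypothetical caller; 'source' stands in for any provider class on this page.
provider = source()
hostDict = ['gvideo', 'vidcloud']                   # assumed free hosters
hostprDict = ['rapidgator.net', 'nitroflare.com']   # assumed premium hosters

for item in provider.sources('/filme/some-title/streams-1', hostDict, hostprDict):
    print(item['source'], item['quality'], item['language'], item['url'])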
Example #2
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            title = data['tvshowtitle'] if 'tvshowtitle' in data else data[
                'title']

            url = urlparse.urljoin(
                self.base_link,
                self.search_link % urllib.quote_plus(cleantitle.query(title)))

            if 'tvshowtitle' in data:
                html = self.scraper.get(url).content

                match = re.compile(
                    'class="post-item.+?href="(.+?)" title="(.+?)"',
                    re.DOTALL).findall(html)
                for url, item_name in match:
                    if cleantitle.getsearch(title).lower(
                    ) in cleantitle.getsearch(item_name).lower():
                        season_url = '%02d' % int(data['season'])
                        episode_url = '%02d' % int(data['episode'])
                        sea_epi = 'S%sE%s' % (season_url, episode_url)

                        result = self.scraper.get(url).content
                        regex = re.compile('href="(.+?)"',
                                           re.DOTALL).findall(result)
                        for ep_url in regex:
                            if sea_epi in ep_url:
                                quality, info = source_utils.get_release_quality(
                                    url)
                                sources.append({
                                    'source': 'CDN',
                                    'quality': quality,
                                    'language': 'en',
                                    'url': ep_url,
                                    'direct': False,
                                    'debridonly': False
                                })
            else:
                html = self.scraper.get(url).content
                match = re.compile(
                    '<div class="thumbnail".+?href="(.+?)" title="(.+?)"',
                    re.DOTALL).findall(html)

                for url, item_name in match:
                    if cleantitle.getsearch(title).lower(
                    ) in cleantitle.getsearch(item_name).lower():
                        quality, info = source_utils.get_release_quality(url)
                        result = self.scraper.get(url).content
                        regex = re.compile('href="/download.php.+?link=(.+?)"',
                                           re.DOTALL).findall(result)

                        for link in regex:
                            if 'server=' not in link:
                                try:
                                    link = base64.b64decode(link)
                                except Exception:
                                    pass
                                try:
                                    host = link.split('//')[1].replace(
                                        'www.', '')
                                    host = host.split('/')[0].lower()
                                except Exception:
                                    pass
                                if not self.filter_host(host):
                                    continue
                                sources.append({
                                    'source': host,
                                    'quality': quality,
                                    'language': 'en',
                                    'url': link,
                                    'direct': False,
                                    'debridonly': False
                                })

            return sources
        except Exception:
            failure = traceback.format_exc()
            log_utils.log('ExtraMovie - Exception: \n' + str(failure))
            return sources
Example #3
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url is None:
                return sources

            if debrid.status() is False:
                return sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            title = data['tvshowtitle'] if 'tvshowtitle' in data else data[
                'title']
            title = title.replace('&', 'and').replace('Special Victims Unit',
                                                      'SVU')

            hdlr = 'S%02dE%02d' % (int(data['season']), int(
                data['episode'])) if 'tvshowtitle' in data else data['year']

            query = '%s %s' % (title, hdlr)
            query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', '', query)

            url = self.search_link % urllib.quote_plus(query)
            url = urlparse.urljoin(self.base_link, url)
            # log_utils.log('url = %s' % url, log_utils.LOGDEBUG)

            try:
                r = self.scraper.get(url).content
                posts = client.parseDOM(r, 'li')

                for post in posts:
                    link = re.findall(
                        'a title="Download using magnet" href="(magnet:.+?)"',
                        post, re.DOTALL)

                    for url in link:
                        url = url.split('&tr')[0]

                        if any(x in url.lower() for x in [
                                'french', 'italian', 'spanish', 'truefrench',
                                'dublado', 'dubbed'
                        ]):
                            continue

                        name = url.split('&dn=')[1]

                        if name.startswith('www.'):
                            try:
                                name = name.split(' - ')[1].lstrip()
                            except:
                                name = re.sub(r'www\..+? ', '', name)

                        t = name.split(hdlr)[0].replace(
                            data['year'],
                            '').replace('(',
                                        '').replace(')',
                                                    '').replace('&', 'and')
                        if cleantitle.get(t) != cleantitle.get(title):
                            continue

                        if hdlr not in name:
                            continue

                        quality, info = source_utils.get_release_quality(url)

                        try:
                            size = re.findall(
                                '((?:\d+\,\d+\.\d+|\d+\.\d+|\d+\,\d+|\d+)\s*(?:GiB|MiB|GB|MB))',
                                post)[0]
                            div = 1 if size.endswith('GB') else 1024
                            size = float(
                                re.sub('[^0-9|/.|/,]', '',
                                       size.replace(',', '.'))) / div
                            size = '%.2f GB' % size
                            info.append(size)
                        except:
                            pass

                        info = ' | '.join(info)

                        sources.append({
                            'source': 'torrent',
                            'quality': quality,
                            'language': 'en',
                            'url': url,
                            'info': info,
                            'direct': False,
                            'debridonly': True
                        })
            except:
                source_utils.scraper_error('BTDB')
                return sources

            return sources

        except:
            source_utils.scraper_error('BTDB')
            return sources
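
Several of these scrapers (Examples #3, #9, #10, #12 and #16) extract the display name and info-hash from magnet links by hand via url.split('&dn=') and a btih regex. A standalone sketch of that recurring idiom follows; the helper name parse_magnet is made up for illustration.

import re
from urllib.parse import unquote_plus  # on Python 2: from urllib import unquote_plus

def parse_magnet(url):
    # Mirror the scrapers' pattern: drop trackers, then read dn= and btih.
    url = url.split('&tr')[0]
    name = unquote_plus(url.split('&dn=')[1]) if '&dn=' in url else ''
    match = re.search('btih:(.*?)&', url + '&')
    info_hash = match.group(1) if match else ''
    return name, info_hash

print(parse_magnet('magnet:?xt=urn:btih:ABCDEF123&dn=Some.Show.S01E01.720p&tr=udp://tracker'))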
Example #4
 def sources(self, url, hostDict, hostprDict):
     try:
         sources = []
         if url is None: return sources
         if debrid.status() is False: raise Exception()
         data = urlparse.parse_qs(url)
         data = dict([(i, data[i][0]) if data[i] else (i, '')
                      for i in data])
         title = data['tvshowtitle'] if 'tvshowtitle' in data else data[
             'title']
         hdlr = 'S%02dE%02d' % (int(data['season']), int(
             data['episode'])) if 'tvshowtitle' in data else data['year']
         category = '+category%3ATV' if 'tvshowtitle' in data else '+category%3AMovies'
         query = '%s S%02dE%02d' % (
             data['tvshowtitle'], int(data['season']),
             int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % (
                 data['title'], data['year'])
         query = re.sub('(\\\|/| -|:|;|\*|\?|"|<|>|\|)', ' ', query)
         url = self.search_link % urllib.quote_plus(query)
         url = urlparse.urljoin(self.base_link, url) + str(category)
         html = client.request(url)
         html = html.replace('&nbsp;', ' ')
         try:
             results = \
                 client.parseDOM(html, 'table', attrs={'class': 'table table-condensed table-torrents vmiddle'})[0]
         except Exception:
             return sources
         rows = re.findall('<tr(.+?)</tr>', results, re.DOTALL)
         if not rows:
             return sources
         for entry in rows:
             try:
                 try:
                     name = re.findall('<a class=".+?>(.+?)</a>', entry,
                                       re.DOTALL)[0]
                     name = client.replaceHTMLCodes(name).replace(
                         '<hl>', '').replace('</hl>', '')
                     if not cleantitle.get(title) in cleantitle.get(name):
                         continue
                 except Exception:
                     continue
                 y = re.findall(
                     '[\.|\(|\[|\s](\d{4}|S\d*E\d*|S\d*)[\.|\)|\]|\s]',
                     name)[-1].upper()
                 if not y == hdlr:
                     continue
                 try:
                     seeders = int(
                         re.findall(
                             'class="progress prog trans90" title="Seeders: (.+?) \|',
                             entry, re.DOTALL)[0])
                 except Exception:
                     continue
                 if self.min_seeders > seeders:
                     continue
                 try:
                     link = 'magnet:%s' % (re.findall(
                         'href="magnet:(.+?)"', entry, re.DOTALL)[0])
                     link = str(
                         client.replaceHTMLCodes(link).split('&tr')[0])
                 except Exception:
                     continue
                 quality, info = source_utils.get_release_quality(
                     name, name)
                 try:
                     size = re.findall(
                         '((?:\d+\.\d+|\d+\,\d+|\d+)\s*(?:GB|GiB|MB|MiB))',
                         entry)[-1]
                     div = 1 if size.endswith(('GB', 'GiB')) else 1024
                     size = float(re.sub('[^0-9|/.|/,]', '', size)) / div
                     size = '%.2f GB' % size
                     info.append(size)
                 except Exception:
                     pass
                 info = ' | '.join(info)
                 sources.append({
                     'source': 'Torrent',
                     'quality': quality,
                     'language': 'en',
                     'url': link,
                     'info': info,
                     'direct': False,
                     'debridonly': True
                 })
             except Exception:
                 continue
         check = [i for i in sources if not i['quality'] == 'CAM']
         if check:
             sources = check
         return sources
     except Exception:
         return sources
Example #5
	def get_sources(self, link):
		try:
			url = re.compile('href="(.+?)"').findall(link)[0]
			url = '%s%s' % (self.base_link, url)
			result = client.request(url)
			if result is None:
				return
			if 'magnet' not in result:
				return

			url = 'magnet:%s' % (re.findall('a href="magnet:(.+?)"', result, re.DOTALL)[0])
			try:
				url = unquote_plus(url).decode('utf8').replace('&amp;', '&').replace(' ', '.')
			except:
				pass
			url = url.split('&xl=')[0]
			if url in str(self.sources):
				return

			hash = re.compile('btih:(.*?)&').findall(url)[0]

			name = url.split('&dn=')[1]
			name = re.sub('[^A-Za-z0-9]+', '.', name).lstrip('.')
			if name.startswith('www'):
				try:
					name = re.sub(r'www(.*?)\W{2,10}', '', name)
				except:
					name = name.split('-.', 1)[1].lstrip()

			if source_utils.remove_lang(name):
				return

			match = source_utils.check_title(self.title, name, self.hdlr, self.year)
			if not match:
				return

			try:
				seeders = int(re.findall(r'<b>Seeds: </b>.*?>([0-9]+|[0-9]+,[0-9]+)</font>', result, re.DOTALL)[0].replace(',', ''))
				if self.min_seeders > seeders:
					return
			except:
				seeders = 0
				pass

			quality, info = source_utils.get_release_quality(name, url)

			try:
				size = re.findall(r'<b>Total Size:</b></td><td>(.*?)</td>', result, re.DOTALL)[0].strip()
				dsize, isize = source_utils._size(size)
				info.insert(0, isize)
			except:
				dsize = 0
				pass

			info = ' | '.join(info)

			self.sources.append({'source': 'torrent', 'seeders': seeders, 'hash': hash, 'name': name, 'quality': quality,
											'language': 'en', 'url': url, 'info': info, 'direct': False, 'debridonly': True, 'size': dsize})
		except:
			source_utils.scraper_error('ETTV')
			pass
Example #6
	def sources(self, url, hostDict, hostprDict):
		scraper = cfscrape.create_scraper()
		sources = []
		try:
			if url is None:
				return sources

			if debrid.status() is False:
				return sources

			data = parse_qs(url)
			data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

			q = '%s' % cleantitle.get_gan_url(data['title'])
			url = self.base_link + self.search_link % q
			# log_utils.log('url = %s' % url, log_utils.LOGDEBUG)

			r = scraper.get(url).content
			v = re.compile('<a href="(.+?)" class="ml-mask jt" title="View(.+?)">\s+<span class=".+?">(.+?)</span>').findall(r)
			t = '%s (%s)' % (data['title'], data['year'])

			for url, name, qual in v:
				if t not in name:
					continue
				item = client.request(url)
				item = client.parseDOM(item, 'div', attrs={'class': 'mvici-left'})[0]
				details = re.compile('<strong>Movie Source.*\s*.*/Person">(.*)</').findall(item)[0]

				name = re.sub('[^A-Za-z0-9]+', '.', name).lstrip('.')
				if source_utils.remove_lang(name):
					continue

				key = url.split('-hd')[1]
				r = scraper.get('https://soapgate.online/moviedownload.php?q=' + key).content
				r = re.compile('<a rel=".+?" href="(.+?)" target=".+?">').findall(r)

				for url in r:
					if any(x in url for x in ['.rar', '.zip', '.iso']):
						continue

					quality, info = source_utils.get_release_quality(qual)

					try:
						size = re.findall('((?:\d+\,\d+\.\d+|\d+\.\d+|\d+\,\d+|\d+)\s*(?:GB|GiB|Gb|MB|MiB|Mb))', item)[0]
						dsize, isize = source_utils._size(size)
						info.insert(0, isize)
					except:
						dsize = 0
						pass

					fileType = source_utils.getFileType(details)
					info.append(fileType)
					info = ' | '.join(info) if fileType else info[0]

					valid, host = source_utils.is_host_valid(url, hostDict)
					if not valid:
						continue

					sources.append({'source': host, 'quality': quality, 'info': info, 'language': 'en', 'url': url, 'direct': False,
										'debridonly': True, 'size': dsize})
			return sources
		except:
			source_utils.scraper_error('GANOOL')
			return sources
Example #7
 def sources(self, url, hostDict, hostprDict):
     try:
         sources = []
         if url is None:
             return sources
         data = urlparse.parse_qs(url)
         data = dict([(i, data[i][0]) if data[i] else (i, '')
                      for i in data])
         aliases = eval(data['aliases'])
         if 'tvshowtitle' in data:
             ep = data['episode']
             url = '%s/film/%s-season-%01d/watching.html?ep=%s' % (
                 self.base_link, cleantitle.geturl(
                     data['tvshowtitle']), int(data['season']), ep)
             r = client.request(url, timeout='10', output='geturl')
             if url is None:
                 url = self.searchShow(data['tvshowtitle'], data['season'],
                                       aliases)
         else:
             url = self.searchMovie(data['title'], data['year'], aliases)
             if url is None:
                 url = '%s/film/%s/watching.html?ep=0' % (
                     self.base_link, cleantitle.geturl(data['title']))
         if url is None:
             raise Exception()
         r = client.request(url, timeout='10')
         r = client.parseDOM(r, 'div', attrs={'class': 'les-content'})
         if 'tvshowtitle' in data:
             ep = data['episode']
             links = client.parseDOM(r,
                                     'a',
                                     attrs={'episode-data': ep},
                                     ret='player-data')
         else:
             links = client.parseDOM(r, 'a', ret='player-data')
         for link in links:
             link = "https:" + link if not link.startswith('http') else link
             if 'vidcloud' in link:
                 r = client.request(link, timeout='10')
                 match = getSum.findSum(r)
                 for url in match:
                     url = "https:" + url if not url.startswith(
                         'http') else url
                     url = requests.get(
                         url).url if 'api.vidnode' in url else url
                     valid, host = source_utils.is_host_valid(url, hostDict)
                     if valid:
                         quality, info = source_utils.get_release_quality(
                             url, url)
                         sources.append({
                             'source': host,
                             'quality': quality,
                             'language': 'en',
                             'info': info,
                             'url': url,
                             'direct': False,
                             'debridonly': False
                         })
             else:
                 valid, host = source_utils.is_host_valid(link, hostDict)
                 if valid:
                     quality, info = source_utils.get_release_quality(
                         link, link)
                     sources.append({
                         'source': host,
                         'quality': quality,
                         'language': 'en',
                         'info': info,
                         'url': link,
                         'direct': False,
                         'debridonly': False
                     })
         return sources
     except:
         return sources
Example #8
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []
            if url is None: return sources
            if debrid.status() is False: raise Exception()
            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])
            query = '%s S%02dE%02d' % (
                data['tvshowtitle'], int(data['season']), int(data['episode'])) \
                if 'tvshowtitle' in data else '%s' % (data['title'])
            url = self.search_link % urllib.quote_plus(query)
            url = urlparse.urljoin(self.base_link, url).replace('-', '+')
            r = self.scraper.get(url).content
            posts = client.parseDOM(r,
                                    "div",
                                    attrs={"class": "postpage_movie_download"})
            hostDict = hostprDict + hostDict
            items = []
            for post in posts:
                try:
                    u = client.parseDOM(post, 'a', ret='href')
                    for i in u:
                        items.append(i)
                except:
                    pass

            seen_urls = set()
            for item in items:
                try:
                    i = str(item)
                    r = client.request(i)
                    u = client.parseDOM(r,
                                        "div",
                                        attrs={"class": "multilink_lnks"})
                    for t in u:
                        r = client.parseDOM(t, 'a', ret='href')
                        for url in r:
                            if 'www.share-online.biz' in url:
                                continue
                            if url in seen_urls:
                                continue
                            seen_urls.add(url)
                            quality, info = source_utils.get_release_quality(
                                url)
                            if 'SD' in quality:
                                continue
                            valid, host = source_utils.is_host_valid(
                                url, hostDict)
                            sources.append({
                                'source': host,
                                'quality': quality,
                                'language': 'en',
                                'url': url,
                                'info': info,
                                'direct': False,
                                'debridonly': True
                            })
                except:
                    pass
            return sources
        except:
            return sources
Example #9
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url is None:
                return sources

            if debrid.status() is False:
                return sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            title = data['tvshowtitle'] if 'tvshowtitle' in data else data[
                'title']
            title = title.replace('&', 'and').replace('Special Victims Unit',
                                                      'SVU')

            hdlr = 'S%02dE%02d' % (int(data['season']), int(
                data['episode'])) if 'tvshowtitle' in data else data['year']

            query = '%s %s' % (title, hdlr)
            query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', '', query)

            url = self.search_link % urllib.quote_plus(query)
            url = urlparse.urljoin(self.base_link, url)
            # log_utils.log('url = %s' % url, log_utils.LOGDEBUG)

            try:
                r = client.request(url)
                if '<tbody' not in r:
                    return sources

                posts = client.parseDOM(r, 'tbody')[0]
                posts = client.parseDOM(posts, 'tr')

                for post in posts:
                    link = re.findall('a href="(magnet:.+?)" title="(.+?)"',
                                      post, re.DOTALL)

                    try:
                        size = re.findall(
                            '((?:\d+\,\d+\.\d+|\d+\.\d+|\d+\,\d+|\d+)\s*(?:GiB|MiB|GB|MB))',
                            post)[0]
                        dsize, isize = source_utils._size(size)
                    except:
                        isize = '0'
                        dsize = 0

                    for url, ref in link:
                        url = url.split('&tr')[0]

                        name = url.split('&dn=')[1]
                        name = urllib.unquote_plus(name).replace(' ', '.')
                        if source_utils.remove_lang(name):
                            continue

                        if name.startswith('www.'):
                            try:
                                name = name.split(' - ')[1].lstrip()
                            except:
                                name = re.sub(r'www\..+? ', '', name)

                        if 'extramovies.wiki' in name.lower():
                            name = name.split(' - ')[1].lstrip()

                        t = name.split(hdlr)[0].replace(
                            data['year'],
                            '').replace('(', '').replace(')', '').replace(
                                '&', 'and').replace('.US.',
                                                    '.').replace('.us.', '.')
                        if cleantitle.get(t) != cleantitle.get(title):
                            continue

                        if hdlr not in name:
                            continue

                        quality, info = source_utils.get_release_quality(
                            name, url)

                        info.insert(0, isize)
                        info = ' | '.join(info)

                        sources.append({
                            'source': 'torrent',
                            'quality': quality,
                            'language': 'en',
                            'url': url,
                            'info': info,
                            'direct': False,
                            'debridonly': True,
                            'size': dsize
                        })

                return sources

            except:
                source_utils.scraper_error('SKYTORRENTS')
                return sources

        except:
            source_utils.scraper_error('SKYTORRENTS')
            return sources
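
Most examples repeat the same size-normalisation idiom: regex out a '1.4 GB' / '700 MB' token, then divide megabyte values by 1024. A sketch of that logic in isolation, under the scrapers' own assumptions (comma treated as a decimal separator, GiB treated as GB):

import re

def size_to_gb(text):
    # Same pattern the scrapers use: find the first size token, normalise to GB.
    match = re.search(r'((?:\d+[.,])?\d+)\s*(GiB|MiB|GB|MB)', text)
    if not match:
        return 0.0
    value = float(match.group(1).replace(',', '.'))
    div = 1 if match.group(2) in ('GB', 'GiB') else 1024
    return value / div

print('%.2f GB' % size_to_gb('Total Size: 700 MB'))  # -> 0.68 GB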
Example #10
    def sources_packs(self,
                      url,
                      hostDict,
                      hostprDict,
                      search_series=False,
                      total_seasons=None,
                      bypass_filter=False):
        sources = []
        self.bypass_filter = bypass_filter

        if search_series:  # torrentapi does not have showPacks
            return sources
        try:
            if url is None:
                return sources
            if debrid.status() is False:
                return sources

            data = parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            self.title = data['tvshowtitle'].replace('&', 'and').replace(
                'Special Victims Unit', 'SVU')
            self.aliases = data['aliases']
            self.year = data['year']
            self.season_x = data['season']
            self.season_xx = self.season_x.zfill(2)

            query = re.sub('[^A-Za-z0-9\s\.-]+', '', self.title)
            search_link = self.tvsearch.format(
                self.key, quote_plus(query + ' S%s' % self.season_xx))
            # log_utils.log('search_link = %s' % str(search_link), __name__, log_utils.LOGDEBUG)

            time.sleep(2.1)
            rjson = client.request(search_link, error=True)
            if not rjson or 'torrent_results' not in str(rjson):
                return sources

            files = json.loads(rjson)['torrent_results']
            for file in files:
                url = file["download"]
                url = url.split('&tr')[0]
                hash = re.compile('btih:(.*?)&').findall(url)[0]

                name = file["title"]
                name = unquote_plus(name)
                name = source_utils.clean_name(self.title, name)
                if source_utils.remove_lang(name):
                    continue

                if not self.bypass_filter:
                    if not source_utils.filter_season_pack(
                            self.title, self.aliases, self.year, self.season_x,
                            name):
                        continue
                package = 'season'

                try:
                    seeders = int(file["seeders"])
                    if self.min_seeders > seeders:
                        continue
                except:
                    seeders = 0
                    pass

                quality, info = source_utils.get_release_quality(name, name)

                try:
                    dsize, isize = source_utils.convert_size(file["size"],
                                                             to='GB')
                    info.insert(0, isize)
                except:
                    dsize = 0
                    pass

                info = ' | '.join(info)

                sources.append({
                    'source': 'torrent',
                    'seeders': seeders,
                    'hash': hash,
                    'name': name,
                    'quality': quality,
                    'language': 'en',
                    'url': url,
                    'info': info,
                    'direct': False,
                    'debridonly': True,
                    'size': dsize,
                    'package': package
                })
            return sources
        except:
            source_utils.scraper_error('TORRENTAPI')
            return sources
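
Examples #10 and #12 index into the torrentapi JSON response directly by key. Below is a hypothetical payload showing only the fields the code actually reads ("download", "title", "seeders", "size"); the values are invented.

import json

rjson = '''{"torrent_results": [
    {"title": "Some.Show.S01.1080p.WEB.x264",
     "download": "magnet:?xt=urn:btih:ABC123&dn=Some.Show.S01.1080p&tr=udp://tracker",
     "seeders": 42,
     "size": 3221225472}
]}'''

files = json.loads(rjson)['torrent_results']
for file in files:
    print(file['title'], file['seeders'], file['size'])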
Example #11
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url is None:
                return sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

            title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']

            hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else data['year']

            query = '%s S%02dE%02d' % (
                data['tvshowtitle'],
                int(data['season']),
                int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % (
                data['title'],
                data['year'])
            query = re.sub('(\\\|/| -|:|;|\*|\?|"|<|>|\|)', ' ', query)
            url = self.search_link % urllib.quote_plus(query)
            url = urlparse.urljoin(self.base_link, url)
            html = client.request(url)
            html = html.replace('&nbsp;', ' ')
            try:
                results = client.parseDOM(html, 'table', attrs={'id': 'searchResult'})[0]
            except Exception:
                return sources
            rows = re.findall('<tr(.+?)</tr>', results, re.DOTALL)
            if not rows:
                return sources

            for entry in rows:
                try:
                    try:
                        name = re.findall('class="detLink" title=".+?">(.+?)</a>', entry, re.DOTALL)[0]
                        name = client.replaceHTMLCodes(name)
                        # t = re.sub('(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*|3D)(\.|\)|\]|\s|)(.+|)', '', name, flags=re.I)
                        if not cleantitle.get(title) in cleantitle.get(name):
                            continue
                    except Exception:
                        continue
                    y = re.findall('[\.|\(|\[|\s](\d{4}|S\d*E\d*|S\d*)[\.|\)|\]|\s]', name)[-1].upper()
                    if not y == hdlr:
                        continue

                    try:
                        seeders = int(re.findall('<td align="right">(.+?)</td>', entry, re.DOTALL)[0])
                    except Exception:
                        continue
                    if self.min_seeders > seeders:
                        continue

                    try:
                        link = 'magnet:%s' % (re.findall('a href="magnet:(.+?)"', entry, re.DOTALL)[0])
                        link = str(client.replaceHTMLCodes(link).split('&tr')[0])
                    except Exception:
                        continue

                    quality, info = source_utils.get_release_quality(name, name)

                    try:
                        size = re.findall('((?:\d+\.\d+|\d+\,\d+|\d+)\s*(?:GB|GiB|MB|MiB))', entry)[-1]
                        div = 1 if size.endswith(('GB', 'GiB')) else 1024
                        size = float(re.sub('[^0-9|/.|/,]', '', size)) / div
                        size = '%.2f GB' % size
                        info.append(size)
                    except Exception:
                        pass

                    info = ' | '.join(info)
                    sources.append({'source': 'Torrent', 'quality': quality, 'language': 'en',
                                    'url': link, 'info': info, 'direct': False, 'debridonly': True})
                except Exception:
                    failure = traceback.format_exc()
                    log_utils.log('TPB - Cycle Broken: \n' + str(failure))
                    continue

            check = [i for i in sources if not i['quality'] == 'CAM']
            if check:
                sources = check

            return sources
        except Exception:
            failure = traceback.format_exc()
            log_utils.log('TPB - Exception: \n' + str(failure))
            return sources
Example #12
    def sources(self, url, hostDict, hostprDict):
        sources = []
        try:
            if url is None:
                return sources

            if debrid.status() is False:
                return sources

            data = parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            title = data['tvshowtitle'] if 'tvshowtitle' in data else data[
                'title']
            title = title.replace('&', 'and').replace('Special Victims Unit',
                                                      'SVU')
            aliases = data['aliases']
            episode_title = data['title'] if 'tvshowtitle' in data else None
            hdlr = 'S%02dE%02d' % (int(data['season']), int(
                data['episode'])) if 'tvshowtitle' in data else data['year']

            query = '%s %s' % (title, hdlr)
            query = re.sub('[^A-Za-z0-9\s\.-]+', '', query)

            if 'tvshowtitle' in data:
                search_link = self.tvsearch.format(self.key, quote_plus(query))
            else:
                search_link = self.msearch.format(self.key, data['imdb'])
            # log_utils.log('search_link = %s' % search_link, log_utils.LOGDEBUG)

            time.sleep(2.1)
            rjson = client.request(search_link, error=True)
            if not rjson or 'torrent_results' not in str(rjson):
                return sources

            files = json.loads(rjson)['torrent_results']

            for file in files:
                url = file["download"]
                url = url.split('&tr')[0]
                hash = re.compile('btih:(.*?)&').findall(url)[0]

                name = file["title"]
                name = unquote_plus(name)
                name = source_utils.clean_name(title, name)
                if source_utils.remove_lang(name, episode_title):
                    continue

                if not source_utils.check_title(title, aliases, name, hdlr,
                                                data['year']):
                    continue

                # filter for episode multi packs (ex. S01E01-E17 is also returned in query)
                if episode_title:
                    if not source_utils.filter_single_episodes(hdlr, name):
                        continue

                try:
                    seeders = int(file["seeders"])
                    if self.min_seeders > seeders:
                        continue
                except:
                    seeders = 0
                    pass

                quality, info = source_utils.get_release_quality(name, name)

                try:
                    dsize, isize = source_utils.convert_size(file["size"],
                                                             to='GB')
                    info.insert(0, isize)
                except:
                    dsize = 0
                    pass

                info = ' | '.join(info)

                sources.append({
                    'source': 'torrent',
                    'seeders': seeders,
                    'hash': hash,
                    'name': name,
                    'quality': quality,
                    'language': 'en',
                    'url': url,
                    'info': info,
                    'direct': False,
                    'debridonly': True,
                    'size': dsize
                })
            return sources
        except:
            source_utils.scraper_error('TORRENTAPI')
            return sources
Example #13
	def sources(self, url, hostDict, hostprDict):
		try:
			sources = []

			if url is None:
				return sources

			if debrid.status() is False:
				return sources

			hostDict = hostprDict + hostDict

			data = parse_qs(url)
			data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

			title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
			title = title.replace('&', 'and').replace('Special Victims Unit', 'SVU')

			hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else data['year']

			query = '%s %s' % (title, hdlr)
			query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', '', query)

			url = self.search_link % quote_plus(query)
			url = urljoin(self.base_link, url).replace('-', '+')
			# log_utils.log('url = %s' % url, log_utils.LOGDEBUG)

			r = client.request(url)
			posts = client.parseDOM(r, "table", attrs={"class": "download"})
			if posts == []:
				return sources

			for post in posts:
				items = zip(client.parseDOM(post, 'a', ret='title'), client.parseDOM(post, 'a', ret='href'))

				for item in items:
					try:
						name = item[0].replace(' ', '.')

						t = name.split(hdlr)[0].replace(data['year'], '').replace('(', '').replace(')', '').replace('&', 'and')
						if cleantitle.get(t) != cleantitle.get(title):
							continue

						if hdlr not in name:
							continue

						if source_utils.remove_lang(name):
							continue

						i = str(item[1])
						i = self.base_link + i
						r = client.request(i)
						u = client.parseDOM(r, "div", attrs={"class": "dl-links"})

						for t in u:
							r = zip(re.compile("a href=.+? dl\W+'(.+?)'\W+").findall(t), re.findall('>.\((.+?Mb)\)', t))

							for link in r:
								url = link[0]

								if any(x in url for x in ['.rar', '.zip', '.iso', '.sample.']):
									continue

								if url in str(sources):
									continue

								quality, info = source_utils.get_release_quality(name, url)

								try:
									dsize, isize = source_utils._size(link[1])
									info.insert(0, isize)
								except:
									dsize = 0
									pass

								info = ' | '.join(info)

								valid, host = source_utils.is_host_valid(url, hostDict)
								if not valid:
									continue

								host = client.replaceHTMLCodes(host)
								try:
									host = host.encode('utf-8')
								except:
									pass

								sources.append({'source': host, 'quality': quality, 'language': 'en', 'url': url,
															'info': info, 'direct': False, 'debridonly': True, 'size': dsize})

					except:
						source_utils.scraper_error('DDLSPOT')
						pass

			return sources
		except:
			source_utils.scraper_error('DDLSPOT')
			return sources
Example #14
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url is None:
                return sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            query = '%s %s' % (data['title'], data['year'])

            url = self.search_link % urllib.quote(query)
            url = urlparse.urljoin(self.base_link, url)
            html = client.request(url)

            try:
                results = client.parseDOM(html, 'div', attrs={'class':
                                                              'row'})[2]
            except Exception:
                return sources

            items = re.findall(
                'class="browse-movie-bottom">(.+?)</div>\s</div>', results,
                re.DOTALL)
            if not items:
                return sources

            for entry in items:
                try:
                    try:
                        link, name = re.findall(
                            '<a href="(.+?)" class="browse-movie-title">(.+?)</a>',
                            entry, re.DOTALL)[0]
                        name = client.replaceHTMLCodes(name)
                        if not cleantitle.get(name) == cleantitle.get(
                                data['title']):
                            continue
                    except Exception:
                        continue
                    y = entry[-4:]
                    if not y == data['year']:
                        continue

                    response = client.request(link)
                    try:
                        entries = client.parseDOM(
                            response, 'div', attrs={'class': 'modal-torrent'})
                        for torrent in entries:
                            link, name = re.findall(
                                'href="magnet:(.+?)" class="magnet-download download-torrent magnet" title="(.+?)"',
                                torrent, re.DOTALL)[0]
                            link = 'magnet:%s' % link
                            link = str(
                                client.replaceHTMLCodes(link).split('&tr')[0])
                            quality, info = source_utils.get_release_quality(
                                name, name)
                            try:
                                size = re.findall(
                                    '((?:\d+\.\d+|\d+\,\d+|\d+)\s*(?:GB|GiB|MB|MiB))',
                                    torrent)[-1]
                                div = 1 if size.endswith(
                                    ('GB', 'GiB')) else 1024
                                size = float(re.sub('[^0-9|/.|/,]', '',
                                                    size)) / div
                                size = '%.2f GB' % size
                                info.append(size)
                            except Exception:
                                pass
                            info = ' | '.join(info)
                            sources.append({
                                'source': 'Torrent',
                                'quality': quality,
                                'language': 'en',
                                'url': link,
                                'info': info,
                                'direct': False,
                                'debridonly': True
                            })
                    except Exception:
                        continue
                except Exception:
                    continue

            return sources
        except Exception:
            return sources
Example #15
	def sources(self, url, hostDict, hostprDict):
		try:
			sources = []

			if url is None:
				return sources

			if debrid.status() is False:
				raise Exception()

			hostDict = hostprDict + hostDict

			data = urlparse.parse_qs(url)
			data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

			title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
			hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else data['year']

			query = '%s %s' % (title, hdlr)
			query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', '', query)

			url = self.search_link % urllib.quote_plus(query)
			url = urlparse.urljoin(self.base_link, url)
			# log_utils.log('url = %s' % url, log_utils.LOGDEBUG)

			html = client.request(url)

			posts = client.parseDOM(html, 'item')

			items = []
			for post in posts:
				try:
					t = client.parseDOM(post, 'title')[0]
					u = client.parseDOM(post, 'a', ret='href')
					s = re.search('((?:\d+\.\d+|\d+\,\d+|\d+)\s*(?:GB|GiB|MB|MiB))', post)
					s = s.groups()[0] if s else '0'
					items += [(t, i, s) for i in u]
				except:
					source_utils.scraper_error('300MBDOWNLOAD')
					pass

			for item in items:
				try:
					name = item[0]
					name = client.replaceHTMLCodes(name)

					t = name.split(self.hdlr)[0].replace(self.year, '').replace('(', '').replace(')', '')
					if cleantitle.get(t) != cleantitle.get(title):
						continue

					if self.hdlr not in name:
						continue

					url = item[1]
					if any(x in url for x in ['.rar', '.zip', '.iso']):
						continue

					url = client.replaceHTMLCodes(url)
					url = url.encode('utf-8')

					if url in str(sources):
						continue

					valid, host = source_utils.is_host_valid(url, hostDict)
					if not valid:
						continue

					host = client.replaceHTMLCodes(host)
					host = host.encode('utf-8')

					quality, info = source_utils.get_release_quality(name, url)

					try:
						size = re.findall('((?:\d+\.\d+|\d+\,\d+|\d+)\s*(?:GB|GiB|MB|MiB))', item[2])[-1]
						div = 1 if size.endswith(('GB', 'GiB')) else 1024
						size = float(re.sub('[^0-9|/.|/,]', '', size)) / div
						size = '%.2f GB' % size
						info.append(size)
					except:
						pass

					fileType = source_utils.getFileType(name)
					info.append(fileType)
					info = ' | '.join(info) if fileType else info[0]

					sources.append({'source': host, 'quality': quality, 'language': 'en', 'url': url,
					                'info': info, 'direct': False, 'debridonly': True})
				except:
					source_utils.scraper_error('300MBDOWNLOAD')
					pass

			return sources
		except:
			source_utils.scraper_error('300MBDOWNLOAD')
			return sources
Example #16
    def sources(self, url, hostDict, hostprDict):
        scraper = cfscrape.create_scraper()
        sources = []
        try:
            if url is None:
                return sources

            if debrid.status() is False:
                return sources

            data = parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            title = data['title']
            aliases = data['aliases']
            episode_title = data['title'] if 'tvshowtitle' in data else None
            year = data['year']

            query = re.sub('[^A-Za-z0-9\s\.-]+', '', title)

            url = self.search_link % quote_plus(query)
            url = urljoin(self.base_link, url)
            # log_utils.log('url = %s' % url, log_utils.LOGDEBUG)
            try:
                r = scraper.get(url).content
                if not r:
                    return sources
                if any(value in str(r) for value in
                       ['No movies found', 'something went wrong']):
                    return sources
                r = json.loads(r)

                id = ''
                for i in r:
                    if i['original_title'] == title and i[
                            'release_date'] == year:
                        id = i['id']
                        break
                if id == '':
                    return sources
                link = '%s%s%s' % (self.base_link, '/movies/torrents?id=', id)

                result = scraper.get(link).content
                if 'magnet' not in result:
                    return sources
                result = re.sub(r'\n', '', result)
                links = re.findall(
                    r'<tr>.*?<a title="Download:\s*(.+?)"href="(magnet:.+?)">.*?title="File Size">\s*(.+?)\s*</td>.*?title="Seeds">([0-9]+|[0-9]+,[0-9]+)\s*<',
                    result)

                for link in links:
                    name = link[0]
                    name = unquote_plus(name)
                    name = source_utils.clean_name(title, name)
                    if source_utils.remove_lang(name, episode_title):
                        continue

                    if not source_utils.check_title(title.replace('&', 'and'),
                                                    aliases, name, year, year):
                        continue

                    url = link[1]
                    try:
                        url = unquote_plus(url).decode('utf8').replace(
                            '&amp;', '&').replace(' ', '.')
                    except:
                        url = unquote_plus(url).replace('&amp;',
                                                        '&').replace(' ', '.')
                    url = url.split('&tr')[0]
                    hash = re.compile('btih:(.*?)&').findall(url)[0]

                    quality, info = source_utils.get_release_quality(name, url)

                    try:
                        size = link[2]
                        dsize, isize = source_utils._size(size)
                        info.insert(0, isize)
                    except:
                        dsize = 0
                        pass

                    info = ' | '.join(info)

                    try:
                        seeders = int(link[3].replace(',', ''))
                        if self.min_seeders > seeders:
                            continue
                    except:
                        seeders = 0
                        pass

                    sources.append({
                        'source': 'torrent',
                        'seeders': seeders,
                        'hash': hash,
                        'name': name,
                        'quality': quality,
                        'language': 'en',
                        'url': url,
                        'info': info,
                        'direct': False,
                        'debridonly': True,
                        'size': dsize
                    })
                return sources
            except:
                source_utils.scraper_error('MOVIEMAGNET')
                return sources
        except:
            source_utils.scraper_error('MOVIEMAGNET')
            return sources
Example #17
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url is None:
                return sources

            if debrid.status() is False:
                return sources

            hostDict = hostprDict + hostDict

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            title = data['tvshowtitle'] if 'tvshowtitle' in data else data[
                'title']
            title = title.replace('&', 'and').replace('Special Victims Unit',
                                                      'SVU')

            hdlr = 'S%02dE%02d' % (int(data['season']), int(
                data['episode'])) if 'tvshowtitle' in data else data['year']

            query = '%s %s' % (title, hdlr)
            query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', '', query)

            url = self.search_link % urllib.quote_plus(query)
            url = urlparse.urljoin(self.base_link, url)
            # log_utils.log('url = %s' % url, log_utils.LOGDEBUG)

            html = client.request(url)
            posts = client.parseDOM(html, 'item')

            items = []
            for post in posts:
                try:
                    t = client.parseDOM(post, 'title')[0]
                    u = client.parseDOM(post, 'enclosure', ret='url')
                    # rss feed does not contain size info; another reason why switching to the site search would be better
                    s = re.search(
                        '((?:\d+\.\d+|\d+\,\d+|\d+)\s*(?:GB|GiB|MB|MiB))',
                        post)
                    s = s.groups()[0] if s else '0'
                    items += [(t, i, s) for i in u]
                except:
                    pass

            for item in items:
                try:
                    url = item[1]
                    if any(x in url
                           for x in ['.rar', '.zip', '.iso', '.part']):
                        continue

                    url = client.replaceHTMLCodes(url)
                    url = url.encode('utf-8')

                    valid, host = source_utils.is_host_valid(url, hostDict)
                    if not valid:
                        continue

                    host = client.replaceHTMLCodes(host)
                    host = host.encode('utf-8')

                    name = item[0]
                    name = client.replaceHTMLCodes(name)

                    # some shows like "Power" have year and hdlr in name
                    t = name.split(hdlr)[0].replace(data['year'], '').replace(
                        '(', '').replace(')', '').replace('&', 'and')
                    if cleantitle.get(t) != cleantitle.get(title):
                        continue

                    if hdlr not in name:
                        continue

                    quality, info = source_utils.get_release_quality(name, url)

                    try:
                        size = re.findall(
                            '((?:\d+\.\d+|\d+\,\d+|\d+)\s*(?:GB|GiB|MB|MiB))',
                            item[2])[-1]
                        div = 1 if size.endswith(('GB', 'GiB')) else 1024
                        size = float(re.sub('[^0-9|/.|/,]', '', size)) / div
                        size = '%.2f GB' % size
                        info.append(size)
                    except:
                        pass

                    info = ' | '.join(info)

                    sources.append({
                        'source': host,
                        'quality': quality,
                        'language': 'en',
                        'url': url,
                        'info': info,
                        'direct': False,
                        'debridonly': True
                    })

                except:
                    source_utils.scraper_error('MVRLS')
                    pass

            return sources
        except:
            source_utils.scraper_error('MVRLS')
            return sources
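
Note: the size-parsing idiom in the example above (regex out a 'GB'/'MB' token, divide megabyte values by 1024, format as '%.2f GB') recurs in nearly every example below. A minimal standalone sketch of that shared logic; the helper name normalize_size is hypothetical:

import re

def normalize_size(text):
    # Extract the last 'N GB' / 'N MB' style token from text and return
    # it normalized to a '%.2f GB' string, or None if nothing matches.
    matches = re.findall(r'((?:\d+\.\d+|\d+,\d+|\d+)\s*(?:GB|GiB|MB|MiB))', text)
    if not matches:
        return None
    size = matches[-1]
    div = 1 if size.endswith(('GB', 'GiB')) else 1024  # MB/MiB -> GB
    value = float(re.sub(r'[^0-9.,]', '', size).replace(',', '.')) / div
    return '%.2f GB' % value

# normalize_size('Size: 1.4 GiB') -> '1.40 GB'
# normalize_size('700 MB')        -> '0.68 GB'
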
Example #18
    def sources(self, url, hostDict, hostprDict):
        sources = []
        try:
            if url is None:
                return sources

            if debrid.status() is False:
                return sources

            hostDict = hostprDict + hostDict

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            title = data['tvshowtitle'] if 'tvshowtitle' in data else data[
                'title']
            title = title.replace('&', 'and').replace('Special Victims Unit',
                                                      'SVU')

            hdlr = 'S%02dE%02d' % (int(data['season']), int(
                data['episode'])) if 'tvshowtitle' in data else data['year']

            query = '%s %s' % (title, hdlr)
            query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', '', query)
            log_utils.log('query = %s' % query, log_utils.LOGDEBUG)

            # query = query.replace("&", "and").replace("  ", " ").replace(" ", "-")
            # log_utils.log('query = %s' % query, log_utils.LOGDEBUG)

            query = query.replace("&", "and")
            query = re.sub('\s', '-', query)
            log_utils.log('query = %s' % query, log_utils.LOGDEBUG)

            url = self.search_link % urllib.quote_plus(query)
            log_utils.log('url = %s' % url, log_utils.LOGDEBUG)
            url = urlparse.urljoin(self.base_link, url)
            log_utils.log('url = %s' % url, log_utils.LOGDEBUG)
            url = "http://rlsbb.ru/" + query
            log_utils.log('url = %s' % url, log_utils.LOGDEBUG)

            if 'tvshowtitle' not in data:
                url = url + "-1080p"
            log_utils.log('url = %s' % url, log_utils.LOGDEBUG)

            r = self.scraper.get(url).content
            # log_utils.log('r = %s' % r, log_utils.LOGDEBUG)

            if r is None and 'tvshowtitle' in data:
                season = re.search('S(.*?)E', hdlr)
                season = season.group(1)
                query = title
                query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', '', query)
                query = query + "-S" + season
                query = query.replace("&", "and")
                query = query.replace("  ", " ")
                query = query.replace(" ", "-")
                url = "http://rlsbb.ru/" + query
                r = self.scraper.get(url).content

            posts = client.parseDOM(r, "div", attrs={"class": "content"})

            items = []
            for post in posts:
                try:
                    u = client.parseDOM(post, 'a', ret='href')

                    for i in u:
                        try:
                            name = str(i)
                            tit = name.rsplit('/', 1)[1]
                            t = tit.split(hdlr)[0].replace(
                                data['year'], '').replace('(', '').replace(
                                    ')', '').replace('&', 'and')
                            if cleantitle.get(t) != cleantitle.get(title):
                                continue

                            if hdlr in name.upper():
                                items.append(name)
                        except:
                            source_utils.scraper_error('RLSBB')
                            pass

                except:
                    source_utils.scraper_error('RLSBB')
                    pass

            seen_urls = set()
            for item in items:
                try:
                    info = []

                    url = str(item)
                    url = client.replaceHTMLCodes(url)
                    url = url.encode('utf-8')

                    if url in seen_urls:
                        continue

                    seen_urls.add(url)

                    host = url.replace("\\", "")
                    host2 = host.strip('"')
                    host = re.findall(
                        '([\w]+[.][\w]+)$',
                        urlparse.urlparse(host2.strip().lower()).netloc)[0]

                    if host not in hostDict:
                        continue

                    if any(x in host2 for x in ['.rar', '.zip', '.iso']):
                        continue

                    quality, info = source_utils.get_release_quality(host2)

                    try:
                        size = re.findall(
                            '((?:\d+\,\d+\.\d+|\d+\.\d+|\d+\,\d+|\d+)\s*(?:GiB|MiB|GB|MB))',
                            name)[0]
                        div = 1 if size.endswith('GB') else 1024
                        size = float(
                            re.sub('[^0-9|/.|/,]', '', size.replace(
                                ',', '.'))) / div
                        size = '%.2f GB' % size
                        info.append(size)
                    except:
                        pass

                    info = ' | '.join(info)

                    host = client.replaceHTMLCodes(host)
                    host = host.encode('utf-8')

                    sources.append({
                        'source': host,
                        'quality': quality,
                        'language': 'en',
                        'url': host2,
                        'info': info,
                        'direct': False,
                        'debridonly': True
                    })

                except:
                    source_utils.scraper_error('RLSBB')
                    pass

            return sources
        except:
            source_utils.scraper_error('RLSBB')
            return sources
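
The hostname check in Example #18 (reduce a URL to 'domain.tld', then test membership in hostDict) is another shared idiom. A sketch of just that step, assuming Python 2's urlparse as used throughout this listing, with a Python 3 fallback:

import re
try:
    from urlparse import urlparse        # Python 2, as in these examples
except ImportError:
    from urllib.parse import urlparse    # Python 3

def extract_host(link):
    # Strip a URL down to its bare 'domain.tld' form, the shape the
    # hostDict whitelist lookups expect.
    netloc = urlparse(link.strip().lower()).netloc
    m = re.findall(r'([\w]+[.][\w]+)$', netloc)
    return m[0] if m else None

# extract_host('http://www.Example.com/file.mkv') -> 'example.com'
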
Example #19
    def sources(self, url, hostDict, hostprDict):
        sources = []
        try:
            if url is None:
                return sources
            if debrid.status() is False:
                raise Exception()

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            title = data['tvshowtitle'] if 'tvshowtitle' in data else data[
                'title']

            hdlr = 'S%02dE%02d' % (int(data['season']), int(
                data['episode'])) if 'tvshowtitle' in data else data['year']

            query = '%s s%02de%02d' % (
                data['tvshowtitle'], int(data['season']),
                int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % (
                    data['title'], data['year'])
            query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)

            url = self.search_link % urllib.quote_plus(query)
            url = urlparse.urljoin(self.base_link, url)

            try:
                r = client.request(url)
                posts = client.parseDOM(r, 'tbody')[0]
                posts = client.parseDOM(posts, 'tr')
                for post in posts:
                    link = re.findall('a href="(magnet:.+?)" title="(.+?)"',
                                      post, re.DOTALL)
                    try:
                        size = re.findall(
                            '((?:\d+\,\d+\.\d+|\d+\.\d+|\d+\,\d+|\d+)\s*(?:GiB|MiB|GB|MB))',
                            post)[0]
                        div = 1 if size.endswith('GB') else 1024
                        size = float(
                            re.sub('[^0-9|/.|/,]', '', size.replace(
                                ',', '.'))) / div
                        size = '%.2f GB' % size
                    except BaseException:
                        size = '0'
                    # note: the loop variable holds the link's title text,
                    # not the outer query dict
                    for url, link_title in link:
                        if hdlr not in link_title:
                            continue
                        url = url.split('&tr')[0]
                        quality, info = source_utils.get_release_quality(link_title)
                        if any(x in url for x in [
                                'FRENCH', 'Ita', 'italian', 'TRUEFRENCH',
                                '-lat-', 'Dublado'
                        ]):
                            continue
                        info.append(size)
                        info = ' | '.join(info)
                        sources.append({
                            'source': 'Torrent',
                            'quality': quality,
                            'language': 'en',
                            'url': url,
                            'info': info,
                            'direct': False,
                            'debridonly': True
                        })
            except:
                return sources
            return sources
        except:
            return sources
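
Every scraper in this listing builds the same 'hdlr' filter token: 'S01E05' for an episode, the release year for a movie. The shared inline conditional, sketched as a hypothetical helper:

def build_hdlr(data):
    # 'S%02dE%02d' for TV episodes; the plain year string for movies.
    if 'tvshowtitle' in data:
        return 'S%02dE%02d' % (int(data['season']), int(data['episode']))
    return data['year']

# build_hdlr({'tvshowtitle': 'Show', 'season': '1', 'episode': '5'}) -> 'S01E05'
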
Example #20
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url is None:
                return sources

            if debrid.status() is False:
                return sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            title = data['tvshowtitle'] if 'tvshowtitle' in data else data[
                'title']
            title = title.replace('&', 'and').replace('Special Victims Unit',
                                                      'SVU')

            hdlr = 'S%02dE%02d' % (int(data['season']), int(
                data['episode'])) if 'tvshowtitle' in data else data['year']

            query = '%s %s' % (title, hdlr)
            query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', '', query)

            url = urlparse.urljoin(self.base_link, self.search_link)
            url = url % urllib.quote_plus(query)
            # log_utils.log('url = %s' % url, log_utils.LOGDEBUG)

            r = client.request(url)
            if r is None:
                return sources
            if 'Nothing Found' in r:
                return sources

            r = client.parseDOM(r, 'article')
            r1 = client.parseDOM(r, 'h2')
            r2 = client.parseDOM(r, 'div', attrs={'class': 'entry-excerpt'})

            if 'tvshowtitle' in data:  # the site no longer lists file size for episodes
                posts = zip(client.parseDOM(r1, 'a', ret='href'),
                            client.parseDOM(r1, 'a'))
            else:
                posts = zip(
                    client.parseDOM(r1, 'a', ret='href'),
                    client.parseDOM(r1, 'a'),
                    re.findall(
                        '((?:\d+\.\d+|\d+\,\d+|\d+)\s*(?:GB|GiB|MB|MiB))',
                        r2[0]))

            hostDict = hostprDict + hostDict

            items = []
            for post in posts:
                try:
                    base_u = client.request(post[0])

                    if 'tvshowtitle' in data:
                        regex = '<b>(' + re.escape(title) + '.*)</b>'  # escape regex metacharacters in the title
                        lists = zip(
                            re.findall(regex, base_u),
                            re.findall('<ul>(.+?)</ul>', base_u, re.DOTALL))
                        for links in lists:
                            u = re.findall('\'(http.+?)\'',
                                           links[1]) + re.findall(
                                               '\"(http.+?)\"', links[1])
                            t = links[0]
                            s = 0
                            items += [(t, i, s) for i in u]
                    else:
                        u = re.findall('\'(http.+?)\'', base_u) + re.findall(
                            '\"(http.+?)\"', base_u)
                        u = [i for i in u if '/embed/' not in i]
                        u = [i for i in u if 'youtube' not in i]

                        try:
                            t = post[1].encode('utf-8')
                        except:
                            t = post[1]
                        s = post[2]
                        items += [(t, i, s) for i in u]

                except:
                    source_utils.scraper_error('MYVIDEOLINK')
                    pass

            for item in items:
                try:
                    url = item[1]
                    url = client.replaceHTMLCodes(url)
                    url = url.encode('utf-8')

                    if url.endswith(('.rar', '.zip', '.iso', '.part', '.png',
                                     '.jpg', '.bmp', '.gif')):
                        continue

                    valid, host = source_utils.is_host_valid(url, hostDict)
                    if not valid:
                        continue

                    host = client.replaceHTMLCodes(host)
                    host = host.encode('utf-8')

                    name = item[0]

                    name = client.replaceHTMLCodes(name).replace(' ', '.')
                    match = source_utils.check_title(title, name, hdlr,
                                                     data['year'])
                    if not match:
                        continue

                    quality, info = source_utils.get_release_quality(name, url)

                    try:
                        size = re.findall(
                            '((?:\d+\.\d+|\d+\,\d+|\d+) (?:GB|GiB|MB|MiB))',
                            item[2])[-1]
                        dsize, isize = source_utils._size(size)
                        info.insert(0, isize)
                    except:
                        dsize = 0

                    fileType = source_utils.getFileType(name)
                    info.append(fileType)
                    info = ' | '.join(info) if fileType else info[0]

                    sources.append({
                        'source': host,
                        'quality': quality,
                        'language': 'en',
                        'url': url,
                        'info': info,
                        'direct': False,
                        'debridonly': True,
                        'size': dsize
                    })
                except:
                    source_utils.scraper_error('MYVIDEOLINK')
                    pass

            return sources
        except:
            source_utils.scraper_error('MYVIDEOLINK')
            return sources
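
Example #20 rejects archives and images by URL ending; earlier examples apply a looser substring version of the same test. The filter, sketched as a standalone predicate (the constant name JUNK_ENDINGS is an invention for this sketch):

JUNK_ENDINGS = ('.rar', '.zip', '.iso', '.part', '.png', '.jpg', '.bmp', '.gif')

def is_junk_link(url):
    # True for links that cannot be streamed: archive fragments and
    # cover art picked up alongside the real hoster links.
    return url.lower().endswith(JUNK_ENDINGS)
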
Example #21
    def sources(self, url, hostDict, hostprDict):

        sources = []

        api_key = self.get_api()
        if not api_key:
            return sources

        try:

            content_type = 'episode' if 'tvshowtitle' in url else 'movie'
            match = 'extended'
            moderated = 'no' if content_type == 'episode' else 'yes'
            search_in = ''

            if content_type == 'movie':
                title = url['title'].replace(':', ' ').replace(' ',
                                                               '+').replace(
                                                                   '&', 'and')
                title = title.replace("'", "")
                year = url['year']
                link = '@name+%s+%s+@files+%s+%s' % (title, year, title, year)

            elif content_type == 'episode':
                title = url['tvshowtitle'].replace(':', ' ').replace(
                    ' ', '+').replace('&', 'and')
                season = int(url['season'])
                episode = int(url['episode'])
                link = self.makeQuery(title, season, episode)

            s = requests.Session()
            link = self.base_link + self.meta_search_link % \
                   (api_key, link, match, moderated, search_in, self.search_limit)

            p = s.get(link)
            p = json.loads(p.text)

            if p['status'] != 'ok':
                return sources

            files = p['files']

            for i in files:
                if i['is_ready'] == '1' and i['type'] == 'video':
                    try:
                        source = 'SINGLE'
                        if int(i['files_num_video']) > 3:
                            source = 'PACK [B](x%02d)[/B]' % int(
                                i['files_num_video'])
                        file_name = i['name']
                        file_id = i['id']
                        file_dl = i['url_dl']
                        if content_type == 'episode':
                            url = '%s<>%s<>%s' % (file_id, season, episode)
                            details = self.details(file_name, i['size'],
                                                   i['video_info'])
                        else:
                            url = '%s<>%s<>%s+%s' % (file_id, 'movie', title,
                                                     year)
                            details = self.details(file_name, i['size'],
                                                   i['video_info']).split('|')
                            details = details[0] + ' | ' + file_name.replace(
                                '.', ' ')

                        quality = source_utils.get_release_quality(
                            file_name, file_dl)
                        sources.append({
                            'source': source,
                            'quality': quality[0],
                            'language': "en",
                            'url': url,
                            'info': details,
                            'direct': True,
                            'debridonly': False
                        })
                    except:
                        pass

            return sources

        except:
            print("Unexpected error in Furk Script: source", sys.exc_info()[0])
            exc_type, exc_obj, exc_tb = sys.exc_info()
            print(exc_type, exc_tb.tb_lineno)
            pass
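
Example #21 labels a result a pack when it bundles more than three video files, embedding the count in the source name. That branch, isolated as a sketch:

def pack_label(files_num_video):
    # More than three videos in one result -> a season/collection pack.
    n = int(files_num_video)
    return 'PACK [B](x%02d)[/B]' % n if n > 3 else 'SINGLE'

# pack_label('10') -> 'PACK [B](x10)[/B]'; pack_label('1') -> 'SINGLE'
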
Example #22
    def sources(self, url, hostDict, hostprDict):
        sources = []
        try:
            if url is None:
                return sources

            if debrid.status() is False:
                return sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            title = data['tvshowtitle'] if 'tvshowtitle' in data else data[
                'title']
            title = title.replace('&', 'and').replace('Special Victims Unit',
                                                      'SVU')

            hdlr = 'S%02dE%02d' % (int(data['season']), int(
                data['episode'])) if 'tvshowtitle' in data else data['year']

            query = '%s %s' % (title, hdlr)
            query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', '', query)

            url = self.search_link % urllib.quote_plus(query)
            url = urlparse.urljoin(self.base_link, url)
            # log_utils.log('url = %s' % url, log_utils.LOGDEBUG)

            try:
                r = client.request(url)

                posts = client.parseDOM(r, 'div', attrs={'class':
                                                         'results'})[0]
                posts = client.parseDOM(posts, 'dl')

                for post in posts:
                    links = re.findall('<dt><a href=/(.+)</a>', post,
                                       re.DOTALL)

                    for link in links:
                        magnet = link.split('</a>')[0]
                        hash = 'magnet:?xt=urn:btih:' + magnet.split('>')[0]
                        dn = '&dn=' + magnet.split('>')[1]
                        url = hash + dn

                        name = url.split('&dn=')[1]
                        name = urllib.unquote_plus(name).replace(' ', '.')
                        if source_utils.remove_lang(name):
                            continue

                        t = name.split(hdlr)[0].replace(
                            data['year'],
                            '').replace('(', '').replace(')', '').replace(
                                '&', 'and').replace('.US.',
                                                    '.').replace('.us.', '.')
                        if cleantitle.get(t) != cleantitle.get(title):
                            continue

                        if hdlr not in name:
                            continue

                        quality, info = source_utils.get_release_quality(
                            name, url)

                        try:
                            size = re.findall(
                                '((?:\d+\,\d+\.\d+|\d+\.\d+|\d+\,\d+|\d+)\s*(?:GiB|MiB|GB|MB))',
                                post)[0]
                            div = 1 if size.endswith('GB') else 1024
                            size = float(
                                re.sub('[^0-9|/.|/,]', '',
                                       size.replace(',', '.'))) / div
                            size = '%.2f GB' % size
                            info.insert(0, size)
                        except:
                            pass

                        info = ' | '.join(info)

                        sources.append({
                            'source': 'torrent',
                            'quality': quality,
                            'language': 'en',
                            'url': url,
                            'info': info,
                            'direct': False,
                            'debridonly': True
                        })

                return sources

            except:
                source_utils.scraper_error('TORRENTZ')
                return sources

        except:
            source_utils.scraper_error('TORRENTZ')
            return sources
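
Example #22 reconstructs magnet links from an info-hash and a display name scraped out of anchor fragments. The minimal target shape, as a sketch:

def build_magnet(btih, display_name):
    # A magnet URI reduced to its info-hash and display name; other
    # examples recover this form by cutting everything from '&tr' on.
    return 'magnet:?xt=urn:btih:%s&dn=%s' % (btih, display_name)

# build_magnet('ABCDEF0123456789', 'Show.S01E01.720p')
# -> 'magnet:?xt=urn:btih:ABCDEF0123456789&dn=Show.S01E01.720p'
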
Example #23
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url is None: return sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
            imdb = data['imdb']

            # try each mirror in turn; 'link' stays empty when no page
            # contains the imdb id (the original code left it unbound)
            link = []
            for search in (self.search_link, self.search_link2, self.search_link3):
                query = urlparse.urljoin(self.base_link, search)
                result = client.request(query)
                m = re.findall('Movie Size:(.+?)<.+?href="(.+?)".+?href="(.+?)"\s*onMouse', result, re.DOTALL | re.I)
                m = [i for i in m if imdb in i[1]]
                if m:
                    link = m
                    break

            for item in link:
                try:

                    quality, info = source_utils.get_release_quality(item[2], None)

                    try:
                        size = re.findall('((?:\d+\.\d+|\d+\,\d+|\d+) (?:GB|GiB|MB|MiB))', item[0], flags=re.I)[-1]
                        div = 1 if size.endswith(('GB', 'GiB')) else 1024
                        size = float(re.sub('[^0-9|/.|/,]', '', size)) / div
                        size = '%.2f GB' % size
                        info.append(size)
                    except:
                        pass

                    info = ' | '.join(info)

                    url = item[2]
                    if any(x in url.lower() for x in ['.rar.', '.zip.', '.iso.']) or any(
                            url.lower().endswith(x) for x in ['.rar', '.zip', '.iso']): raise Exception()

                    if any(x in url.lower() for x in ['youtube', 'sample', 'trailer']): raise Exception()
                    url = client.replaceHTMLCodes(url)
                    url = url.encode('utf-8')

                    sources.append({'source': 'DL', 'quality': quality, 'language': 'en', 'url': url, 'info': info,
                                    'direct': True, 'debridonly': False})
                except:
                    pass

            return sources
        except:
            return sources
Example #24
    def sources(self, url, hostDict, hostprDict):
        sources = []
        try:
            if url is None:
                raise Exception()

            if not self.api:
                raise Exception()

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            title = data['tvshowtitle'] if 'tvshowtitle' in data else data[
                'title']
            year = int(data['year']) if 'year' in data and data['year'] is not None else None
            season = int(data['season']) if 'season' in data and data['season'] is not None else None
            episode = int(data['episode']) if 'episode' in data and data['episode'] is not None else None
            query = '%s S%02dE%02d' % (
                title, season,
                episode) if 'tvshowtitle' in data else '%s %d' % (title, year)

            query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)

            query += ' lang:%s' % self.language[0]
            query = urllib.quote_plus(query)
            url = urlparse.urljoin(self.base_link, self.search_link)

            hostDict = hostprDict + hostDict

            iterations = self.streamLimit // self.streamIncrease
            last = self.streamLimit - (iterations * self.streamIncrease)
            if not last:
                iterations -= 1
                last = self.streamIncrease
            iterations += 1

            seen_urls = set()
            for media_type in self.types:  # avoid shadowing the builtin 'type'
                searchFrom = 0
                searchCount = self.streamIncrease
                for offset in range(iterations):
                    if iterations == offset + 1: searchCount = last
                    urlNew = url % (media_type, self.api, query, searchCount,
                                    searchFrom)
                    searchFrom = searchFrom + self.streamIncrease

                    results = client.request(urlNew)
                    results = json.loads(results)

                    apistatus = results['status']
                    if apistatus != 'success': break

                    results = results['result']

                    added = False
                    for result in results:
                        jsonName = result['title']
                        jsonSize = result['sizeinternal']
                        jsonExtension = result['extension']
                        jsonLanguage = result['lang']
                        jsonHoster = result['hostername'].lower()
                        jsonLink = result['hosterurls'][0]['url']

                        if jsonLink in seen_urls: continue
                        seen_urls.add(jsonLink)

                        if jsonHoster not in hostDict: continue

                        if not self.extensionValid(jsonExtension): continue

                        quality, info = source_utils.get_release_quality(
                            jsonName)
                        info.append(self.formatSize(jsonSize))
                        info.append(jsonName)
                        info = '|'.join(info)

                        sources.append({
                            'source': jsonHoster,
                            'quality': quality,
                            'language': jsonLanguage,
                            'url': jsonLink,
                            'info': info,
                            'direct': False,
                            'debridonly': False
                        })
                        added = True

                    if not added:
                        break

            return sources
        except:
            return sources
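
Example #24's batching arithmetic is easy to misread: it works out how many API requests to issue and how large the final one must be so that exactly streamLimit results are fetched in streamIncrease-sized pages. A worked sketch (integer division, matching the Python 2 semantics of the original):

def paging_plan(stream_limit, stream_increase):
    iterations = stream_limit // stream_increase
    last = stream_limit - iterations * stream_increase
    if not last:                # limit divides evenly: fold the remainder
        iterations -= 1         # back into one full final page
        last = stream_increase
    iterations += 1
    return iterations, last

# paging_plan(50, 20) -> (3, 10): two pages of 20, then a final page of 10.
# paging_plan(40, 20) -> (2, 20): two full pages of 20.
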
Example #25
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url is None: return sources
            if debrid.status() is False: raise Exception()

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            title = data['tvshowtitle'] if 'tvshowtitle' in data else data[
                'title']

            hdlr = 'S%02dE%02d' % (int(data['season']), int(
                data['episode'])) if 'tvshowtitle' in data else data['year']

            query = '%s S%02dE%02d' % (
                data['tvshowtitle'], int(data['season']),
                int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % (
                    data['title'], data['year'])
            query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)

            url = self.search_link % urllib.quote_plus(query)
            url = urlparse.urljoin(self.base_link, url)

            html = self.scraper.get(url).content
            posts = client.parseDOM(html, 'item')

            hostDict = hostprDict + hostDict

            items = []

            for post in posts:
                try:
                    t = client.parseDOM(post, 'title')[0]
                    u = client.parseDOM(post, 'enclosure', ret='url')
                    s = re.search(
                        '((?:\d+\.\d+|\d+\,\d+|\d+)\s*(?:GB|GiB|MB|MiB))',
                        post)
                    s = s.groups()[0] if s else '0'
                    items += [(t, i, s) for i in u]
                except:
                    pass

            for item in items:
                try:

                    url = item[1]
                    if any(x in url
                           for x in ['.rar', '.zip', '.iso', '.part']):
                        raise Exception()
                    url = client.replaceHTMLCodes(url)
                    url = url.encode('utf-8')

                    valid, host = source_utils.is_host_valid(url, hostDict)
                    if not valid:
                        raise Exception()

                    host = client.replaceHTMLCodes(host)
                    host = host.encode('utf-8')

                    name = item[0]
                    name = client.replaceHTMLCodes(name)

                    t = re.sub(
                        '(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*|3D)(\.|\)|\]|\s|)(.+|)',
                        '',
                        name,
                        flags=re.I)

                    if cleantitle.get(t) != cleantitle.get(title):
                        raise Exception()

                    y = re.findall(
                        '[\.|\(|\[|\s](\d{4}|S\d*E\d*|S\d*)[\.|\)|\]|\s]',
                        name)[-1].upper()

                    if y != hdlr:
                        raise Exception()

                    quality, info = source_utils.get_release_quality(name, url)

                    try:
                        size = re.findall(
                            '((?:\d+\.\d+|\d+\,\d+|\d+)\s*(?:GB|GiB|MB|MiB))',
                            item[2])[-1]
                        div = 1 if size.endswith(('GB', 'GiB')) else 1024
                        size = float(re.sub('[^0-9|/.|/,]', '', size)) / div
                        size = '%.2f GB' % size
                        info.append(size)
                    except:
                        pass

                    info = ' | '.join(info)

                    sources.append({
                        'source': host,
                        'quality': quality,
                        'language': 'en',
                        'url': url,
                        'info': info,
                        'direct': False,
                        'debridonly': True
                    })
                except:
                    pass

            check = [i for i in sources if not i['quality'] == 'CAM']
            if check:
                sources = check

            return sources
        except:
            return sources
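
The closing lines above show a pattern repeated in several examples: CAM-quality entries are dropped, but only when at least one better source survives. As a sketch:

def prefer_non_cam(sources):
    # Keep non-CAM sources if any exist; otherwise return the list as-is.
    check = [i for i in sources if i['quality'] != 'CAM']
    return check or sources
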
Example #26
    def sources(self, url, hostDict, hostprDict):
        sources = []
        try:
            if url is None:
                return sources

            data = parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            title = data['title']
            hdlr = data['year']

            query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', title)
            url = self.search_link % quote_plus(query)
            url = urljoin(self.base_link, url)
            # log_utils.log('url = %s' % url, log_utils.LOGDEBUG)
            r = client.request(url)
            if not r:
                return sources
            posts = client.parseDOM(r, 'div', attrs={'class': 'video_title'})

            items = []
            for post in posts:
                try:
                    data = dom_parser.parse_dom(post,
                                                'a',
                                                req=['href', 'title'])[0]
                    t = data.content
                    y = re.findall('\({0,1}(\d{4})\){0,1}',
                                   data.attrs['title'])[0]
                    qual = data.attrs['title']
                    if '-' in qual:
                        qual = qual.split('-')[1]

                    link = data.attrs['href']
                    if cleantitle.get(t) != cleantitle.get(title):
                        continue
                    if y != hdlr:
                        continue

                    items += [(link, qual)]
                except:
                    source_utils.scraper_error('WATCH32')
                    pass

            for item in items:
                try:
                    r = client.request(item[0]) if item[0].startswith(
                        'http') else client.request(
                            urljoin(self.base_link, item[0]))

                    qual = client.parseDOM(r, 'h1')[0]
                    quality = source_utils.get_release_quality(item[1],
                                                               qual)[0]

                    url = re.findall('''frame_url\s*=\s*["']([^']+)['"]\;''',
                                     r, re.DOTALL)[0]
                    url = url if url.startswith('http') else urljoin(
                        'https://', url)

                    sources.append({
                        'source': 'gvideo',
                        'quality': quality,
                        'info': '',
                        'language': 'en',
                        'url': url,
                        'direct': False,
                        'debridonly': False
                    })

                except:
                    source_utils.scraper_error('WATCH32')
                    pass

            return sources
        except:
            source_utils.scraper_error('WATCH32')
            return sources
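
Most examples gate candidates on cleantitle.get(candidate) == cleantitle.get(title). The cleantitle module itself never appears in this listing, so the stand-in below is an assumption about its behavior: a normalize-then-compare check.

import re

def normalized(title):
    # Rough stand-in for cleantitle.get(): lowercase and drop everything
    # but letters and digits, so 'The.Show!' compares equal to 'theshow'.
    # An assumption; the real cleantitle implementation is not shown here.
    return re.sub(r'[^a-z0-9]', '', title.lower())

def titles_match(candidate, wanted):
    return normalized(candidate) == normalized(wanted)
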
Example #27
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url is None: return sources

            if debrid.status() is False: raise Exception()

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

            title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']

            hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else data['year']

            query = '%s S%02dE%02d' % (
                data['tvshowtitle'], int(data['season']),
                int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % (
                data['title'], data['year'])
            query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)

            url = self.search_link % urllib.quote_plus(query)
            url = urlparse.urljoin(self.base_link, url)

            r = self.scraper.get(url).content

            posts = client.parseDOM(r, 'item')

            hostDict = hostprDict + hostDict

            items = []

            for post in posts:
                try:
                    t = client.parseDOM(post, 'title')[0]

                    # 'content.+?' is treated as a regex by parseDOM, so this
                    # likely matches the RSS <content:encoded> element
                    c = client.parseDOM(post, 'content.+?')[0]

                    u = client.parseDOM(c, 'p')
                    u = [client.parseDOM(i, 'a', ret='href') for i in u]
                    u = [i[0] for i in u if len(i) == 1]
                    if not u: raise Exception()

                    if 'tvshowtitle' in data:
                        u = [(re.sub('(720p|1080p|2160p)', '', t) + ' ' + [x for x in i.strip('//').split('/')][-1], i)
                             for i in u]
                    else:
                        u = [(t, i) for i in u]

                    items += u
                except:
                    pass

            for item in items:
                try:
                    name = item[0]
                    name = client.replaceHTMLCodes(name)

                    t = re.sub('(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*|3D)(\.|\)|\]|\s|)(.+|)', '', name)
                    if cleantitle.get(t) != cleantitle.get(title): raise Exception()

                    y = re.findall('[\.|\(|\[|\s](\d{4}|S\d*E\d*|S\d*)[\.|\)|\]|\s]', name)[-1].upper()

                    if y != hdlr: raise Exception()

                    url = item[1]

                    quality, info = source_utils.get_release_quality(url, name)

                    try:
                        size = re.findall('((?:\d+\.\d+|\d+\,\d+|\d+) (?:GB|GiB|MB|MiB))', item[2])[-1]
                        div = 1 if size.endswith(('GB', 'GiB')) else 1024
                        size = float(re.sub('[^0-9|/.|/,]', '', size)) / div
                        size = '%.2f GB' % size
                        info.append(size)
                    except:
                        pass

                    if any(x in url for x in ['.rar', '.zip', '.iso']): raise Exception()
                    url = client.replaceHTMLCodes(url)
                    url = url.encode('utf-8')

                    host = re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(url.strip().lower()).netloc)[0]
                    if host not in hostDict: raise Exception()
                    host = client.replaceHTMLCodes(host)
                    host = host.encode('utf-8')

                    sources.append({'source': host, 'quality': quality, 'language': 'en', 'url': url, 'info': info,
                                    'direct': False, 'debridonly': True})
                except:
                    pass

            check = [i for i in sources if not i['quality'] == 'CAM']
            if check: sources = check

            return sources
        except:
            return sources
Example #28
 def sources(self, url, hostDict, hostprDict):
     try:
         sources = []
         if url is None: return sources
         if debrid.status() is False: raise Exception()
         data = urlparse.parse_qs(url)
         data = dict([(i, data[i][0]) if data[i] else (i, '')
                      for i in data])
         title = data['tvshowtitle']
         hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode']))
         query = '%s S%02dE%02d' % (
             data['tvshowtitle'], int(data['season']),
             int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % (
                 data['title'], data['year'])
         query = re.sub('(\\\|/| -|:|;|\*|\?|"|<|>|\|)', ' ', query)
         url = self.search_link % (urllib.quote_plus(query).replace(
             '+', '-'))
         url = urlparse.urljoin(self.base_link, url)
         html = client.request(url)
         try:
             results = client.parseDOM(
                 html, 'table', attrs={'class': 'forum_header_border'})
             for result in results:
                 if 'magnet:' in result:
                     results = result
                     break
         except Exception:
             return sources
         rows = re.findall(
             '<tr name="hover" class="forum_header_border">(.+?)</tr>',
             results, re.DOTALL)
         if not rows:  # re.findall returns a list, never None
             return sources
         for entry in rows:
             try:
                 try:
                     columns = re.findall('<td\s.+?>(.+?)</td>', entry,
                                          re.DOTALL)
                     derka = re.findall(
                         'href="magnet:(.+?)" class="magnet" title="(.+?)"',
                         columns[2], re.DOTALL)[0]
                     name = derka[1]
                     link = 'magnet:%s' % (str(
                         client.replaceHTMLCodes(derka[0]).split('&tr')[0]))
                     t = name.split(hdlr)[0]
                     if not cleantitle.get(re.sub(
                              '[()]', '', t)) == cleantitle.get(title):
                         continue
                 except Exception:
                     continue
                 y = re.findall(
                     '[\.|\(|\[|\s](\d{4}|S\d*E\d*|S\d*)[\.|\)|\]|\s]',
                     name)[-1].upper()
                 if not y == hdlr:
                     continue
                 try:
                     seeders = int(
                         re.findall('<font color=".+?">(.+?)</font>',
                                    columns[5], re.DOTALL)[0])
                 except Exception:
                     continue
                 if self.min_seeders > seeders:
                     continue
                 quality, info = source_utils.get_release_quality(
                     name, name)
                 try:
                     size = re.findall(
                         '((?:\d+\.\d+|\d+\,\d+|\d+)\s*(?:GB|GiB|MB|MiB))',
                         name)[-1]
                     div = 1 if size.endswith(('GB', 'GiB')) else 1024
                     size = float(re.sub('[^0-9|/.|/,]', '', size)) / div
                     size = '%.2f GB' % size
                     info.append(size)
                 except Exception:
                     pass
                 info = ' | '.join(info)
                 sources.append({
                     'source': 'Torrent',
                     'quality': quality,
                     'language': 'en',
                     'url': link,
                     'info': info,
                     'direct': False,
                     'debridonly': True
                 })
             except Exception:
                 continue
         check = [i for i in sources if not i['quality'] == 'CAM']
         if check:
             sources = check
         return sources
     except Exception:
         return sources
Example #29
 def sources(self, url, hostDict, hostprDict):
     sources = []
     try:
         if url is None: return sources
         if debrid.status() is False: raise Exception()
         data = urlparse.parse_qs(url)
         data = dict([(i, data[i][0]) if data[i] else (i, '')
                      for i in data])
         title = data['tvshowtitle'] if 'tvshowtitle' in data else data[
             'title']
         hdlr = 'S%02dE%02d' % (int(data['season']), int(
             data['episode'])) if 'tvshowtitle' in data else data['year']
         query = '%s s%02de%02d' % (data['tvshowtitle'], int(data['season']), int(data['episode'])) \
             if 'tvshowtitle' in data else '%s %s' % (data['title'], data['year'])
         query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)
         url = urlparse.urljoin(
             self.base_link,
             self.search_link.format(query[0].lower(),
                                     cleantitle.geturl(query)))
         r = client.request(url)
         r = client.parseDOM(r, 'tbody')[0]
         posts = client.parseDOM(r, 'tr')
         posts = [i for i in posts if 'magnet:' in i]
         for post in posts:
             post = post.replace('&nbsp;', ' ')
             name = client.parseDOM(post, 'a', ret='title')[1]
             t = name.split(hdlr)[0]
             if not cleantitle.get(re.sub('[()]', '',
                                          t)) == cleantitle.get(title):
                 continue
             try:
                 y = re.findall(
                     '[\.|\(|\[|\s|\_|\-](S\d+E\d+|S\d+)[\.|\)|\]|\s|\_|\-]',
                     name, re.I)[-1].upper()
             except BaseException:
                 y = re.findall(
                     '[\.|\(|\[|\s\_|\-](\d{4})[\.|\)|\]|\s\_|\-]', name,
                     re.I)[-1].upper()
             if y != hdlr: continue
             links = client.parseDOM(post, 'a', ret='href')
             magnet = [
                 i.replace('&amp;', '&') for i in links if 'magnet:' in i
             ][0]
             url = magnet.split('&tr')[0]
             quality, info = source_utils.get_release_quality(name, name)
             try:
                 size = re.findall(
                     '((?:\d+\,\d+\.\d+|\d+\.\d+|\d+\,\d+|\d+)\s*(?:GiB|MiB|GB|MB))',
                     post)[0]
                 div = 1 if size.endswith(('GB', 'GiB')) else 1024
                 size = float(
                     re.sub('[^0-9|/.|/,]', '', size.replace(',',
                                                             '.'))) / div
                 size = '%.2f GB' % size
             except BaseException:
                 size = '0'
             info.append(size)
             info = ' | '.join(info)
             sources.append({
                 'source': 'Torrent',
                 'quality': quality,
                 'language': 'en',
                 'url': url,
                 'info': info,
                 'direct': False,
                 'debridonly': True
             })
         return sources
     except BaseException:
         return sources
Example #30
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url is None:
                return sources

            if debrid.status() is False:
                return sources

            data = parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            title = data['tvshowtitle'] if 'tvshowtitle' in data else data[
                'title']
            title = title.replace('&', 'and').replace('Special Victims Unit',
                                                      'SVU')
            hdlr = 'S%02dE%02d' % (int(data['season']), int(
                data['episode'])) if 'tvshowtitle' in data else ('(' +
                                                                 data['year'] +
                                                                 ')')

            # query = '%s %s' % (title, hdlr)
            query = title
            query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', '', query)

            if 'tvshowtitle' in data:
                url = self.show_link % query.replace(' ', '-')
            else:
                url = self.search_link % quote_plus(query)

            url = urljoin(self.base_link, url)
            # log_utils.log('url = %s' % url, __name__, log_utils.LOGDEBUG)

            r = client.request(url)
            if not r:
                return sources
            if 'No results were found' in r:
                return sources

            r = client.parseDOM(r, 'div', attrs={'class': 'card'})
            for i in r:
                url = re.compile('href="(magnet.+?)\s*?"').findall(i)[0]
                try:
                    url = unquote_plus(url).decode('utf8').replace(
                        '&amp;', '&').replace(' ', '.')
                except:
                    url = unquote_plus(url).replace('&amp;',
                                                    '&').replace(' ', '.')
                url = url.split('&tr=')[0].replace(' ', '.')
                hash = re.compile('btih:(.*?)&').findall(url)[0]

                name = url.split('&dn=')[1]
                name = re.sub('[^A-Za-z0-9]+', '.', name).lstrip('.')
                if source_utils.remove_lang(name):
                    continue

                match = source_utils.check_title(
                    title, name,
                    hdlr.replace('(', '').replace(')', ''), data['year'])
                if not match:
                    continue

                seeders = 0  # seeders not available on topnow
                quality, info = source_utils.get_release_quality(name, url)

                try:
                    size = re.findall(
                        '((?:\d+\.\d+|\d+\,\d+|\d+)\s*(?:GB|GiB|MB|MiB))', i
                    )[-1]  # file size is no longer available on topnow's new site
                    dsize, isize = source_utils._size(size)
                    info.insert(0, isize)
                except:
                    dsize = 0

                info = ' | '.join(info)

                sources.append({
                    'source': 'torrent',
                    'seeders': seeders,
                    'hash': hash,
                    'name': name,
                    'quality': quality,
                    'language': 'en',
                    'url': url,
                    'info': info,
                    'direct': False,
                    'debridonly': True,
                    'size': dsize
                })

            return sources
        except:
            source_utils.scraper_error('TOPNOW')
            return sources
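
Example #30 recovers the release name from the magnet's '&dn=' parameter and dot-separates it before title matching. That extraction, isolated as a sketch (the trailing '&' split is an added safety net, not in the original):

import re
try:
    from urllib import unquote_plus        # Python 2, as in these examples
except ImportError:
    from urllib.parse import unquote_plus  # Python 3

def name_from_magnet(magnet):
    # Pull the display name out of a magnet link and normalize it to a
    # dot-separated release name.
    name = unquote_plus(magnet).split('&dn=')[1].split('&')[0]
    return re.sub(r'[^A-Za-z0-9]+', '.', name).lstrip('.')

# name_from_magnet('magnet:?xt=urn:btih:ABC&dn=Some+Movie+2020+1080p')
# -> 'Some.Movie.2020.1080p'
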