Example #1
 def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
     try:
         query = self.tvsearch_link % quote_plus(
             cleantitle.query(tvshowtitle))
         query = urljoin(self.base_link, query.lower())
         result = client.request(query, referer=self.base_link)
         result = client.parseDOM(result,
                                  'div',
                                  attrs={'class': 'index_item.+?'})
         result = [(dom.parse_dom(i, 'a', req=['href', 'title'])[0])
                   for i in result if i]
         if not result:
             return
         result = [(
             i.attrs['href']
         ) for i in result if cleantitle.get(tvshowtitle) == cleantitle.get(
             re.sub(
                 '(\.|\(|\[|\s)(\d{4}|S\d+E\d+|S\d+|3D)(\.|\)|\]|\s|)(.+|)',
                 '',
                 i.attrs['title'],
                 flags=re.I))]
         if not result: return
         else: result = result[0]
         url = client.replaceHTMLCodes(result)
         try:
             url = url.encode('utf-8')
         except:
             pass
         return url
     except:
         source_utils.scraper_error('PRIMEWIRE')
         return
Example #2
	def get_sources(self, url):
		try:
			r = client.request(url)
			if not r:
				return
			div = client.parseDOM(r, 'div', attrs={'id': 'div2child'})

			for row in div:
				row = client.parseDOM(r, 'div', attrs={'class': 'resultdivbotton'})
				if not row:
					return

				for post in row:
					hash = re.findall('<div id="hideinfohash.+?" class="hideinfohash">(.+?)<', post, re.DOTALL)[0]
					name = re.findall('<div id="hidename.+?" class="hideinfohash">(.+?)<', post, re.DOTALL)[0]
					name = unquote_plus(name)
					name = source_utils.clean_name(self.title, name)
					if source_utils.remove_lang(name, self.episode_title):
						continue

					url = 'magnet:?xt=urn:btih:%s&dn=%s' % (hash, name)

					if url in str(self.sources):
						continue

					if not source_utils.check_title(self.title, self.aliases, name, self.hdlr, self.year):
						continue

					# filter for episode multi packs (ex. S01E01-E17 is also returned in query)
					if self.episode_title:
						if not source_utils.filter_single_episodes(self.hdlr, name):
							continue

					try:
						seeders = int(re.findall('<div class="resultdivbottonseed">([0-9]+|[0-9]+,[0-9]+)<', post, re.DOTALL)[0].replace(',', ''))
						if self.min_seeders > seeders:
							continue
					except:
						seeders = 0
						pass

					quality, info = source_utils.get_release_quality(name, url)

					try:
						size = re.findall('<div class="resultdivbottonlength">(.+?)<', post)[0]
						dsize, isize = source_utils._size(size)
						info.insert(0, isize)
					except:
						dsize = 0
						pass

					info = ' | '.join(info)

					self.sources.append({'source': 'torrent', 'seeders': seeders, 'hash': hash, 'name': name, 'quality': quality,
													'language': 'en', 'url': url, 'info': info, 'direct': False, 'debridonly': True, 'size': dsize})
		except:
			source_utils.scraper_error('IDOPE')
			pass
Example #3
    def sources(self, url, hostDict, hostprDict):
        self.sources = []
        try:
            scraper = cfscrape.create_scraper()

            if url is None:
                return self.sources

            if debrid.status() is False:
                return self.sources

            data = parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            self.title = data[
                'tvshowtitle'] if 'tvshowtitle' in data else data['title']
            self.title = self.title.replace('&', 'and').replace(
                'Special Victims Unit', 'SVU')
            self.aliases = data['aliases']
            self.episode_title = data[
                'title'] if 'tvshowtitle' in data else None
            self.hdlr = 'S%02dE%02d' % (
                int(data['season']), int(data['episode'])
            ) if 'tvshowtitle' in data else data['year']
            self.year = data['year']

            query = '%s %s' % (self.title, self.hdlr)
            query = re.sub('[^A-Za-z0-9\s\.-]+', '', query)

            urls = []
            url = self.search_link % quote_plus(query)
            url = urljoin(self.base_link, url)
            urls.append(url)
            # urls.append('%s%s' % (url, '&page=2')) # next page seems broken right now
            # urls.append('%s%s' % (url, '&page=3'))
            # log_utils.log('urls = %s' % urls, log_utils.LOGDEBUG)

            links = []
            for x in urls:
                r = scraper.get(x).content
                if not r:
                    continue
                rows = client.parseDOM(r, 'tr', attrs={'class': 'tlr'})
                rows += client.parseDOM(r, 'tr', attrs={'class': 'tlz'})
                links.extend(rows)

            threads = []
            for link in links:
                threads.append(workers.Thread(self.get_sources, link))
            [i.start() for i in threads]
            [i.join() for i in threads]
            return self.sources
        except:
            source_utils.scraper_error('EXTRATORRENT')
            return self.sources
Example #4
    def sources_packs(self,
                      url,
                      hostDict,
                      hostprDict,
                      search_series=False,
                      total_seasons=None,
                      bypass_filter=False):
        self.sources = []
        try:
            self.search_series = search_series
            self.total_seasons = total_seasons
            self.bypass_filter = bypass_filter

            if url is None:
                return self.sources
            if debrid.status() is False:
                return self.sources

            data = parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            self.title = data['tvshowtitle'].replace('&', 'and').replace(
                'Special Victims Unit', 'SVU')
            self.aliases = data['aliases']
            self.imdb = data['imdb']
            self.year = data['year']
            self.season_x = data['season']
            self.season_xx = self.season_x.zfill(2)

            query = re.sub('[^A-Za-z0-9\s\.-]+', '', self.title)
            queries = [
                self.search_link % quote_plus(query + ' S%s' % self.season_xx),
                self.search_link %
                quote_plus(query + ' Season %s' % self.season_x)
            ]
            if search_series:
                queries = [
                    self.search_link % quote_plus(query + ' Season'),
                    self.search_link % quote_plus(query + ' Complete')
                ]

            threads = []
            for url in queries:
                link = urljoin(self.base_link, url)
                threads.append(workers.Thread(self.get_sources_packs, link))
            [i.start() for i in threads]
            [i.join() for i in threads]
            return self.sources
        except:
            source_utils.scraper_error('SKYTORRENTS')
            return self.sources
Example #5
 def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
     try:
         url = {
             'imdb': imdb,
             'tvdb': tvdb,
             'tvshowtitle': tvshowtitle,
             'year': year
         }
         url = urlencode(url)
         return url
     except:
         source_utils.scraper_error('WATCHEPISODES')
         return
Example #6
 def movie(self, imdb, title, localtitle, aliases, year):
     try:
         url = {
             'imdb': imdb,
             'title': title,
             'aliases': aliases,
             'year': year
         }
         url = urlencode(url)
         return url
     except:
         source_utils.scraper_error('EXTRATORRENT')
         return
Example #7
 def episode(self, url, imdb, tvdb, title, premiered, season, episode):
     try:
         if url is None:
             return
         url = parse_qs(url)
         url = dict([(i, url[i][0]) if url[i] else (i, '') for i in url])
         url['title'], url['premiered'], url['season'], url[
             'episode'] = title, premiered, season, episode
         url = urlencode(url)
         return url
     except:
         source_utils.scraper_error('EXTRATORRENT')
         return
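
A note on the pattern above: movie() and episode() (and tvshow() in Example #5) simply urlencode a plain dict, and the sources() methods later recover it with parse_qs. A minimal round-trip sketch using only the standard library; the helper names are illustrative and not part of these scrapers:

from urllib.parse import urlencode, parse_qs

def build_payload(imdb, title, aliases, year):
    # mirror movie(): pack the lookup data into a query string
    return urlencode({'imdb': imdb, 'title': title, 'aliases': aliases, 'year': year})

def read_payload(url):
    # mirror sources(): parse_qs yields lists, keep the first value of each key
    data = parse_qs(url)
    return dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

payload = build_payload('tt0944947', 'Game of Thrones', [], '2011')
print(read_payload(payload)['title'])  # -> Game of Thrones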
Example #8
	def get_sources(self, row):
		try:
			if 'magnet' not in row:
				return

			url = re.findall('href="(magnet:.+?)"', row, re.DOTALL)[0]
			url = unquote_plus(url).split('&tr')[0].replace('&amp;', '&').replace(' ', '.')
			if url in str(self.sources):
				return

			hash = re.compile('btih:(.*?)&').findall(url)[0]

			name = url.split('&dn=')[1]
			name = source_utils.clean_name(self.title, name)
			if source_utils.remove_lang(name, self.episode_title):
				return

			if not source_utils.check_title(self.title, self.aliases, name, self.hdlr, self.year):
				return

			# filter for episode multi packs (ex. S01E01-E17 is also returned in query)
			if self.episode_title:
				if not source_utils.filter_single_episodes(self.hdlr, name):
					return

			try:
				seeders = int(re.findall('<span style="color:#008000"><strong>\s*([0-9]+)\s*</strong>', row, re.DOTALL)[0].replace(',', ''))
				if self.min_seeders > seeders: 
					return
			except:
				seeders = 0
				pass

			quality, info = source_utils.get_release_quality(name, url)

			try:
				size = re.findall('((?:\d+\,\d+\.\d+|\d+\.\d+|\d+\,\d+|\d+)\s*(?:GB|GiB|Gb|MB|MiB|Mb))', row, re.DOTALL)[0]
				dsize, isize = source_utils._size(size)
				info.insert(0, isize)
			except:
				dsize = 0
				pass

			info = ' | '.join(info)

			self.sources.append({'source': 'torrent', 'seeders': seeders, 'hash': hash, 'name': name, 'quality': quality,
												'language': 'en', 'url': url, 'info': info, 'direct': False, 'debridonly': True, 'size': dsize})
		except:
			source_utils.scraper_error('MAGNET4YOU')
			pass
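
The magnet handling in get_sources() above is the idiom shared by most of these scrapers: strip the tracker list at '&tr', pull the btih infohash, and keep the '&dn=' display name for the title and quality checks. A standalone sketch against a made-up magnet URI (no site is queried):

import re
from urllib.parse import unquote_plus

magnet = 'magnet:?xt=urn:btih:ABCDEF0123456789ABCDEF0123456789ABCDEF01&dn=Some.Show.S01E01.1080p.WEB&tr=udp://tracker.example:80'
url = unquote_plus(magnet).split('&tr')[0].replace('&amp;', '&').replace(' ', '.')
hash = re.compile('btih:(.*?)&').findall(url)[0]  # infohash between 'btih:' and the next '&'
name = url.split('&dn=')[1]                       # display name fed to clean_name/check_title
print(hash, name)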
Example #9
    def sources(self, url, hostDict, hostprDict):
        self.sources = []
        try:
            if url is None:
                return self.sources

            if debrid.status() is False:
                return self.sources

            data = parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            self.title = data[
                'tvshowtitle'] if 'tvshowtitle' in data else data['title']
            self.title = self.title.replace('&', 'and').replace(
                'Special Victims Unit', 'SVU')
            self.aliases = data['aliases']
            self.episode_title = data[
                'title'] if 'tvshowtitle' in data else None
            self.hdlr = 'S%02dE%02d' % (
                int(data['season']), int(data['episode'])
            ) if 'tvshowtitle' in data else data['year']
            self.year = data['year']

            query = '%s %s' % (self.title, self.hdlr)
            query = re.sub('[^A-Za-z0-9\s\.-]+', '', query)

            url = self.search_link % quote_plus(query)
            url = urljoin(self.base_link, url)
            # log_utils.log('url = %s' % url, log_utils.LOGDEBUG)

            try:
                r = client.request(url, timeout='5')
                if r is None:
                    return self.sources
                links = re.findall('<a href=(/torrent/.+?)>', r, re.DOTALL)

                threads = []
                for link in links:
                    threads.append(workers.Thread(self.get_sources, link))
                [i.start() for i in threads]
                [i.join() for i in threads]
                return self.sources
            except:
                source_utils.scraper_error('TORLOCK')
                return self.sources
        except:
            source_utils.scraper_error('TORLOCK')
            return self.sources
Example #10
 def episode(self, url, imdb, tvdb, title, premiered, season, episode):
     try:
         if url is None:
             return
         url = urljoin(self.base_link, url) if url.startswith('/') else url
         url = url.split('online.html')[0]
         url = '%s%s-online.html' % (url, 'season-%01d-episode-%01d' %
                                     (int(season), int(episode)))
         url = client.replaceHTMLCodes(url)
         try:
             url = url.encode('utf-8')
         except:
             pass
         return url
     except:
         source_utils.scraper_error('PRIMEWIRE')
         return
Example #11
    def sources(self, url, hostDict, hostprDict):
        self.sources = []
        try:
            if url is None:
                return self.sources

            if debrid.status() is False:
                return self.sources

            data = parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            self.title = data[
                'tvshowtitle'] if 'tvshowtitle' in data else data['title']
            self.title = self.title.replace('&', 'and').replace(
                'Special Victims Unit', 'SVU')
            self.aliases = data['aliases']
            self.episode_title = data[
                'title'] if 'tvshowtitle' in data else None
            self.hdlr = 'S%02dE%02d' % (
                int(data['season']), int(data['episode'])
            ) if 'tvshowtitle' in data else data['year']
            self.year = data['year']

            query = '%s %s' % (self.title, self.hdlr)
            query = re.sub('[^A-Za-z0-9\s\.-]+', '', query)

            urls = []
            url = self.search_link % quote_plus(query)
            url = urljoin(self.base_link, url)
            urls.append(url)
            # urls.append(url.replace('page=0', 'page=40')) # server response time WAY to slow to parse 2 pages deep, site sucks.
            # log_utils.log('urls = %s' % urls, log_utils.LOGDEBUG)

            threads = []
            for url in urls:
                threads.append(workers.Thread(self.get_sources, url))
            [i.start() for i in threads]
            [i.join() for i in threads]
            return self.sources
        except:
            source_utils.scraper_error('ISOHUNT2')
            return self.sources
Example #12
 def resolve(self, url):
     try:
         if '/stream/' in url or '/watch/' in url:
             r = client.request(url, referer=self.base_link)
             link = client.parseDOM(r,
                                    'a',
                                    ret='data-href',
                                    attrs={'id': 'iframe_play'})[0]
         else:
             try:
                 data = client.request(url, referer=self.base_link)
                 data = re.findall(r'\s*(eval.+?)\s*</script', data,
                                   re.DOTALL)[0]
                 link = jsunpack.unpack(data)
                 link = link.replace('\\', '')
                 if 'eval' in link:
                     link = jsunpack.unpack(link)
                 link = link.replace('\\', '')
                 host = re.findall('hosted=\'(.+?)\';var', link,
                                   re.DOTALL)[0]
                 if 'streamango' in host:
                     loc = re.findall('''loc\s*=\s*['"](.+?)['"]''', link,
                                      re.DOTALL)[0]
                     link = 'https://streamango.com/embed/{0}'.format(loc)
                 elif 'openload' in host:
                     loc = re.findall('''loc\s*=\s*['"](.+?)['"]''', link,
                                      re.DOTALL)[0]
                     link = 'https://openload.co/embed/{0}'.format(loc)
                 else:
                     link = re.findall('''loc\s*=\s*['"](.+?)['"]\;''', link,
                                       re.DOTALL)[0]
             except:
                 source_utils.scraper_error('PRIMEWIRE')
                 link = client.request(url, output='geturl', timeout=10)
                 if link == url:
                     return
                 else:
                     return link
         return link
     except:
         source_utils.scraper_error('PRIMEWIRE')
         return
Example #13
	def sources(self, url, hostDict, hostprDict):
		self.sources = []
		try:
			if url is None:
				return self.sources

			if debrid.status() is False:
				return self.sources

			data = parse_qs(url)
			data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

			self.title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
			self.title = self.title.replace('&', 'and').replace('Special Victims Unit', 'SVU')
			self.aliases = data['aliases']
			self.episode_title = data['title'] if 'tvshowtitle' in data else None
			self.hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else data['year']
			self.year = data['year']

			query = '%s %s' % (self.title, self.hdlr)
			query = re.sub('[^A-Za-z0-9\s\.-]+', '', query)

			url = self.search_link % quote_plus(query)
			url = urljoin(self.base_link, url)
			# log_utils.log('url = %s' % url, log_utils.LOGDEBUG)

			r = client.request(url)
			if not r:
				return self.sources
			rows = client.parseDOM(r, 'div', attrs={'id': 'profile1'})

			threads = []
			for row in rows:
				threads.append(workers.Thread(self.get_sources, row))
			[i.start() for i in threads]
			[i.join() for i in threads]
			return self.sources
		except:
			source_utils.scraper_error('MAGNET4YOU')
			return self.sources
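
The sources() methods above all share the same fan-out: one worker thread per result row, each appending into a shared list, joined before returning. A minimal sketch of that pattern using the stdlib threading.Thread in place of the addon's workers.Thread wrapper; MiniScraper and its trivial parse step are illustrative only:

import threading

class MiniScraper:
    def __init__(self):
        self.sources = []  # shared result list, appended to by every worker

    def get_sources(self, row):
        # stand-in for the real per-row parsing (magnet/hoster extraction)
        self.sources.append({'url': row})

    def scan(self, rows):
        threads = [threading.Thread(target=self.get_sources, args=(row,)) for row in rows]
        [t.start() for t in threads]
        [t.join() for t in threads]
        return self.sources

print(MiniScraper().scan(['row-1', 'row-2', 'row-3']))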
Example #14
    def sources(self, url, hostDict, hostprDict):
        scraper = cfscrape.create_scraper()
        sources = []
        try:
            if url is None:
                return sources

            if debrid.status() is False:
                return sources

            data = parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            title = data['title']
            aliases = data['aliases']
            episode_title = data['title'] if 'tvshowtitle' in data else None
            year = data['year']

            query = re.sub('[^A-Za-z0-9\s\.-]+', '', title)

            url = self.search_link % quote_plus(query)
            url = urljoin(self.base_link, url)
            # log_utils.log('url = %s' % url, log_utils.LOGDEBUG)
            try:
                r = scraper.get(url).content
                if not r:
                    return sources
                if any(value in str(r) for value in [
                        'No movies found', 'something went wrong',
                        'Connection timed out'
                ]):
                    return sources
                r = json.loads(r)
                id = ''
                for i in r:
                    if i['original_title'] == title and i[
                            'release_date'] == year:
                        id = i['id']
                        break
                if id == '':
                    return sources
                link = '%s%s%s' % (self.base_link, '/movies/torrents?id=', id)

                result = scraper.get(link).content
                if 'magnet' not in result:
                    return sources
                result = re.sub(r'\n', '', result)
                links = re.findall(
                    r'<tr>.*?<a title="Download:\s*(.+?)"href="(magnet:.+?)">.*?title="File Size">\s*(.+?)\s*</td>.*?title="Seeds">([0-9]+|[0-9]+,[0-9]+)\s*<',
                    result)

                for link in links:
                    name = link[0]
                    name = unquote_plus(name)
                    name = source_utils.clean_name(title, name)
                    if source_utils.remove_lang(name, episode_title):
                        continue

                    if not source_utils.check_title(title.replace('&', 'and'),
                                                    aliases, name, year, year):
                        continue

                    url = link[1]
                    try:
                        url = unquote_plus(url).decode('utf8').replace(
                            '&amp;', '&').replace(' ', '.')
                    except:
                        url = unquote_plus(url).replace('&amp;',
                                                        '&').replace(' ', '.')
                    url = url.split('&tr')[0]
                    hash = re.compile('btih:(.*?)&').findall(url)[0]

                    quality, info = source_utils.get_release_quality(name, url)

                    try:
                        size = link[2]
                        dsize, isize = source_utils._size(size)
                        info.insert(0, isize)
                    except:
                        dsize = 0
                        pass

                    info = ' | '.join(info)

                    try:
                        seeders = int(link[3].replace(',', ''))
                        if self.min_seeders > seeders:
                            continue
                    except:
                        seeders = 0
                        pass

                    sources.append({
                        'source': 'torrent',
                        'seeders': seeders,
                        'hash': hash,
                        'name': name,
                        'quality': quality,
                        'language': 'en',
                        'url': url,
                        'info': info,
                        'direct': False,
                        'debridonly': True,
                        'size': dsize
                    })
                return sources
            except:
                source_utils.scraper_error('MOVIEMAGNET')
                return sources
        except:
            source_utils.scraper_error('MOVIEMAGNET')
            return sources
Example #15
    def get_sources(self, url):
        try:
            r = client.request(url, timeout='10')
            if not r:
                return
            posts = client.parseDOM(r, 'tbody')[0]
            posts = client.parseDOM(posts, 'tr')

            for post in posts:
                post = re.sub(r'\n', '', post)
                post = re.sub(r'\t', '', post)
                links = re.compile(
                    '<a href="(/torrent_details/.+?)"><span>(.+?)</span>.*?<td class="size-row">(.+?)</td><td class="sn">([0-9]+)</td>'
                ).findall(post)
                for items in links:
                    # item[1] does not contain full info like the &dn= portion of magnet
                    link = urljoin(self.base_link, items[0])
                    link = client.request(link, timeout='10')
                    if not link:
                        continue

                    magnet = re.compile('(magnet.+?)"').findall(link)[0]
                    url = unquote_plus(magnet).replace('&amp;',
                                                       '&').replace(' ', '.')
                    url = url.split('&tr')[0]
                    name = unquote_plus(url.split('&dn=')[1])
                    name = source_utils.clean_name(self.title, name)
                    if source_utils.remove_lang(name, self.episode_title):
                        continue

                    if not source_utils.check_title(self.title, self.aliases,
                                                    name, self.hdlr,
                                                    self.year):
                        continue

                    hash = re.compile('btih:(.*?)&').findall(url)[0]

                    # filter for episode multi packs (ex. S01E01-E17 is also returned in query)
                    if self.episode_title:
                        if not source_utils.filter_single_episodes(
                                self.hdlr, name):
                            continue

                    try:
                        seeders = int(items[3].replace(',', ''))
                        if self.min_seeders > seeders:
                            continue
                    except:
                        seeders = 0
                        pass

                    quality, info = source_utils.get_release_quality(name, url)
                    try:
                        size = re.findall(
                            '((?:\d+\,\d+\.\d+|\d+\.\d+|\d+\,\d+|\d+)\s*(?:GB|GiB|Gb|MB|MiB|Mb))',
                            items[2])[0]
                        dsize, isize = source_utils._size(size)
                        info.insert(0, isize)
                    except:
                        dsize = 0
                        pass
                    info = ' | '.join(info)

                    self.sources.append({
                        'source': 'torrent',
                        'seeders': seeders,
                        'hash': hash,
                        'name': name,
                        'quality': quality,
                        'language': 'en',
                        'url': url,
                        'info': info,
                        'direct': False,
                        'debridonly': True,
                        'size': dsize
                    })
        except:
            source_utils.scraper_error('ISOHUNT2')
            pass
Example #16
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url is None:
                return sources

            if debrid.status() is False:
                return sources

            data = parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            title = data['tvshowtitle'] if 'tvshowtitle' in data else data[
                'title']
            title = title.replace('&', 'and').replace('Special Victims Unit',
                                                      'SVU')
            aliases = data['aliases']
            episode_title = data['title'] if 'tvshowtitle' in data else None
            hdlr = 'S%02dE%02d' % (int(data['season']), int(
                data['episode'])) if 'tvshowtitle' in data else ('(' +
                                                                 data['year'] +
                                                                 ')')

            # query = '%s %s' % (title, hdlr) #site now  fails with year in query
            query = title
            query = re.sub('[^A-Za-z0-9\s\.-]+', '', query)

            if 'tvshowtitle' in data:
                url = self.show_link % query.replace(' ', '-')
            else:
                url = self.search_link % quote_plus(query)

            url = urljoin(self.base_link, url)
            # log_utils.log('url = %s' % url, __name__, log_utils.LOGDEBUG)

            r = client.request(url)
            if not r:
                return sources
            if 'No results were found' in r:
                return sources

            r = client.parseDOM(r, 'div', attrs={'class': 'card'})
            for i in r:
                url = re.compile('href="(magnet.+?)\s*?"').findall(i)[0]
                try:
                    url = unquote_plus(url).decode('utf8').replace(
                        '&amp;', '&').replace(' ', '.')
                except:
                    url = unquote_plus(url).replace('&amp;',
                                                    '&').replace(' ', '.')
                url = url.split('&tr=')[0].replace(' ', '.')
                hash = re.compile('btih:(.*?)&').findall(url)[0]

                name = url.split('&dn=')[1]
                name = source_utils.clean_name(title, name)
                if source_utils.remove_lang(name, episode_title):
                    continue

                if not source_utils.check_title(
                        title, aliases, name,
                        hdlr.replace('(', '').replace(')', ''), data['year']):
                    continue

                seeders = 0  # seeders not available on topnow
                quality, info = source_utils.get_release_quality(name, url)

                try:
                    size = re.findall(
                        '((?:\d+\.\d+|\d+\,\d+|\d+)\s*(?:GB|GiB|MB|MiB))', i
                    )[-1]  # file size is no longer available on topnow's new site
                    dsize, isize = source_utils._size(size)
                    info.insert(0, isize)
                except:
                    dsize = 0
                    pass

                info = ' | '.join(info)

                sources.append({
                    'source': 'torrent',
                    'seeders': seeders,
                    'hash': hash,
                    'name': name,
                    'quality': quality,
                    'language': 'en',
                    'url': url,
                    'info': info,
                    'direct': False,
                    'debridonly': True,
                    'size': dsize
                })

            return sources
        except:
            source_utils.scraper_error('TOPNOW')
            return sources
Example #17
    def get_sources(self, link):
        try:
            url = 'magnet:%s' % (re.findall('a href="magnet:(.+?)"', link,
                                            re.DOTALL)[0])
            url = unquote_plus(url).split('&tr')[0].replace('&amp;',
                                                            '&').replace(
                                                                ' ', '.')
            url = source_utils.strip_non_ascii_and_unprintable(url)
            if url in str(self.sources):
                return

            hash = re.compile('btih:(.*?)&').findall(url)[0]

            name = url.split('&dn=')[1]
            name = source_utils.clean_name(self.title, name)
            if source_utils.remove_lang(name, self.episode_title):
                return

            if not source_utils.check_title(self.title, self.aliases, name,
                                            self.hdlr, self.year):
                return

            # filter for episode multi packs (ex. S01E01-E17 is also returned in query)
            if self.episode_title:
                if not source_utils.filter_single_episodes(self.hdlr, name):
                    return

            try:
                seeders = int(
                    client.parseDOM(link, 'td',
                                    attrs={'class': 'sy'})[0].replace(',', ''))
                if self.min_seeders > seeders:
                    return
            except:
                seeders = 0
                pass

            quality, info = source_utils.get_release_quality(name, url)

            try:
                size = re.findall(
                    '((?:\d+\,\d+\.\d+|\d+\.\d+|\d+\,\d+|\d+)\s*(?:GiB|MiB|GB|MB))',
                    link)[0]
                dsize, isize = source_utils._size(size)
                info.insert(0, isize)
            except:
                source_utils.scraper_error('EXTRATORRENT')
                dsize = 0
                pass

            info = ' | '.join(info)

            self.sources.append({
                'source': 'torrent',
                'seeders': seeders,
                'hash': hash,
                'name': name,
                'quality': quality,
                'language': 'en',
                'url': url,
                'info': info,
                'direct': False,
                'debridonly': True,
                'size': dsize
            })
        except:
            source_utils.scraper_error('EXTRATORRENT')
            pass
Example #18
    def sources(self, url, hostDict, hostprDict):
        sources = []
        try:
            if url is None:
                return sources

            data = parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            title = data['tvshowtitle']
            hdlr = 's%02de%02d' % (int(data['season']), int(data['episode']))
            query = quote_plus(cleantitle.getsearch(title))
            surl = urljoin(self.base_link, self.search_link % query)
            # log_utils.log('surl = %s' % surl, log_utils.LOGDEBUG)

            r = client.request(surl, XHR=True)
            r = json.loads(r)
            r = r['series']

            for i in r:
                tit = i['value']

                if cleantitle.get(title) != cleantitle.get(tit):
                    continue
                slink = i['seo']
                slink = urljoin(self.base_link, slink)
                page = client.request(slink)

                if data['imdb'] not in page:
                    continue

                # use fresh names so the query dict in data survives later iterations
                items = client.parseDOM(page, 'div', attrs={'class': 'el-item\s*'})

                epis = [
                    client.parseDOM(i, 'a', ret='href')[0] for i in items if i
                ]
                epis = [i for i in epis if hdlr in i.lower()][0]

                epage = client.request(epis)
                links = client.parseDOM(epage, 'a', ret='data-actuallink')

                for url in links:
                    try:
                        valid, host = source_utils.is_host_valid(url, hostDict)
                        if not valid:
                            continue
                        sources.append({
                            'source': host,
                            'quality': 'SD',
                            'info': '',
                            'language': 'en',
                            'url': url,
                            'direct': False,
                            'debridonly': False
                        })
                    except:
                        source_utils.scraper_error('WATCHEPISODES')
                        return sources
            return sources
        except:
            source_utils.scraper_error('WATCHEPISODES')
            return sources
Example #19
    def sources(self, url, hostDict, hostprDict):
        sources = []
        try:
            if url is None:
                return sources

            url = urljoin(self.base_link,
                          url) if not url.startswith('http') else url

            result = client.request(url)
            data = re.findall(r'\s*(eval.+?)\s*</script', result, re.DOTALL)[1]
            data = jsunpack.unpack(data).replace('\\', '')

            patern = '''rtv='(.+?)';var aa='(.+?)';var ba='(.+?)';var ca='(.+?)';var da='(.+?)';var ea='(.+?)';var fa='(.+?)';var ia='(.+?)';var ja='(.+?)';var ka='(.+?)';'''
            links_url = re.findall(patern, data, re.DOTALL)[0]
            slug = 'slug={}'.format(url.split('/')[-1])
            links_url = self.base_link + ''.join(links_url).replace(
                'slug=', slug)
            links = client.request(links_url)
            links = client.parseDOM(links, 'tbody')

            for i in links:
                try:
                    data = [(client.parseDOM(i, 'a', ret='href')[0],
                             client.parseDOM(i,
                                             'span',
                                             attrs={'class':
                                                    'version_host'})[0])][0]
                    url = urljoin(self.base_link, data[0])
                    url = client.replaceHTMLCodes(url)
                    try:
                        url = url.encode('utf-8')
                    except:
                        pass

                    host = data[1]
                    valid, host = source_utils.is_host_valid(host, hostDict)
                    if not valid:
                        continue

                    quality = client.parseDOM(i, 'span', ret='class')[0]
                    quality, info = source_utils.get_release_quality(
                        quality, url)

                    sources.append({
                        'source': host,
                        'quality': quality,
                        'info': '',
                        'language': 'en',
                        'url': url,
                        'direct': False,
                        'debridonly': False
                    })
                except:
                    source_utils.scraper_error('PRIMEWIRE')
                    pass

            return sources
        except:
            source_utils.scraper_error('PRIMEWIRE')
            return sources
Example #20
	def get_sources_packs(self, url):
		try:
			r = client.request(url, timeout='5')
			if not r:
				return

			r = client.parseDOM(r, 'table', attrs={'class': 'tmain'})[0]
			links = re.findall('<a href="(/torrent/.+?)">(.+?)</a>', r, re.DOTALL)

			for link in links:
				try:
					url = link[0].encode('ascii', errors='ignore').decode('ascii', errors='ignore').replace('&nbsp;', ' ')
				except:
					url = link[0].replace('&nbsp;', ' ')
				if '/torrent/' not in url:
					continue

				try:
					name = link[1].encode('ascii', errors='ignore').decode('ascii', errors='ignore').replace('&nbsp;', '.')
				except:
					name = link[1].replace('&nbsp;', '.')
				if '<span' in name:
					nam = name.split('<span')[0].replace(' ', '.')
					span = client.parseDOM(name, 'span')[0].replace('-', '.')
					name = '%s%s' % (nam, span)
				name = source_utils.clean_name(self.title, name)
				if source_utils.remove_lang(name):
					continue

				if not self.search_series:
					if not self.bypass_filter:
						if not source_utils.filter_season_pack(self.title, self.aliases, self.year, self.season_x, name):
							continue
					package = 'season'

				elif self.search_series:
					if not self.bypass_filter:
						valid, last_season = source_utils.filter_show_pack(self.title, self.aliases, self.imdb, self.year, self.season_x, name, self.total_seasons)
						if not valid:
							continue
					else:
						last_season = self.total_seasons
					package = 'show'

				if not url.startswith('http'): 
					link = urljoin(self.base_link, url)

				link = client.request(link, timeout='5')
				if link is None:
					continue
				hash = re.findall('<b>Infohash</b></td><td valign=top>(.+?)</td>', link, re.DOTALL)[0]
				url = 'magnet:?xt=urn:btih:%s&dn=%s' % (hash, name)
				if url in str(self.sources):
					continue

				try:
					seeders = int(re.findall('<b>Swarm:</b></td><td valign=top><font color=red>([0-9]+)</font>', link, re.DOTALL)[0].replace(',', ''))
					if self.min_seeders > seeders: # site does not seem to report seeders
						continue
				except:
					seeders = 0
					pass

				quality, info = source_utils.get_release_quality(name, url)

				try:
					size = re.findall('((?:\d+\,\d+\.\d+|\d+\.\d+|\d+\,\d+|\d+)\s*(?:GiB|MiB|GB|MB))', link)[0]
					dsize, isize = source_utils._size(size)
					info.insert(0, isize)
				except:
					dsize = 0
					pass

				info = ' | '.join(info)

				item = {'source': 'torrent', 'seeders': seeders, 'hash': hash, 'name': name, 'quality': quality,
							'language': 'en', 'url': url, 'info': info, 'direct': False, 'debridonly': True, 'size': dsize, 'package': package}
				if self.search_series:
					item.update({'last_season': last_season})
				self.sources.append(item)
		except:
			source_utils.scraper_error('TORRENTFUNK')
			pass
Example #21
    def get_sources_packs(self, link):
        # log_utils.log('link = %s' % str(link), __name__, log_utils.LOGDEBUG)
        try:
            r = client.request(link)
            if not r:
                return
            if '<tbody' not in r:
                return
            posts = client.parseDOM(r, 'tbody')[0]
            posts = client.parseDOM(posts, 'tr')
        except:
            source_utils.scraper_error('SKYTORRENTS')
            return

        for post in posts:
            try:
                post = re.sub(r'\n', '', post)
                post = re.sub(r'\t', '', post)
                link = re.findall(
                    'href="(magnet:.+?)".+<td style="text-align: center;color:green;">([0-9]+|[0-9]+,[0-9]+)</td>',
                    post, re.DOTALL)

                for url, seeders in link:
                    url = unquote_plus(url).split('&tr')[0].replace(
                        '&amp;', '&').replace(' ', '.')
                    url = source_utils.strip_non_ascii_and_unprintable(url)
                    if url in str(self.sources):
                        continue

                    hash = re.compile('btih:(.*?)&').findall(url)[0]

                    name = url.split('&dn=')[1]
                    name = source_utils.clean_name(self.title, name)
                    if source_utils.remove_lang(name):
                        continue

                    if not self.search_series:
                        if not self.bypass_filter:
                            if not source_utils.filter_season_pack(
                                    self.title, self.aliases, self.year,
                                    self.season_x, name):
                                continue
                        package = 'season'

                    elif self.search_series:
                        if not self.bypass_filter:
                            valid, last_season = source_utils.filter_show_pack(
                                self.title, self.aliases, self.imdb, self.year,
                                self.season_x, name, self.total_seasons)
                            if not valid:
                                continue
                        else:
                            last_season = self.total_seasons
                        package = 'show'

                    try:
                        seeders = int(seeders)
                        if self.min_seeders > seeders:
                            continue
                    except:
                        seeders = 0
                        pass

                    quality, info = source_utils.get_release_quality(name, url)

                    try:
                        size = re.findall(
                            '((?:\d+\,\d+\.\d+|\d+\.\d+|\d+\,\d+|\d+)\s*(?:GiB|MiB|GB|MB))',
                            post)[0]
                        dsize, isize = source_utils._size(size)
                        info.insert(0, isize)
                    except:
                        dsize = 0
                        pass

                    info = ' | '.join(info)

                    item = {
                        'source': 'torrent',
                        'seeders': seeders,
                        'hash': hash,
                        'name': name,
                        'quality': quality,
                        'language': 'en',
                        'url': url,
                        'info': info,
                        'direct': False,
                        'debridonly': True,
                        'size': dsize,
                        'package': package
                    }
                    if self.search_series:
                        item.update({'last_season': last_season})
                    self.sources.append(item)
            except:
                source_utils.scraper_error('SKYTORRENTS')
                pass
Example #22
    def get_sources(self, link):
        try:
            url = '%s%s' % (self.base_link, link)
            result = client.request(url, timeout='5')
            if result is None:
                return
            if 'magnet' not in result:
                return

            url = 'magnet:%s' % (re.findall('a href="magnet:(.+?)"', result,
                                            re.DOTALL)[0])
            url = unquote_plus(url).split('&tr=')[0].replace('&amp;',
                                                             '&').replace(
                                                                 ' ', '.')
            url = source_utils.strip_non_ascii_and_unprintable(url)
            if url in str(self.sources):
                return

            hash = re.compile('btih:(.*?)&').findall(url)[0]

            name = url.split('&dn=')[1]
            name = source_utils.clean_name(self.title, name)
            if source_utils.remove_lang(name, self.episode_title):
                return

            if not source_utils.check_title(self.title, self.aliases, name,
                                            self.hdlr, self.year):
                return

            # filter for episode multi packs (ex. S01E01-E17 is also returned in query)
            if self.episode_title:
                if not source_utils.filter_single_episodes(self.hdlr, name):
                    return

            try:
                seeders = int(
                    re.findall('<dt>SWARM</dt><dd>.*?>([0-9]+)</b>', result,
                               re.DOTALL)[0].replace(',', ''))
                if self.min_seeders > seeders:
                    return
            except:
                seeders = 0
                pass

            quality, info = source_utils.get_release_quality(name, url)

            try:
                size = re.findall('<dt>SIZE</dt><dd>(.*? [a-zA-Z]{2})', result,
                                  re.DOTALL)[0]
                dsize, isize = source_utils._size(size)
                info.insert(0, isize)
            except:
                dsize = 0
                pass

            info = ' | '.join(info)

            self.sources.append({
                'source': 'torrent',
                'seeders': seeders,
                'hash': hash,
                'name': name,
                'quality': quality,
                'language': 'en',
                'url': url,
                'info': info,
                'direct': False,
                'debridonly': True,
                'size': dsize
            })
        except:
            source_utils.scraper_error('TORLOCK')
            pass
Example #23
	def get_sources_packs(self, link):
		# log_utils.log('link = %s' % str(link), __name__, log_utils.LOGDEBUG)
		try:
			r = client.request(link)
			if not r:
				return
			div = client.parseDOM(r, 'div', attrs={'id': 'div2child'})

			for row in div:
				row = client.parseDOM(r, 'div', attrs={'class': 'resultdivbotton'})
				if not row:
					return

				for post in row:
					hash = re.findall('<div id="hideinfohash.+?" class="hideinfohash">(.+?)<', post, re.DOTALL)[0]
					name = re.findall('<div id="hidename.+?" class="hideinfohash">(.+?)<', post, re.DOTALL)[0]
					name = unquote_plus(name)
					name = source_utils.clean_name(self.title, name)
					if source_utils.remove_lang(name):
						continue

					url = 'magnet:?xt=urn:btih:%s&dn=%s' % (hash, name)

					if url in str(self.sources):
						continue

					if not self.search_series:
						if not self.bypass_filter:
							if not source_utils.filter_season_pack(self.title, self.aliases, self.year, self.season_x, name):
								continue
						package = 'season'

					elif self.search_series:
						if not self.bypass_filter:
							valid, last_season = source_utils.filter_show_pack(self.title, self.aliases, self.imdb, self.year, self.season_x, name, self.total_seasons)
							if not valid:
								continue
						else:
							last_season = self.total_seasons
						package = 'show'

					try:
						seeders = int(re.findall('<div class="resultdivbottonseed">([0-9]+|[0-9]+,[0-9]+)<', post, re.DOTALL)[0].replace(',', ''))
						if self.min_seeders > seeders:
							continue
					except:
						seeders = 0
						pass

					quality, info = source_utils.get_release_quality(name, url)

					try:
						size = re.findall('<div class="resultdivbottonlength">(.+?)<', post)[0]
						dsize, isize = source_utils._size(size)
						info.insert(0, isize)
					except:
						dsize = 0
						pass

					info = ' | '.join(info)

					item = {'source': 'torrent', 'seeders': seeders, 'hash': hash, 'name': name, 'quality': quality,
								'language': 'en', 'url': url, 'info': info, 'direct': False, 'debridonly': True, 'size': dsize, 'package': package}
					if self.search_series:
						item.update({'last_season': last_season})
					self.sources.append(item)
		except:
			source_utils.scraper_error('IDOPE')
			pass
Example #24
    def sources(self, url, hostDict, hostprDict):
        sources = []
        try:
            if url is None:
                return sources
            if debrid.status() is False:
                return sources

            data = parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            title = data['tvshowtitle'] if 'tvshowtitle' in data else data[
                'title']
            title = title.replace('&', 'and').replace('Special Victims Unit',
                                                      'SVU')
            aliases = data['aliases']
            episode_title = data['title'] if 'tvshowtitle' in data else None
            hdlr = 'S%02dE%02d' % (int(data['season']), int(
                data['episode'])) if 'tvshowtitle' in data else data['year']

            query = '%s %s' % (title, hdlr)
            query = re.sub('[^A-Za-z0-9\s\.-]+', '', query)

            url = self.search_link % quote_plus(query)
            url = urljoin(self.base_link, url)
            # log_utils.log('url = %s' % url, log_utils.LOGDEBUG)

            r = client.request(url)
            if not r or '<tbody' not in r:
                return sources

            posts = client.parseDOM(r, 'tbody')[0]
            posts = client.parseDOM(posts, 'tr')

        except:
            source_utils.scraper_error('SKYTORRENTS')
            return sources

        for post in posts:
            try:
                post = re.sub(r'\n', '', post)
                post = re.sub(r'\t', '', post)
                link = re.findall(
                    'href="(magnet:.+?)".+<td style="text-align: center;color:green;">([0-9]+|[0-9]+,[0-9]+)</td>',
                    post, re.DOTALL)

                for url, seeders in link:
                    url = unquote_plus(url).split('&tr')[0].replace(
                        '&amp;', '&').replace(' ', '.')
                    url = source_utils.strip_non_ascii_and_unprintable(url)
                    if url in str(sources):
                        continue

                    hash = re.compile('btih:(.*?)&').findall(url)[0]

                    name = url.split('&dn=')[1]
                    name = source_utils.clean_name(title, name)
                    if source_utils.remove_lang(name, episode_title):
                        continue

                    if not source_utils.check_title(title, aliases, name, hdlr,
                                                    data['year']):
                        continue

                    # filter for episode multi packs (ex. S01E01-E17 is also returned in query)
                    if episode_title:
                        if not source_utils.filter_single_episodes(hdlr, name):
                            continue

                    try:
                        seeders = int(seeders)
                        if self.min_seeders > seeders:
                            continue
                    except:
                        seeders = 0
                        pass

                    quality, info = source_utils.get_release_quality(name, url)

                    try:
                        size = re.findall(
                            '((?:\d+\,\d+\.\d+|\d+\.\d+|\d+\,\d+|\d+)\s*(?:GiB|MiB|GB|MB))',
                            post)[0]
                        dsize, isize = source_utils._size(size)
                        info.insert(0, isize)
                    except:
                        dsize = 0
                        pass

                    info = ' | '.join(info)

                    sources.append({
                        'source': 'torrent',
                        'seeders': seeders,
                        'hash': hash,
                        'name': name,
                        'quality': quality,
                        'language': 'en',
                        'url': url,
                        'info': info,
                        'direct': False,
                        'debridonly': True,
                        'size': dsize
                    })
            except:
                source_utils.scraper_error('SKYTORRENTS')
                return sources
        return sources
Example #25
	def get_sources(self, link):
		try:
			try:
				url = link[0].encode('ascii', errors='ignore').decode('ascii', errors='ignore').replace('&nbsp;', ' ')
			except:
				url = link[0].replace('&nbsp;', ' ')

			if '/torrent/' not in url:
				return

			try:
				name = link[1].encode('ascii', errors='ignore').decode('ascii', errors='ignore').replace('&nbsp;', '.')
			except:
				name = link[1].replace('&nbsp;', '.')
			if '<span' in name:
				nam = name.split('<span')[0].replace(' ', '.')
				span = client.parseDOM(name, 'span')[0].replace('-', '.')
				name = '%s%s' % (nam, span)
			name = source_utils.clean_name(self.title, name)
			if source_utils.remove_lang(name, self.episode_title):
				return

			if not source_utils.check_title(self.title, self.aliases, name, self.hdlr, self.year):
				return

			# filter for episode multi packs (ex. S01E01-E17 is also returned in query)
			if self.episode_title:
				if not source_utils.filter_single_episodes(self.hdlr, name):
					return

			if not url.startswith('http'): 
				link = urljoin(self.base_link, url)

			link = client.request(link, timeout='5')
			if link is None:
				return
			hash = re.findall('<b>Infohash</b></td><td valign=top>(.+?)</td>', link, re.DOTALL)[0]
			url = 'magnet:?xt=urn:btih:%s&dn=%s' % (hash, name)
			if url in str(self.sources):
				return

			try:
				seeders = int(re.findall('<b>Swarm:</b></td><td valign=top><font color=red>([0-9]+)</font>', link, re.DOTALL)[0].replace(',', ''))
				if self.min_seeders > seeders: # site does not seem to report seeders
					return
			except:
				seeders = 0
				pass

			quality, info = source_utils.get_release_quality(name, url)

			try:
				size = re.findall('((?:\d+\,\d+\.\d+|\d+\.\d+|\d+\,\d+|\d+)\s*(?:GiB|MiB|GB|MB))', link)[0]
				dsize, isize = source_utils._size(size)
				info.insert(0, isize)
			except:
				dsize = 0
				pass

			info = ' | '.join(info)

			self.sources.append({'source': 'torrent', 'seeders': seeders, 'hash': hash, 'name': name, 'quality': quality,
												'language': 'en', 'url': url, 'info': info, 'direct': False, 'debridonly': True, 'size': dsize})
		except:
			source_utils.scraper_error('TORRENTFUNK')
			pass