Example #1
	def sources(self, url, hostDict, hostprDict):
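		# Search 5MOVIES for the requested title (or episode), scrape its 'link-button' list and resolve up to 15 hoster links into source entries.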
		try:
			sources = []
			if not url:
				return sources
			data = parse_qs(url)
			data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
			aliases = eval(data['aliases'])
			headers = {}
			title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
			year = data['year']
			if 'tvshowtitle' in data:
				episode = data['episode']
				season = data['season']
				url = self._search(title, year, aliases, headers)
				# url = url.replace('online-free', 'season-%s-episode-%s-online-free' % (season, episode))
				url = url.rstrip('/') + '-s%se%s' % (season, episode)
			else:
				episode = None
				url = self._search(title, year, aliases, headers)
			url = url if 'http' in url else urljoin(self.base_link, url)
			# log_utils.log('url = %s' % url, __name__, log_utils.LOGDEBUG)
			result = client.request(url)
			result = client.parseDOM(result, 'li', attrs={'class':'link-button'})
			links = client.parseDOM(result, 'a', ret='href')
			i = 0
			for l in links:
				if i == 15:
					break
				try:
					l = l.split('=')[1]
					l = urljoin(self.base_link, self.video_link % l)
					result = client.request(l, post={}, headers={'Referer':url})
					u = result if 'http' in result else 'http:' + result
					if ' href' in u:
						u = u.replace('\r', '').replace('\n', '').replace('\t', '')
						u = 'http:' + re.compile(r" href='(.+?)'").findall(u)[0]
					if 'google' in u:
						valid, hoster = source_utils.is_host_valid(u, hostDict)
						urls, host, direct = source_utils.check_directstreams(u, hoster)
						for x in urls:
							sources.append({'source': host, 'quality': x['quality'], 'language': 'en', 'url': x['url'], 'direct': direct, 'debridonly': False})
					else:
						valid, hoster = source_utils.is_host_valid(u, hostDict)
						if not valid:
							continue
						try: u = u.decode('utf-8')
						except: u = source_utils.strip_non_ascii_and_unprintable(u)
						sources.append({'source': hoster, 'quality': '720p', 'language': 'en', 'url': u, 'direct': False, 'debridonly': False})
						i += 1
				except:
					source_utils.scraper_error('5MOVIES')
					pass
			return sources
		except:
			source_utils.scraper_error('5MOVIES')
			return sources
Example #2
	def get_sources(self, link):
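		# Parse an ETTV detail page: follow the result link, pull the magnet URL, then validate the name and record seeders, quality and size.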
		try:
			url = re.compile('href="(.+?)"').findall(link)[0]
			url = '%s%s' % (self.base_link, url)
			result = client.request(url)
			if result is None:
				return
			if 'magnet' not in result:
				return

			url = 'magnet:%s' % (re.findall('a href="magnet:(.+?)"', result, re.DOTALL)[0])
			url = unquote_plus(url).split('&xl=')[0].replace('&amp;', '&').replace(' ', '.')
			url = source_utils.strip_non_ascii_and_unprintable(url)
			if url in str(self.sources):
				return

			hash = re.compile('btih:(.*?)&').findall(url)[0]

			name = url.split('&dn=')[1]
			name = source_utils.clean_name(self.title, name)
			if source_utils.remove_lang(name, self.episode_title):
				return

			if not source_utils.check_title(self.title, self.aliases, name, self.hdlr, self.year):
				return

			# filter for episode multi packs (ex. S01E01-E17 is also returned in query)
			if self.episode_title:
				if not source_utils.filter_single_episodes(self.hdlr, name):
					return

			try:
				seeders = int(re.findall(r'<b>Seeds: </b>.*?>([0-9]+|[0-9]+,[0-9]+)</font>', result, re.DOTALL)[0].replace(',', ''))
				if self.min_seeders > seeders:
					return
			except:
				seeders = 0
				pass

			quality, info = source_utils.get_release_quality(name, url)

			try:
				size = re.findall(r'<b>Total Size:</b></td><td>(.*?)</td>', result, re.DOTALL)[0].strip()
				dsize, isize = source_utils._size(size)
				info.insert(0, isize)
			except:
				dsize = 0
				pass

			info = ' | '.join(info)

			self.sources.append({'source': 'torrent', 'seeders': seeders, 'hash': hash, 'name': name, 'quality': quality,
											'language': 'en', 'url': url, 'info': info, 'direct': False, 'debridonly': True, 'size': dsize})
		except:
			source_utils.scraper_error('ETTV')
			pass
Example #3
	def get_sources(self, url):
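		# Scrape a BTSCENE results page: pull the magnet link from each table row and keep releases that pass the title and episode filters.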
		try:
			r = client.request(url)
			if not r:
				return
			posts = client.parseDOM(r, 'tr')

			for post in posts:
				link = re.findall('a title="Download Torrent Magnet" href="(magnet:.+?)"', post, re.DOTALL)
				if not link:
					continue

				for url in link:
					url = unquote_plus(url).split('&tr')[0].replace('&amp;', '&').replace(' ', '.')
					url = source_utils.strip_non_ascii_and_unprintable(url)
					hash = re.compile('btih:(.*?)&').findall(url)[0]
					name = url.split('&dn=')[1]
					name = source_utils.clean_name(self.title, name)
					if source_utils.remove_lang(name, self.episode_title):
						continue

					if not source_utils.check_title(self.title, self.aliases, name, self.hdlr, self.year):
						continue

					# filter for episode multi packs (ex. S01E01-E17 is also returned in query)
					if self.episode_title:
						if not source_utils.filter_single_episodes(self.hdlr, name):
							continue

					try:
						seeders = int(client.parseDOM(post, 'td', attrs={'class': 'seeds is-hidden-sm-mobile'})[0].replace(',', ''))
						if self.min_seeders > seeders:
							continue
					except:
						seeders = 0
						pass

					quality, info = source_utils.get_release_quality(name, url)

					try:
						size = re.findall(r'((?:\d+\,\d+\.\d+|\d+\.\d+|\d+\,\d+|\d+)\s*(?:GiB|MiB|GB|MB))', post)[0]
						dsize, isize = source_utils._size(size)
						info.insert(0, isize)
					except:
						dsize = 0
						pass

					info = ' | '.join(info)

					self.sources.append({'source': 'torrent', 'seeders': seeders, 'hash': hash, 'name': name, 'quality': quality,
													'language': 'en', 'url': url, 'info': info, 'direct': False, 'debridonly': True, 'size': dsize})
		except:
			source_utils.scraper_error('BTSCENE')
			pass
Example #4
    def _get_sources(self, item):
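        # item appears to be (name, detail_url, extra_info, size, seeders); fetch the 1337X detail page and extract its magnet link.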
        try:
            name = item[0]

            quality, info = source_utils.get_release_quality(name, item[1])

            if item[2] != '0':
                info.insert(0, item[2])
            info = ' | '.join(info)

            data = client.request(item[1])
            data = client.parseDOM(data, 'a', ret='href')

            url = [i for i in data if 'magnet:' in i][0]
            url = unquote_plus(url).split('&tr')[0].replace('&amp;',
                                                            '&').replace(
                                                                ' ', '.')
            url = source_utils.strip_non_ascii_and_unprintable(url)

            hash = re.compile('btih:(.*?)&').findall(url)[0]

            self._sources.append({
                'source': 'torrent',
                'seeders': item[4],
                'hash': hash,
                'name': name,
                'quality': quality,
                'language': 'en',
                'url': url,
                'info': info,
                'direct': False,
                'debridonly': True,
                'size': item[3]
            })
        except:
            source_utils.scraper_error('1337X')
            pass
Example #5
	def get_sources_packs(self, link):
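		# Season/show-pack variant of the BTSCENE scraper: same row parsing, but names are filtered as season packs or full-show packs.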
		# log_utils.log('link = %s' % str(link), __name__, log_utils.LOGDEBUG)
		try:
			r = client.request(link)
			if not r:
				return
			posts = client.parseDOM(r, 'tr')

			for post in posts:
				link = re.findall('a title="Download Torrent Magnet" href="(magnet:.+?)"', post, re.DOTALL)
				if not link:
					continue

				for url in link:
					url = unquote_plus(url).split('&tr')[0].replace('&amp;', '&').replace(' ', '.')
					url = source_utils.strip_non_ascii_and_unprintable(url)

					hash = re.compile('btih:(.*?)&').findall(url)[0]
					name = url.split('&dn=')[1]
					name = source_utils.clean_name(self.title, name)
					if source_utils.remove_lang(name):
						continue

					if not self.search_series:
						if not self.bypass_filter:
							if not source_utils.filter_season_pack(self.title, self.aliases, self.year, self.season_x, name):
								continue
						package = 'season'

					elif self.search_series:
						if not self.bypass_filter:
							valid, last_season = source_utils.filter_show_pack(self.title, self.aliases, self.imdb, self.year, self.season_x, name, self.total_seasons)
							if not valid:
								continue
						else:
							last_season = self.total_seasons
						package = 'show'

					try:
						seeders = int(client.parseDOM(post, 'td', attrs={'class': 'seeds is-hidden-sm-mobile'})[0].replace(',', ''))
						if self.min_seeders > seeders:
							continue
					except:
						seeders = 0
						pass

					quality, info = source_utils.get_release_quality(name, url)

					try:
						size = re.findall(r'((?:\d+\,\d+\.\d+|\d+\.\d+|\d+\,\d+|\d+)\s*(?:GiB|MiB|GB|MB))', post)[0]
						dsize, isize = source_utils._size(size)
						info.insert(0, isize)
					except:
						dsize = 0
						pass

					info = ' | '.join(info)

					item = {'source': 'torrent', 'seeders': seeders, 'hash': hash, 'name': name, 'quality': quality,
								'language': 'en', 'url': url, 'info': info, 'direct': False, 'debridonly': True, 'size': dsize, 'package': package}
					if self.search_series:
						item.update({'last_season': last_season})
					self.sources.append(item)
		except:
			source_utils.scraper_error('BTSCENE')
			pass
Example #6
    def get_sources_packs(self, link):
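        # Query the SOLIDTORRENTS JSON API and collect season or full-show packs from its 'results' list.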
        # log_utils.log('link = %s' % str(link), __name__, log_utils.LOGDEBUG)
        try:
            r = client.request(link)
            if not r:
                return
            r = json.loads(r)
            results = r['results']

            for item in results:
                try:
                    url = unquote_plus(item['magnet']).replace(' ', '.')
                    url = re.sub(
                        r'(&tr=.+)&dn=', '&dn=',
                        url)  # some links on solidtorrents &tr= before &dn=
                    url = source_utils.strip_non_ascii_and_unprintable(url)
                    hash = item['infohash'].lower()

                    name = item['title']
                    name = source_utils.clean_name(self.title, name)
                    if source_utils.remove_lang(name):
                        continue

                    if not self.search_series:
                        if not self.bypass_filter:
                            if not source_utils.filter_season_pack(
                                    self.title, self.aliases, self.year,
                                    self.season_x, name):
                                continue
                        package = 'season'

                    elif self.search_series:
                        if not self.bypass_filter:
                            valid, last_season = source_utils.filter_show_pack(
                                self.title, self.aliases, self.imdb, self.year,
                                self.season_x, name, self.total_seasons)
                            if not valid:
                                continue
                        else:
                            last_season = self.total_seasons
                        package = 'show'

                    if url in str(self.sources):
                        continue

                    try:
                        seeders = int(item['swarm']['seeders'])
                        if self.min_seeders > seeders:
                            continue
                    except:
                        seeders = 0
                        pass

                    quality, info = source_utils.get_release_quality(name, url)

                    try:
                        dsize, isize = source_utils.convert_size(item["size"],
                                                                 to='GB')
                        info.insert(0, isize)
                    except:
                        dsize = 0
                        pass

                    info = ' | '.join(info)

                    item = {
                        'source': 'torrent',
                        'seeders': seeders,
                        'hash': hash,
                        'name': name,
                        'quality': quality,
                        'language': 'en',
                        'url': url,
                        'info': info,
                        'direct': False,
                        'debridonly': True,
                        'size': dsize,
                        'package': package
                    }
                    if self.search_series:
                        item.update({'last_season': last_season})
                    self.sources.append(item)
                except:
                    source_utils.scraper_error('SOLIDTORRENTS')
                    pass
        except:
            source_utils.scraper_error('SOLIDTORRENTS')
            pass
Example #7
    def get_sources(self, url):
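        # Single-title variant of the SOLIDTORRENTS scraper: walk the JSON 'results' and build torrent source entries.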
        try:
            r = client.request(url)
            if not r:
                return

            r = json.loads(r)
            results = r['results']

            for item in results:
                try:
                    url = unquote_plus(item['magnet']).replace(' ', '.')
                    url = re.sub(
                        r'(&tr=.+)&dn=', '&dn=',
                        url)  # some links on solidtorrents &tr= before &dn=
                    url = source_utils.strip_non_ascii_and_unprintable(url)
                    hash = item['infohash'].lower()

                    name = item['title']
                    name = source_utils.clean_name(self.title, name)
                    if source_utils.remove_lang(name, self.episode_title):
                        continue

                    if not source_utils.check_title(self.title, self.aliases,
                                                    name, self.hdlr,
                                                    self.year):
                        continue

                    # filter for episode multi packs (ex. S01E01-E17 is also returned in query)
                    if self.episode_title:
                        if not source_utils.filter_single_episodes(
                                self.hdlr, name):
                            continue

                    if url in str(self.sources):
                        continue

                    try:
                        seeders = int(item['swarm']['seeders'])
                        if self.min_seeders > seeders:
                            continue
                    except:
                        seeders = 0
                        pass

                    quality, info = source_utils.get_release_quality(name, url)

                    try:
                        dsize, isize = source_utils.convert_size(item["size"],
                                                                 to='GB')
                        info.insert(0, isize)
                    except:
                        dsize = 0
                        pass

                    info = ' | '.join(info)

                    self.sources.append({
                        'source': 'torrent',
                        'seeders': seeders,
                        'hash': hash,
                        'name': name,
                        'quality': quality,
                        'language': 'en',
                        'url': url,
                        'info': info,
                        'direct': False,
                        'debridonly': True,
                        'size': dsize
                    })
                except:
                    source_utils.scraper_error('SOLIDTORRENTS')
                    pass
        except:
            source_utils.scraper_error('SOLIDTORRENTS')
            pass
Example #8
	def sources(self, url, hostDict, hostprDict):
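		# Search the BITLORD API (auth headers cached via cache.get, presumably for 24 hours) and convert matching magnet results into sources.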
		sources = []
		try:
			if url is None:
				return sources
			if debrid.status() is False:
				return sources

			data = parse_qs(url)
			data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

			title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
			title = title.replace('&', 'and').replace('Special Victims Unit', 'SVU')
			aliases = data['aliases']
			episode_title = data['title'] if 'tvshowtitle' in data else None
			hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else data['year']

			query = '%s %s' % (title, hdlr)
			query = re.sub(r'[^A-Za-z0-9\s\.-]+', '', query)

			url = self.search_link % quote_plus(query)
			url = urljoin(self.base_link, url)
			# log_utils.log('url = %s' % url, log_utils.LOGDEBUG)
			api_url = urljoin(self.base_link, self.api_search_link)

			headers = cache.get(self._get_token_and_cookies, 24)
			headers.update({'Referer': url})

			query_data = {
				'query': query,
				'offset': 0,
				'limit': 99,
				'filters[field]': 'seeds',
				'filters[sort]': 'desc',
				'filters[time]': 4,
				'filters[category]': 3 if 'tvshowtitle' not in data else 4,
				'filters[adult]': False,
				'filters[risky]': False}

			rjson = client.request(api_url, post=query_data, headers=headers)
			files = json.loads(rjson)
			error = files.get('error')
			if error:
				return sources

			for file in files.get('content'):
				try:
					name = file.get('name')
					name = source_utils.clean_name(title, name)
					if source_utils.remove_lang(name, episode_title):
						continue
					if not source_utils.check_title(title, aliases, name, hdlr, data['year']):
						continue

					url = unquote_plus(file.get('magnet')).replace('&amp;', '&').replace(' ', '.')
					url = re.sub(r'(&tr=.+)&dn=', '&dn=', url) # some links on bitlord &tr= before &dn=
					url = url.split('&tr=')[0].split('&xl=')[0]
					url = source_utils.strip_non_ascii_and_unprintable(url)

					hash = re.compile('btih:(.*?)&').findall(url)[0]

					# filter for episode multi packs (ex. S01E01-E17 is also returned in query)
					if episode_title:
						if not source_utils.filter_single_episodes(hdlr, name):
							continue

					try:
						seeders = file.get('seeds')
						if self.min_seeders > seeders:
							continue
					except:
						seeders = 0
						pass

					quality, info = source_utils.get_release_quality(name, url)

					try:
						size = file.get('size')
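						# size comes back as a bare number; a single digit is treated as GB, anything longer as MB (site-specific heuristic)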
						size = str(size) + ' GB' if len(str(size)) == 1 else str(size) + ' MB'
						dsize, isize = source_utils._size(size)
						info.insert(0, isize)
					except:
						source_utils.scraper_error('BITLORD')
						dsize = 0
						pass

					info = ' | '.join(info)

					sources.append({'source': 'torrent', 'seeders': seeders, 'hash': hash, 'name': name, 'quality': quality,
												'language': 'en', 'url': url, 'info': info, 'direct': False, 'debridonly': True, 'size': dsize})
				except:
					source_utils.scraper_error('BITLORD')
					continue
			return sources
		except:
			source_utils.scraper_error('BITLORD')
			return sources
Example #9
    def get_sources_packs(self, link):
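        # Scrape TORRENTGALAXY result tables (self.scraper is assumed to be a Cloudflare-aware session) for season or full-show packs.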
        # log_utils.log('link = %s' % str(link), __name__, log_utils.LOGDEBUG)
        try:
            r = self.scraper.get(link).content
            if not r:
                return
            posts = client.parseDOM(r, 'div', attrs={'class': 'tgxtable'})
        except:
            source_utils.scraper_error('TORRENTGALAXY')
            return

        for post in posts:
            try:
                links = zip(
                    re.findall('a href="(magnet:.+?)"', post, re.DOTALL),
                    re.findall(
                        r"<span class='badge badge-secondary' style='border-radius:4px;'>(.*?)</span>",
                        post, re.DOTALL),
                    re.findall(
                        r"<span title='Seeders/Leechers'>\[<font color='green'><b>(.*?)<",
                        post, re.DOTALL))
                for link in links:
                    url = unquote_plus(link[0]).split('&tr')[0].replace(
                        ' ', '.')
                    url = source_utils.strip_non_ascii_and_unprintable(url)
                    hash = re.compile('btih:(.*?)&').findall(url)[0]

                    name = url.split('&dn=')[1]
                    name = source_utils.clean_name(self.title, name)
                    if source_utils.remove_lang(name):
                        continue

                    if not self.search_series:
                        if not self.bypass_filter:
                            if not source_utils.filter_season_pack(
                                    self.title, self.aliases, self.year,
                                    self.season_x, name):
                                continue
                        package = 'season'

                    elif self.search_series:
                        if not self.bypass_filter:
                            valid, last_season = source_utils.filter_show_pack(
                                self.title, self.aliases, self.imdb, self.year,
                                self.season_x, name, self.total_seasons)
                            if not valid:
                                continue
                        else:
                            last_season = self.total_seasons
                        package = 'show'

                    try:
                        seeders = int(link[2])
                        if self.min_seeders > seeders:
                            continue
                    except:
                        seeders = 0
                        pass

                    quality, info = source_utils.get_release_quality(name, url)

                    try:
                        dsize, isize = source_utils._size(link[1])
                        info.insert(0, isize)
                    except:
                        dsize = 0
                        pass

                    info = ' | '.join(info)

                    item = {
                        'source': 'torrent',
                        'seeders': seeders,
                        'hash': hash,
                        'name': name,
                        'quality': quality,
                        'language': 'en',
                        'url': url,
                        'info': info,
                        'direct': False,
                        'debridonly': True,
                        'size': dsize,
                        'package': package
                    }
                    if self.search_series:
                        item.update({'last_season': last_season})
                    self.sources.append(item)
            except:
                source_utils.scraper_error('TORRENTGALAXY')
                pass
Example #10
    def sources(self, url, hostDict, hostprDict):
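        # Search SKYTORRENTS, pair each magnet link with its seeder count from the results table, and filter by title and episode.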
        sources = []
        try:
            if url is None:
                return sources
            if debrid.status() is False:
                return sources

            data = parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            title = data['tvshowtitle'] if 'tvshowtitle' in data else data[
                'title']
            title = title.replace('&', 'and').replace('Special Victims Unit',
                                                      'SVU')
            aliases = data['aliases']
            episode_title = data['title'] if 'tvshowtitle' in data else None
            hdlr = 'S%02dE%02d' % (int(data['season']), int(
                data['episode'])) if 'tvshowtitle' in data else data['year']

            query = '%s %s' % (title, hdlr)
            query = re.sub(r'[^A-Za-z0-9\s\.-]+', '', query)

            url = self.search_link % quote_plus(query)
            url = urljoin(self.base_link, url)
            # log_utils.log('url = %s' % url, log_utils.LOGDEBUG)

            r = client.request(url)
            if '<tbody' not in r:
                return sources

            posts = client.parseDOM(r, 'tbody')[0]
            posts = client.parseDOM(posts, 'tr')

        except:
            source_utils.scraper_error('SKYTORRENTS')
            return sources

        for post in posts:
            try:
                post = re.sub(r'\n', '', post)
                post = re.sub(r'\t', '', post)
                link = re.findall(
                    'href="(magnet:.+?)".+<td style="text-align: center;color:green;">([0-9]+|[0-9]+,[0-9]+)</td>',
                    post, re.DOTALL)

                for url, seeders in link:
                    url = unquote_plus(url).split('&tr')[0].replace(
                        '&amp;', '&').replace(' ', '.')
                    url = source_utils.strip_non_ascii_and_unprintable(url)
                    if url in str(sources):
                        continue

                    hash = re.compile('btih:(.*?)&').findall(url)[0]

                    name = url.split('&dn=')[1]
                    name = source_utils.clean_name(title, name)
                    if source_utils.remove_lang(name, episode_title):
                        continue

                    if not source_utils.check_title(title, aliases, name, hdlr,
                                                    data['year']):
                        continue

                    # filter for episode multi packs (ex. S01E01-E17 is also returned in query)
                    if episode_title:
                        if not source_utils.filter_single_episodes(hdlr, name):
                            continue

                    try:
                        seeders = int(seeders)
                        if self.min_seeders > seeders:
                            continue
                    except:
                        seeders = 0
                        pass

                    quality, info = source_utils.get_release_quality(name, url)

                    try:
                        size = re.findall(
                            r'((?:\d+\,\d+\.\d+|\d+\.\d+|\d+\,\d+|\d+)\s*(?:GiB|MiB|GB|MB))',
                            post)[0]
                        dsize, isize = source_utils._size(size)
                        info.insert(0, isize)
                    except:
                        dsize = 0
                        pass

                    info = ' | '.join(info)

                    sources.append({
                        'source': 'torrent',
                        'seeders': seeders,
                        'hash': hash,
                        'name': name,
                        'quality': quality,
                        'language': 'en',
                        'url': url,
                        'info': info,
                        'direct': False,
                        'debridonly': True,
                        'size': dsize
                    })
            except:
                source_utils.scraper_error('SKYTORRENTS')
                return sources
        return sources
Example #11
    def get_sources_packs(self, link):
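        # Pack variant of the SKYTORRENTS scraper: same table parsing, but names are filtered as season packs or full-show packs.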
        # log_utils.log('link = %s' % str(link), __name__, log_utils.LOGDEBUG)
        try:
            r = client.request(link)
            if not r:
                return
            if '<tbody' not in r:
                return
            posts = client.parseDOM(r, 'tbody')[0]
            posts = client.parseDOM(posts, 'tr')
        except:
            source_utils.scraper_error('SKYTORRENTS')
            return

        for post in posts:
            try:
                post = re.sub(r'\n', '', post)
                post = re.sub(r'\t', '', post)
                link = re.findall(
                    'href="(magnet:.+?)".+<td style="text-align: center;color:green;">([0-9]+|[0-9]+,[0-9]+)</td>',
                    post, re.DOTALL)

                for url, seeders in link:
                    url = unquote_plus(url).split('&tr')[0].replace(
                        '&amp;', '&').replace(' ', '.')
                    url = source_utils.strip_non_ascii_and_unprintable(url)
                    if url in str(self.sources):
                        return

                    hash = re.compile('btih:(.*?)&').findall(url)[0]

                    name = url.split('&dn=')[1]
                    name = source_utils.clean_name(self.title, name)
                    if source_utils.remove_lang(name):
                        continue

                    if not self.search_series:
                        if not self.bypass_filter:
                            if not source_utils.filter_season_pack(
                                    self.title, self.aliases, self.year,
                                    self.season_x, name):
                                continue
                        package = 'season'

                    elif self.search_series:
                        if not self.bypass_filter:
                            valid, last_season = source_utils.filter_show_pack(
                                self.title, self.aliases, self.imdb, self.year,
                                self.season_x, name, self.total_seasons)
                            if not valid:
                                continue
                        else:
                            last_season = self.total_seasons
                        package = 'show'

                    try:
                        seeders = int(seeders)
                        if self.min_seeders > seeders:
                            continue
                    except:
                        seeders = 0
                        pass

                    quality, info = source_utils.get_release_quality(name, url)

                    try:
                        size = re.findall(
                            r'((?:\d+\,\d+\.\d+|\d+\.\d+|\d+\,\d+|\d+)\s*(?:GiB|MiB|GB|MB))',
                            post)[0]
                        dsize, isize = source_utils._size(size)
                        info.insert(0, isize)
                    except:
                        dsize = 0
                        pass

                    info = ' | '.join(info)

                    item = {
                        'source': 'torrent',
                        'seeders': seeders,
                        'hash': hash,
                        'name': name,
                        'quality': quality,
                        'language': 'en',
                        'url': url,
                        'info': info,
                        'direct': False,
                        'debridonly': True,
                        'size': dsize,
                        'package': package
                    }
                    if self.search_series:
                        item.update({'last_season': last_season})
                    self.sources.append(item)
            except:
                source_utils.scraper_error('SKYTORRENTS')
                pass
Example #12
	def sources(self, url, hostDict, hostprDict):
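		# Search NYAA twice, once per episode-handle format (S01E01 and 'S1 - 1'), since releases may be named either way.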
		sources = []
		try:
			if url is None:
				return sources

			if debrid.status() is False:
				return sources

			data = parse_qs(url)
			data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

			title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
			title = title.replace('&', 'and').replace('Special Victims Unit', 'SVU')
			aliases = data['aliases']

			hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else data['year']
			hdlr2 = 'S%d - %d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else data['year']

			query = '%s %s' % (title, hdlr)
			query = re.sub(r'(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', '', query)

			query2 = '%s %s' % (title, hdlr2)
			query2 = re.sub(r'(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', '', query2)

			urls = []
			url = self.search_link % quote_plus(query)
			url = urljoin(self.base_link, url)
			urls.append(url)
			url2 = self.search_link % quote_plus(query2)
			url2 = urljoin(self.base_link, url2)
			urls.append(url2)
			# log_utils.log('urls = %s' % urls, log_utils.LOGDEBUG)

			for url in urls:
				try:
					r = client.request(url)
					if not r or 'magnet' not in r:
						continue
					r = re.sub(r'\n', '', r)
					r = re.sub(r'\t', '', r)
					tbody = client.parseDOM(r, 'tbody')
					rows = client.parseDOM(tbody, 'tr')


					for row in rows:
						links = zip(
							re.findall('href="(magnet:.+?)"', row, re.DOTALL),
							re.findall(r'((?:\d+\,\d+\.\d+|\d+\.\d+|\d+\,\d+|\d+)\s*(?:GB|GiB|Gb|MB|MiB|Mb))', row, re.DOTALL),
							[re.findall('<td class="text-center">([0-9]+)</td>', row, re.DOTALL)])

						for link in links:
							url = unquote_plus(link[0]).split('&tr')[0].replace('&amp;', '&').replace(' ', '.')
							url = source_utils.strip_non_ascii_and_unprintable(url)
							hash = re.compile('btih:(.*?)&').findall(url)[0]
							name = url.split('&dn=')[1]
							name = source_utils.clean_name(title, name)

							if hdlr not in name and hdlr2 not in name:
								continue

							if source_utils.remove_lang(name):
								continue

							if hdlr in name:
								t = name.split(hdlr)[0].replace(data['year'], '').replace('(', '').replace(')', '').replace('&', 'and').replace('.US.', '.').replace('.us.', '.')

							if hdlr2 in name:
								t = name.split(hdlr2)[0].replace(data['year'], '').replace('(', '').replace(')', '').replace('&', 'and').replace('.US.', '.').replace('.us.', '.')

							# if cleantitle.get(t) != cleantitle.get(title):
								# continue

							try:
								seeders = int(link[2][0])
								if self.min_seeders > seeders:
									continue
							except:
								seeders = 0

							quality, info = source_utils.get_release_quality(name, url)

							try:
								size = link[1]
								dsize, isize = source_utils._size(size)
								info.insert(0, isize)
							except:
								dsize = 0
								pass

							info = ' | '.join(info)

							sources.append({'source': 'torrent', 'seeders': seeders, 'hash': hash, 'name': name, 'quality': quality,
														'language': 'en', 'url': url, 'info': info, 'direct': False, 'debridonly': True, 'size': dsize})
				except:
					source_utils.scraper_error('NYAA')
					return sources
			return sources
		except:
			source_utils.scraper_error('NYAA')
			return sources
Example #13
    def sources(self, url, hostDict, hostprDict):
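        # Single-title variant of the TORRENTGALAXY scraper: search, then parse magnet/size/seeders triples from each result table.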
        sources = []
        try:
            if url is None:
                return sources

            if debrid.status() is False:
                return sources

            data = parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            title = data['tvshowtitle'] if 'tvshowtitle' in data else data[
                'title']
            title = title.replace('&', 'and').replace('Special Victims Unit',
                                                      'SVU')
            aliases = data['aliases']
            episode_title = data['title'] if 'tvshowtitle' in data else None
            hdlr = 'S%02dE%02d' % (int(data['season']), int(
                data['episode'])) if 'tvshowtitle' in data else data['year']

            query = '%s %s' % (title, hdlr)
            query = re.sub(r'[^A-Za-z0-9\s\.-]+', '', query)

            url = self.search_link % quote_plus(query)
            url = urljoin(self.base_link, url)
            # log_utils.log('url = %s' % url, log_utils.LOGDEBUG)

            r = self.scraper.get(url).content
            posts = client.parseDOM(r, 'div', attrs={'class': 'tgxtable'})

            for post in posts:
                links = zip(
                    re.findall('a href="(magnet:.+?)"', post, re.DOTALL),
                    re.findall(
                        r"<span class='badge badge-secondary' style='border-radius:4px;'>(.*?)</span>",
                        post, re.DOTALL),
                    re.findall(
                        r"<span title='Seeders/Leechers'>\[<font color='green'><b>(.*?)<",
                        post, re.DOTALL))

                for link in links:
                    url = unquote_plus(link[0]).split('&tr')[0].replace(
                        ' ', '.')
                    url = source_utils.strip_non_ascii_and_unprintable(url)
                    hash = re.compile('btih:(.*?)&').findall(url)[0]

                    name = url.split('&dn=')[1]
                    name = source_utils.clean_name(title, name)
                    if source_utils.remove_lang(name, episode_title):
                        continue

                    if not source_utils.check_title(title, aliases, name, hdlr,
                                                    data['year']):
                        continue

                    # filter for episode multi packs (ex. S01E01-E17 is also returned in query)
                    if episode_title:
                        if not source_utils.filter_single_episodes(hdlr, name):
                            continue

                    try:
                        seeders = int(link[2])
                        if self.min_seeders > seeders:
                            continue
                    except:
                        seeders = 0
                        pass

                    quality, info = source_utils.get_release_quality(name, url)

                    try:
                        dsize, isize = source_utils._size(link[1])
                        info.insert(0, isize)
                    except:
                        dsize = 0
                        pass

                    info = ' | '.join(info)

                    sources.append({
                        'source': 'torrent',
                        'seeders': seeders,
                        'hash': hash,
                        'name': name,
                        'quality': quality,
                        'language': 'en',
                        'url': url,
                        'info': info,
                        'direct': False,
                        'debridonly': True,
                        'size': dsize
                    })
            return sources
        except:
            source_utils.scraper_error('TORRENTGALAXY')
            return sources
Example #14
    def get_sources(self, link):
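        # Parse one EXTRATORRENT result row: magnet link, seeders from the 'sy' cell and size from the row text.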
        try:
            url = 'magnet:%s' % (re.findall('a href="magnet:(.+?)"', link,
                                            re.DOTALL)[0])
            url = unquote_plus(url).split('&tr')[0].replace('&amp;',
                                                            '&').replace(
                                                                ' ', '.')
            url = source_utils.strip_non_ascii_and_unprintable(url)
            if url in str(self.sources):
                return

            hash = re.compile('btih:(.*?)&').findall(url)[0]

            name = url.split('&dn=')[1]
            name = source_utils.clean_name(self.title, name)
            if source_utils.remove_lang(name, self.episode_title):
                return

            if not source_utils.check_title(self.title, self.aliases, name,
                                            self.hdlr, self.year):
                return

            # filter for episode multi packs (ex. S01E01-E17 is also returned in query)
            if self.episode_title:
                if not source_utils.filter_single_episodes(self.hdlr, name):
                    return

            try:
                seeders = int(
                    client.parseDOM(link, 'td',
                                    attrs={'class': 'sy'})[0].replace(',', ''))
                if self.min_seeders > seeders:
                    return
            except:
                seeders = 0
                pass

            quality, info = source_utils.get_release_quality(name, url)

            try:
                size = re.findall(
                    r'((?:\d+\,\d+\.\d+|\d+\.\d+|\d+\,\d+|\d+)\s*(?:GiB|MiB|GB|MB))',
                    link)[0]
                dsize, isize = source_utils._size(size)
                info.insert(0, isize)
            except:
                source_utils.scraper_error('EXTRATORRENT')
                dsize = 0
                pass

            info = ' | '.join(info)

            self.sources.append({
                'source': 'torrent',
                'seeders': seeders,
                'hash': hash,
                'name': name,
                'quality': quality,
                'language': 'en',
                'url': url,
                'info': info,
                'direct': False,
                'debridonly': True,
                'size': dsize
            })
        except:
            source_utils.scraper_error('EXTRATORRENT')
            pass
Example #15
	def get_sources_packs(self, link, url):
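		# Pack variant of the BITLORD API scraper: post the season/show query and filter results as season packs or full-show packs.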
		try:
			# log_utils.log('link = %s' % str(link), log_utils.LOGDEBUG)
			# log_utils.log('url = %s' % url, log_utils.LOGDEBUG)
			self.headers.update({'Referer': link})
			query_data = {
				'query': url,
				'offset': 0,
				'limit': 99,
				'filters[field]': 'seeds',
				'filters[sort]': 'desc',
				'filters[time]': 4,
				'filters[category]': 4,
				'filters[adult]': False,
				'filters[risky]': False}

			api_url = urljoin(self.base_link, self.api_search_link)
			rjson = client.request(api_url, post=query_data, headers=self.headers)

			files = json.loads(rjson)
			error = files.get('error')
			if error:
				return

			for file in files.get('content'):
				try:
					name = file.get('name')
					name = source_utils.clean_name(self.title, name)

					url = unquote_plus(file.get('magnet')).replace('&amp;', '&').replace(' ', '.')
					url = re.sub(r'(&tr=.+)&dn=', '&dn=', url) # some links on bitlord &tr= before &dn=
					url = url.split('&tr=')[0].split('&xl=')[0]
					url = source_utils.strip_non_ascii_and_unprintable(url)

					hash = re.compile('btih:(.*?)&').findall(url)[0]
					if source_utils.remove_lang(name):
						continue

					if not self.search_series:
						if not self.bypass_filter:
							if not source_utils.filter_season_pack(self.title, self.aliases, self.year, self.season_x, name):
								continue
						package = 'season'

					elif self.search_series:
						if not self.bypass_filter:
							valid, last_season = source_utils.filter_show_pack(self.title, self.aliases, self.imdb, self.year, self.season_x, name, self.total_seasons)
							if not valid:
								continue
						else:
							last_season = self.total_seasons
						package = 'show'

					try:
						seeders = file.get('seeds')
						if self.min_seeders > seeders:
							continue
					except:
						seeders = 0
						pass

					quality, info = source_utils.get_release_quality(name, url)

					try:
						size = file.get('size')
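						# size comes back as a bare number; a single digit is treated as GB, anything longer as MB (site-specific heuristic)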
						size = str(size) + ' GB' if len(str(size)) == 1 else str(size) + ' MB'
						dsize, isize = source_utils._size(size)
						info.insert(0, isize)
					except:
						source_utils.scraper_error('BITLORD')
						dsize = 0
						pass

					info = ' | '.join(info)

					item = {'source': 'torrent', 'seeders': seeders, 'hash': hash, 'name': name, 'quality': quality,
								'language': 'en', 'url': url, 'info': info, 'direct': False, 'debridonly': True, 'size': dsize, 'package': package}
					if self.search_series:
						item.update({'last_season': last_season})
					self.sources.append(item)
				except:
					source_utils.scraper_error('BITLORD')
					continue
		except:
			source_utils.scraper_error('BITLORD')
			pass
Example #16
    def get_sources(self, url):
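        # Scrape BTDB 'media' blocks with cfscrape (Cloudflare bypass), reading the seeder count before each block's magnet links.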
        try:
            scraper = cfscrape.create_scraper()
            r = scraper.get(url).content
            if not r:
                return
            posts = client.parseDOM(r, 'div', attrs={'class': 'media'})
            for post in posts:
                # file_name = client.parseDOM(post, 'span', attrs={'class': 'file-name'}) # file_name and &dn= differ 25% of the time.  May add check
                try:
                    seeders = int(
                        re.findall(
                            r'Seeders\s+:\s+<strong class="text-success">([0-9]+|[0-9]+,[0-9]+)</strong>',
                            post, re.DOTALL)[0].replace(',', ''))
                    if self.min_seeders > seeders:
                        return
                except:
                    seeders = 0
                    pass

                link = re.findall('<a href="(magnet:.+?)"', post, re.DOTALL)
                for url in link:
                    url = unquote_plus(url).split('&tr')[0].replace(
                        '&amp;', '&').replace(' ', '.')
                    url = source_utils.strip_non_ascii_and_unprintable(url)

                    hash = re.compile('btih:(.*?)&').findall(url)[0]
                    name = url.split('&dn=')[1]
                    name = source_utils.clean_name(self.title, name)
                    if source_utils.remove_lang(name, self.episode_title):
                        continue

                    if not source_utils.check_title(self.title, self.aliases,
                                                    name, self.hdlr,
                                                    self.year):
                        continue

                    # filter for episode multi packs (ex. S01E01-E17 is also returned in query)
                    if self.episode_title:
                        if not source_utils.filter_single_episodes(
                                self.hdlr, name):
                            continue

                    quality, info = source_utils.get_release_quality(name, url)

                    try:
                        size = re.findall(
                            r'((?:\d+\,\d+\.\d+|\d+\.\d+|\d+\,\d+|\d+)\s*(?:GB|GiB|Gb|MB|MiB|Mb))',
                            post)[0]
                        dsize, isize = source_utils._size(size)
                        info.insert(0, isize)
                    except:
                        dsize = 0
                        pass

                    info = ' | '.join(info)

                    self.sources.append({
                        'source': 'torrent',
                        'seeders': seeders,
                        'hash': hash,
                        'name': name,
                        'quality': quality,
                        'language': 'en',
                        'url': url,
                        'info': info,
                        'direct': False,
                        'debridonly': True,
                        'size': dsize
                    })
        except:
            source_utils.scraper_error('BTDB')
            pass
Example #17
    def get_sources_packs(self, link):
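        # Scrape Zooqle's torrent table for season/show packs; release names are cleaned of foreign-title prefixes before filtering.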
        # log_utils.log('link = %s' % str(link), __name__, log_utils.LOGDEBUG)
        try:
            # For some reason Zooqle returns 404 even though the response has a body.
            # This is probably a bug on Zooqle's server and the error should just be ignored.
            html = client.request(link, ignoreErrors=404)
            if not html:
                return
            html = html.replace('&nbsp;', ' ')

            try:
                results = client.parseDOM(
                    html,
                    'table',
                    attrs={
                        'class': 'table table-condensed table-torrents vmiddle'
                    })[0]
            except:
                return

            rows = re.findall('<tr(.+?)</tr>', results, re.DOTALL)
            if not rows:
                return

            for entry in rows:
                try:
                    try:
                        if 'magnet:' not in entry:
                            continue
                        url = 'magnet:%s' % (re.findall(
                            'href="magnet:(.+?)"', entry, re.DOTALL)[0])
                        url = unquote_plus(url).split('&tr')[0].replace(
                            '&amp;', '&').replace(' ', '.')
                        url = source_utils.strip_non_ascii_and_unprintable(url)
                        if url in str(self.sources):
                            continue
                    except:
                        continue

                    hash = re.compile('btih:(.*?)&').findall(url)[0]

                    try:
                        name = re.findall('<a class=".+?>(.+?)</a>', entry,
                                          re.DOTALL)[0]
                        name = client.replaceHTMLCodes(name).replace(
                            '<hl>', '').replace('</hl>', '')
                        name = unquote_plus(name)
                        name = source_utils.clean_name(self.title, name)
                        # name = url.split('&dn=')[1]
                    except:
                        continue
                    if source_utils.remove_lang(name):
                        continue

                    # some titles have foreign title translation in front so remove it
                    if './.' in name:
                        name = name.split('./.', 1)[1]
                    if '.com.' in name.lower():
                        try:
                            name = re.sub(r'(.*?)\W{2,10}', '', name)
                        except:
                            name = name.split('-.', 1)[1].lstrip()

                    if not self.search_series:
                        if not self.bypass_filter:
                            if not source_utils.filter_season_pack(
                                    self.title, self.aliases, self.year,
                                    self.season_x, name):
                                continue
                        package = 'season'

                    elif self.search_series:
                        if not self.bypass_filter:
                            valid, last_season = source_utils.filter_show_pack(
                                self.title, self.aliases, self.imdb, self.year,
                                self.season_x, name, self.total_seasons)
                            if not valid:
                                continue
                        else:
                            last_season = self.total_seasons
                        package = 'show'

                    try:
                        seeders = int(
                            re.findall(
                                'class="progress prog trans90" title="Seeders: (.+?) \|',
                                entry, re.DOTALL)[0].replace(',', ''))
                        if self.min_seeders > seeders:
                            continue
                    except:
                        seeders = 0
                        pass

                    quality, info = source_utils.get_release_quality(name, url)

                    try:
                        size = re.findall(
                            r'((?:\d+\.\d+|\d+\,\d+|\d+)\s*(?:GB|GiB|MB|MiB))',
                            entry)[-1]
                        dsize, isize = source_utils._size(size)
                        info.insert(0, isize)
                    except:
                        dsize = 0
                        pass

                    info = ' | '.join(info)

                    item = {
                        'source': 'torrent',
                        'seeders': seeders,
                        'hash': hash,
                        'name': name,
                        'quality': quality,
                        'language': 'en',
                        'url': url,
                        'info': info,
                        'direct': False,
                        'debridonly': True,
                        'size': dsize,
                        'package': package
                    }
                    if self.search_series:
                        item.update({'last_season': last_season})
                    self.sources.append(item)
                except:
                    continue
        except:
            source_utils.scraper_error('ZOOQLE')
            pass