Code Example #1
File: 7torrents.py Project: deangrice07/dg.github.io
	def sources(self, data, hostDict):
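		# Build a title/episode search query, then scrape results page 1 and page 2 in parallel worker threads.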
		self.sources = []
		if not data: return self.sources
		try:
			self.title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
			self.title = self.title.replace('&', 'and').replace('Special Victims Unit', 'SVU')
			self.aliases = data['aliases']
			self.episode_title = data['title'] if 'tvshowtitle' in data else None
			self.year = data['year']
			self.hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else self.year

			query = '%s %s' % (self.title, self.hdlr)
			query = re.sub(r'[^A-Za-z0-9\s\.-]+', '', query)
			urls = []
			url = '%s%s' % (self.base_link, self.search_link % quote_plus(query))
			urls.append(url)
			urls.append(url + '&page=2')
			# log_utils.log('urls = %s' % urls)

			threads = []
			for url in urls:
				threads.append(workers.Thread(self.get_sources, url))
			[i.start() for i in threads]
			[i.join() for i in threads]
			return self.sources
		except:
			source_utils.scraper_error('7torrents')
			return self.sources
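Note: workers.Thread above appears to be the add-on's thin wrapper around standard Python threads. The fan-out/join pattern itself can be sketched with only the standard library (the URLs and the fetch stub are illustrative, not part of the original scraper):

import threading

def fetch(url, results):
	# stands in for self.get_sources(url); each worker appends its findings to a shared list
	results.append(url)

urls = ['https://example.org/search?q=x', 'https://example.org/search?q=x&page=2']
results = []
threads = [threading.Thread(target=fetch, args=(url, results)) for url in urls]
[i.start() for i in threads]
[i.join() for i in threads]
print(results)  # order may vary; list.append is atomic in CPython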
Code Example #2
    def sources(self, data, hostDict):
        self.sources = []
        if not data: return self.sources
        try:
            self.title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
            self.title = self.title.replace('&', 'and').replace('Special Victims Unit', 'SVU')
            self.aliases = data['aliases']
            self.episode_title = data['title'] if 'tvshowtitle' in data else None
            self.year = data['year']
            self.hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else self.year

            query = '%s %s' % (self.title, self.hdlr)
            query = re.sub(r'[^A-Za-z0-9\s\.-]+', '', query)
            url = '%s%s' % (self.base_link, self.search_link % quote_plus(query))
            # log_utils.log('url = %s' % url)

            result = client.request(url, timeout='5')
            if not result or '<tbody' not in result: return self.sources
            table = client.parseDOM(result, 'tbody')[0]
            rows = client.parseDOM(table, 'tr')
            threads = []
            for row in rows:
                threads.append(workers.Thread(self.get_sources, row))
            [i.start() for i in threads]
            [i.join() for i in threads]
            return self.sources
        except:
            source_utils.scraper_error('ISOHUNT2')
            return self.sources
Code Example #3
    def sources(self, url, hostDict):
        sources = []
        if not url: return sources
        try:
            if (self.user == '' or self.password == ''): return sources

            url = urljoin(self.base_link, url)
            url = client.request(url, headers=self.headers)
            url = jsloads(url)['url']
            # log_utils.log('url = %s' % url, __name__, log_utils.LOGDEBUG)

            name = re.sub(r'(.*?)\/video/file/(.*?)/', '', url).split('.smil')[0].split('-')[0]
            quality, info = source_utils.get_release_quality(name)

            sources.append({
                'provider': 'ororo',
                'source': 'direct',
                'name': name,
                'quality': quality,
                'language': 'en',
                'url': url,
                'info': info,
                'direct': True,
                'debridonly': False,
                'size': 0
            })  # Ororo does not return a file size
            return sources
        except:
            source_utils.scraper_error('ORORO')
            return sources
Code Example #4
File: 7torrents.py Project: deangrice07/dg.github.io
	def sources_packs(self, data, hostDict, search_series=False, total_seasons=None, bypass_filter=False):
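		# Build season-pack (or whole-show) search queries, then scrape each results URL in its own worker thread.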
		self.sources = []
		if not data: return self.sources
		try:
			self.search_series = search_series
			self.total_seasons = total_seasons
			self.bypass_filter = bypass_filter

			self.title = data['tvshowtitle'].replace('&', 'and').replace('Special Victims Unit', 'SVU')
			self.aliases = data['aliases']
			self.imdb = data['imdb']
			self.year = data['year']
			self.season_x = data['season']
			self.season_xx = self.season_x.zfill(2)

			query = re.sub(r'[^A-Za-z0-9\s\.-]+', '', self.title)
			queries = [
						self.search_link % quote_plus(query + ' S%s' % self.season_xx),
						self.search_link % quote_plus(query + ' Season %s' % self.season_x)]
			if search_series:
				queries = [
						self.search_link % quote_plus(query + ' Season'),
						self.search_link % quote_plus(query + ' Complete')]
			threads = []
			for url in queries:
				link = '%s%s' % (self.base_link, url)
				threads.append(workers.Thread(self.get_sources_packs, link))
			[i.start() for i in threads]
			[i.join() for i in threads]
			return self.sources
		except:
			source_utils.scraper_error('7torrents')
			return self.sources
Code Example #5
File: bitcq.py Project: deangrice07/dg.github.io
	def sources(self, data, hostDict):
		sources = []
		if not data: return sources
		try:
			title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
			title = title.replace('&', 'and').replace('Special Victims Unit', 'SVU')
			aliases = data['aliases']
			episode_title = data['title'] if 'tvshowtitle' in data else None
			year = data['year']
			hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else year
			query = re.sub(r'[^A-Za-z0-9\s\.-]+', '', '%s %s' % (title, hdlr))
			url = '%s%s' % (self.base_link, self.search_link % quote_plus(query))
			# log_utils.log('url = %s' % url, log_utils.LOGDEBUG)
			r = client.request(url, timeout='5')
			if not r: return sources
			if any(value in str(r) for value in ['something went wrong', 'Connection timed out', '521: Web server is down', '503 Service Unavailable']):
				return sources
			table = client.parseDOM(r, 'tbody')
			rows = client.parseDOM(table, 'tr')
		except:
			source_utils.scraper_error('BITCQ')
			return sources

		for row in rows:
			try:
				if 'magnet' not in row: continue
				url = re.findall(r'href="(magnet:.+?)"', row, re.DOTALL)[0]
				url = unquote_plus(url).replace('&amp;', '&').replace(' ', '.').split('&tr')[0]
				url = source_utils.strip_non_ascii_and_unprintable(url)
				hash = re.search(r'btih:(.*?)&', url, re.I).group(1)
				name = source_utils.clean_name(url.split('&dn=')[1])

				if not source_utils.check_title(title, aliases, name, hdlr, year): continue
				name_info = source_utils.info_from_name(name, title, year, hdlr, episode_title)
				if source_utils.remove_lang(name_info): continue

				if not episode_title: # filter for eps returned in movie query (rare but movie and show exists for Run in 2020)
					ep_strings = [r'(?:\.|\-)s\d{2}e\d{2}(?:\.|\-|$)', r'(?:\.|\-)s\d{2}(?:\.|\-|$)', r'(?:\.|\-)season(?:\.|\-)\d{1,2}(?:\.|\-|$)']
					if any(re.search(item, name.lower()) for item in ep_strings): continue

				try:
					seeders = int(re.search(r'<td>(\d+)<', row).group(1))
					if self.min_seeders > seeders: continue
				except: seeders = 0

				quality, info = source_utils.get_release_quality(name_info, url)
				try:
					size = re.search(r'((?:\d+\,\d+\.\d+|\d+\.\d+|\d+\,\d+|\d+)\s*(?:GB|GiB|Gb|MB|MiB|Mb))', row).group(0)
					dsize, isize = source_utils._size(size)
					info.insert(0, isize)
				except: dsize = 0
				info = ' | '.join(info)

				sources.append({'provider': 'bitcq', 'source': 'torrent', 'seeders': seeders, 'hash': hash, 'name': name, 'name_info': name_info, 'quality': quality,
											'language': 'en', 'url': url, 'info': info, 'direct': False, 'debridonly': True, 'size': dsize})
			except:
				source_utils.scraper_error('BITCQ')
				return sources
		return sources
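The magnet-link handling these scrapers repeat (decode, strip the tracker list, pull out the btih hash and display name) works standalone; the sample link below is made up:

import re
from urllib.parse import unquote_plus

raw = 'magnet:?xt=urn:btih:ABC123DEF456&amp;dn=Some+Show+S01E01+1080p&amp;tr=udp://tracker.example:80'
url = unquote_plus(raw).replace('&amp;', '&').replace(' ', '.').split('&tr')[0]  # drop tracker list
hash = re.search(r'btih:(.*?)&', url, re.I).group(1)
name = url.split('&dn=')[1]
print(hash, name)  # ABC123DEF456 Some.Show.S01E01.1080p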
Code Example #6
	def get_sources_packs(self, link):
		# log_utils.log('link = %s' % str(link), __name__, log_utils.LOGDEBUG)
		try:
			r = client.request(link, timeout='5')
			if not r: return
			if any(value in r for value in ['something went wrong', 'Connection timed out', '521: Web server is down', '503 Service Unavailable']):
				return
			table = client.parseDOM(r, 'table', attrs={'id': 'table'})
			table_body = client.parseDOM(table, 'tbody')
			rows = client.parseDOM(table_body, 'tr')
		except:
			source_utils.scraper_error('TORRENTZ2')
			return
		for row in rows:
			try:
				if 'magnet:' not in row: continue
				url = re.findall(r'href\s*=\s*["\'](magnet:[^"\']+)["\']', row, re.DOTALL | re.I)[0]
				url = unquote_plus(url).replace('&amp;', '&').replace(' ', '.').split('&tr')[0]
				url = source_utils.strip_non_ascii_and_unprintable(url)
				hash = re.compile(r'btih:(.*?)&', re.I).findall(url)[0]

				name = url.split('&dn=')[1]
				name = source_utils.clean_name(name)
				if not self.search_series:
					if not self.bypass_filter:
						if not source_utils.filter_season_pack(self.title, self.aliases, self.year, self.season_x, name):
							continue
					package = 'season'

				elif self.search_series:
					if not self.bypass_filter:
						valid, last_season = source_utils.filter_show_pack(self.title, self.aliases, self.imdb, self.year, self.season_x, name, self.total_seasons)
						if not valid: continue
					else:
						last_season = self.total_seasons
					package = 'show'

				name_info = source_utils.info_from_name(name, self.title, self.year, season=self.season_x, pack=package)
				if source_utils.remove_lang(name_info): continue
				try:
					# seeders = int(client.parseDOM(row, 'td', attrs={'data-title': 'Seeds'})[0])
					seeders = int(client.parseDOM(row, 'td', attrs={'data-title': 'Last Updated'})[0]) #keep an eye on this, looks like they gaffed their col's (seeders and size)
					if self.min_seeders > seeders: continue
				except: seeders = 0

				quality, info = source_utils.get_release_quality(name_info, url)
				try:
					size = re.findall(r'((?:\d+\,\d+\.\d+|\d+\.\d+|\d+\,\d+|\d+)\s*(?:GB|GiB|Gb|MB|MiB|Mb))', row.replace(u'\xa0', u' ').replace(u'&nbsp;', u' '))[0]
					dsize, isize = source_utils._size(size)
					info.insert(0, isize)
				except: dsize = 0
				info = ' | '.join(info)

				item = {'provider': 'torrentz2', 'source': 'torrent', 'seeders': seeders, 'hash': hash, 'name': name, 'name_info': name_info, 'quality': quality,
							'language': 'en', 'url': url, 'info': info, 'direct': False, 'debridonly': True, 'size': dsize, 'package': package}
				if self.search_series: item.update({'last_season': last_season})
				self.sources.append(item)
			except:
				source_utils.scraper_error('TORRENTZ2')
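The size pattern shared by these scrapers can be exercised on its own against an invented table row:

import re

row = '<td>1.4 GB</td><td>2,048 MB</td>'
pattern = r'((?:\d+\,\d+\.\d+|\d+\.\d+|\d+\,\d+|\d+)\s*(?:GB|GiB|Gb|MB|MiB|Mb))'
print(re.findall(pattern, row))  # ['1.4 GB', '2,048 MB']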
Code Example #7
 def _get_token(self):
     try:
         token = self.scraper.get(self.token).content
         token = jsloads(token)["token"]  # json can handle byte encoded strings
         return token
     except:
         source_utils.scraper_error('TORRENTAPI')
Code Example #8
 def _get_token(self):
     try:
         token = self.scraper.get(self.token).content
         if not token: return '3qk6aj27ws'
         token = jsloads(token)["token"]
         return token
     except:
         source_utils.scraper_error('TORRENTAPI')
Code Example #9
 def tvshow(self, imdb, tvdb, tvshowtitle, aliases, year):
     try:
         query = tvshowtitle.replace('&', 'and')
         query = re.sub(r'[^A-Za-z0-9\s\.-]+', '', query)
         return query
     except:
         source_utils.scraper_error('GDRIVE')
         return
Code Example #10
File: bitcq.py Project: deangrice07/dg.github.io
	def get_sources_packs(self, link):
		# log_utils.log('link = %s' % str(link), __name__, log_utils.LOGDEBUG)
		try:
			r = client.request(link, timeout='5')
			if not r: return
			if any(value in str(r) for value in ['something went wrong', 'Connection timed out', '521: Web server is down', '503 Service Unavailable']):
				return
			table = client.parseDOM(r, 'tbody')
			rows = client.parseDOM(table, 'tr')
		except:
			source_utils.scraper_error('BITCQ')
			return

		for row in rows:
			try:
				if 'magnet' not in row: continue
				url = re.findall(r'href="(magnet:.+?)"', row, re.DOTALL)[0]
				url = unquote_plus(url).replace('&amp;', '&').replace(' ', '.').split('&tr')[0]
				url = source_utils.strip_non_ascii_and_unprintable(url)
				hash = re.search(r'btih:(.*?)&', url, re.I).group(1)
				name = source_utils.clean_name(url.split('&dn=')[1])

				if not self.search_series:
					if not self.bypass_filter:
						if not source_utils.filter_season_pack(self.title, self.aliases, self.year, self.season_x, name):
							continue
					package = 'season'

				elif self.search_series:
					if not self.bypass_filter:
						valid, last_season = source_utils.filter_show_pack(self.title, self.aliases, self.imdb, self.year, self.season_x, name, self.total_seasons)
						if not valid: continue
					else:
						last_season = self.total_seasons
					package = 'show'

				name_info = source_utils.info_from_name(name, self.title, self.year, season=self.season_x, pack=package)
				if source_utils.remove_lang(name_info): continue

				try:
					seeders = int(re.search(r'<td>(\d+)<', row).group(1))
					if self.min_seeders > seeders: continue
				except: seeders = 0

				quality, info = source_utils.get_release_quality(name_info, url)
				try:
					size = re.search(r'((?:\d+\,\d+\.\d+|\d+\.\d+|\d+\,\d+|\d+)\s*(?:GB|GiB|Gb|MB|MiB|Mb))', row).group(0)
					dsize, isize = source_utils._size(size)
					info.insert(0, isize)
				except: dsize = 0
				info = ' | '.join(info)

				item = {'provider': 'bitcq', 'source': 'torrent', 'seeders': seeders, 'hash': hash, 'name': name, 'name_info': name_info, 'quality': quality,
							'language': 'en', 'url': url, 'info': info, 'direct': False, 'debridonly': True, 'size': dsize, 'package': package}
				if self.search_series: item.update({'last_season': last_season})
				self.sources.append(item)
			except:
				source_utils.scraper_error('BITCQ')
Code Example #11
	def get_sources_packs(self, link):
		# log_utils.log('link = %s' % str(link), __name__, log_utils.LOGDEBUG)
		try:
			r = py_tools.ensure_str(self.scraper.get(link).content, errors='replace')
			if not r: return
			posts = client.parseDOM(r, 'div', attrs={'class': 'tgxtable'})
			if not posts: return
		except:
			source_utils.scraper_error('TORRENTGALAXY')
			return
		for post in posts:
			try:
				links = zip(
							re.findall(r'href\s*=\s*["\'](magnet:[^"\']+)["\']', post, re.DOTALL | re.I),
							re.findall(r'<span\s*class\s*=\s*["\']badge\s*badge-secondary["\']\s*style\s*=\s*["\']border-radius:4px;["\']>(.*?)</span>', post, re.DOTALL | re.I),
							re.findall(r'<span\s*title\s*=\s*["\']Seeders/Leechers["\']>\[<font\s*color\s*=\s*["\']green["\']><b>(.*?)<', post, re.DOTALL | re.I))
				for link in links:
					url = unquote_plus(link[0]).split('&tr')[0].replace(' ', '.')
					url = source_utils.strip_non_ascii_and_unprintable(url)
					hash = re.compile(r'btih:(.*?)&', re.I).findall(url)[0]

					name = url.split('&dn=')[1]
					name = source_utils.clean_name(name)
					if not self.search_series:
						if not self.bypass_filter:
							if not source_utils.filter_season_pack(self.title, self.aliases, self.year, self.season_x, name):
								continue
						package = 'season'

					elif self.search_series:
						if not self.bypass_filter:
							valid, last_season = source_utils.filter_show_pack(self.title, self.aliases, self.imdb, self.year, self.season_x, name, self.total_seasons)
							if not valid: continue
						else:
							last_season = self.total_seasons
						package = 'show'

					name_info = source_utils.info_from_name(name, self.title, self.year, season=self.season_x, pack=package)
					if source_utils.remove_lang(name_info): continue
					try:
						seeders = int(link[2])
						if self.min_seeders > seeders: continue
					except: seeders = 0

					quality, info = source_utils.get_release_quality(name_info, url)
					try:
						dsize, isize = source_utils._size(link[1])
						info.insert(0, isize)
					except: dsize = 0
					info = ' | '.join(info)

					item = {'provider': 'torrentgalaxy', 'source': 'torrent', 'seeders': seeders, 'hash': hash, 'name': name, 'name_info': name_info, 'quality': quality,
								'language': 'en', 'url': url, 'info': info, 'direct': False, 'debridonly': True, 'size': dsize, 'package': package}
					if self.search_series: item.update({'last_season': last_season})
					self.sources.append(item)
			except:
				source_utils.scraper_error('TORRENTGALAXY')
Code Example #12
 def episode(self, url, imdb, tvdb, title, premiered, season, episode):
     try:
         query = url + " S" + str(season).zfill(2) + "E" + str(
             episode).zfill(2)
         query = quote_plus(query)
         return query
     except:
         source_utils.scraper_error('GDRIVE')
         return
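Both zero-padding idioms used in these examples produce the same SxxExx tag:

season, episode = 1, 7
print('S%02dE%02d' % (int(season), int(episode)))                # S01E07
print('S' + str(season).zfill(2) + 'E' + str(episode).zfill(2))  # S01E07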
Code Example #13
	def get_sources_packs(self, link):
		# log_utils.log('link = %s' % str(link), __name__, log_utils.LOGDEBUG)
		try:
			headers = {'User-Agent': client.agent()}
			r = py_tools.ensure_str(self.scraper.get(link, headers=headers).content, errors='replace')
			if not r or '<table' not in r: return
			table = client.parseDOM(r, 'table', attrs={'class': 'table2'})[0]
			rows = client.parseDOM(table, 'tr')
			if not rows: return
		except:
			source_utils.scraper_error('LIMETORRENTS')
			return
		for row in rows:
			try:
				data = client.parseDOM(row, 'a', ret='href')[0]
				if '/search/' in data: continue
				data = re.sub(r'\s', '', data).strip()
				hash = re.compile(r'/torrent/(.+?)\.torrent', re.I).findall(data)[0]
				name = re.findall(r'title\s*=\s*(.+?)$', data, re.DOTALL | re.I)[0]
				name = source_utils.clean_name(name)
				url = 'magnet:?xt=urn:btih:%s&dn=%s' % (hash, name)

				if not self.search_series:
					if not self.bypass_filter:
						if not source_utils.filter_season_pack(self.title, self.aliases, self.year, self.season_x, name):
							continue
					package = 'season'

				elif self.search_series:
					if not self.bypass_filter:
						valid, last_season = source_utils.filter_show_pack(self.title, self.aliases, self.imdb, self.year, self.season_x, name, self.total_seasons)
						if not valid: continue
					else:
						last_season = self.total_seasons
					package = 'show'

				name_info = source_utils.info_from_name(name, self.title, self.year, season=self.season_x, pack=package)
				if source_utils.remove_lang(name_info): continue
				try:
					seeders = int(client.parseDOM(row, 'td', attrs={'class': 'tdseed'})[0].replace(',', ''))
					if self.min_seeders > seeders: continue
				except: seeders = 0

				quality, info = source_utils.get_release_quality(name_info, url)
				try:
					size = re.findall(r'((?:\d+\,\d+\.\d+|\d+\.\d+|\d+\,\d+|\d+)\s*(?:GB|GiB|Gb|MB|MiB|Mb))', row)[0]
					dsize, isize = source_utils._size(size)
					info.insert(0, isize)
				except: dsize = 0
				info = ' | '.join(info)

				item = {'provider': 'limetorrents', 'source': 'torrent', 'seeders': seeders, 'hash': hash, 'name': name, 'name_info': name_info, 'quality': quality,
							'language': 'en', 'url': url, 'info': info, 'direct': False, 'debridonly': True, 'size': dsize, 'package': package}
				if self.search_series: item.update({'last_season': last_season})
				self.sources.append(item)
			except:
				source_utils.scraper_error('LIMETORRENTS')
Code Example #14
	def episode(self, url, imdb, tvdb, title, premiered, season, episode):
		try:
			if not url: return
			url = parse_qs(url)
			url = dict([(i, url[i][0]) if url[i] else (i, '') for i in url])
			url['title'], url['premiered'], url['season'], url['episode'] = title, premiered, season, episode
			url = urlencode(url)
			return url
		except:
			source_utils.scraper_error('ETTV')
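The url value passed between these methods is just a urlencoded dict; the round-trip looks like this (sample values are illustrative):

from urllib.parse import parse_qs, urlencode

url = urlencode({'imdb': 'tt0203259', 'tvshowtitle': 'Law and Order SVU', 'year': '1999'})
data = parse_qs(url)
data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])  # flatten single-item lists
data.update({'premiered': '1999-09-20', 'season': '1', 'episode': '1'})
print(urlencode(data))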
Code Example #15
 def movie(self, imdb, title, aliases, year):
     try:
         title = title.replace('&', 'and')
         query = '%s %s' % (title, str(year))
         query = re.sub(r'[^A-Za-z0-9\s\.-]+', '', query)
         query = quote_plus(query)
         return query
     except:
         source_utils.scraper_error('GDRIVE')
         return
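The query-sanitizing steps shared by these search methods are easy to verify in isolation:

import re
from urllib.parse import quote_plus

query = 'Law & Order: SVU 1999'
query = query.replace('&', 'and')
query = re.sub(r'[^A-Za-z0-9\s\.-]+', '', query)  # strip punctuation the trackers reject
print(quote_plus(query))  # Law+and+Order+SVU+1999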
Code Example #16
	def get_sources_packs(self, link):
		# log_utils.log('link = %s' % str(link), __name__, log_utils.LOGDEBUG)
		try:
			r = client.request(link, timeout='5')
			if not r: return
			posts = client.parseDOM(r, 'div', attrs={'class': 'media'})
		except:
			source_utils.scraper_error('BTDB')
			return
		for post in posts:
			try:
				if 'magnet:' not in post: continue
				url = re.findall(r'href\s*=\s*["\'](magnet:[^"\']+)["\']', post, re.DOTALL | re.I)[0]
				url = unquote_plus(url).replace('&amp;', '&').replace(' ', '.').split('&tr')[0]
				url = source_utils.strip_non_ascii_and_unprintable(url)
				if url in str(self.sources): continue
				hash = re.compile(r'btih:(.*?)&', re.I).findall(url)[0]
				name = url.split('&dn=')[1]
				name = source_utils.clean_name(name)

				if not self.search_series:
					if not self.bypass_filter:
						if not source_utils.filter_season_pack(self.title, self.aliases, self.year, self.season_x, name):
							continue
					package = 'season'

				elif self.search_series:
					if not self.bypass_filter:
						valid, last_season = source_utils.filter_show_pack(self.title, self.aliases, self.imdb, self.year, self.season_x, name, self.total_seasons)
						if not valid: continue
					else: last_season = self.total_seasons
					package = 'show'

				name_info = source_utils.info_from_name(name, self.title, self.year, season=self.season_x, pack=package)
				if source_utils.remove_lang(name_info): continue

				try:
					seeders = int(re.findall(r'Seeders.*?["\']>([0-9]+|[0-9]+,[0-9]+)</strong>', post, re.DOTALL | re.I)[0].replace(',', ''))
					if self.min_seeders > seeders: continue
				except: seeders = 0

				quality, info = source_utils.get_release_quality(name_info, url)
				try:
					size = re.findall(r'((?:\d+\,\d+\.\d+|\d+\.\d+|\d+\,\d+|\d+)\s*(?:GB|GiB|Gb|MB|MiB|Mb))', post)[0]
					dsize, isize = source_utils._size(size)
					info.insert(0, isize)
				except: dsize = 0
				info = ' | '.join(info)

				item = {'provider': 'btdb', 'source': 'torrent', 'seeders': seeders, 'hash': hash, 'name': name, 'name_info': name_info, 'quality': quality,
							'language': 'en', 'url': url, 'info': info, 'direct': False, 'debridonly': True, 'size': dsize, 'package': package}
				if self.search_series: item.update({'last_season': last_season})
				self.sources.append(item)
			except:
				source_utils.scraper_error('BTDB')
Code Example #17
	def sources(self, url, hostDict):
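		# Query the YTS.MX API by IMDb id, then turn each listed torrent into a magnet-based source entry.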
		sources = []
		if not url: return sources
		try:
			data = parse_qs(url)
			data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

			title = data['title'].replace('&', 'and')
			aliases = data['aliases']
			hdlr = data['year']
			year = data['year']
			imdb = data['imdb']

			url = self.search_link % imdb
			api_url = urljoin(self.base_link, url)
			# log_utils.log('api_url = %s' % api_url, log_utils.LOGDEBUG)

			rjson = client.request(api_url, timeout='5')
			if not rjson: return sources
			files = jsloads(rjson)
			if files.get('status') == 'error' or files.get('data').get('movie_count') == 0:
				return sources
			title_long = files.get('data').get('movies')[0].get('title_long').replace(' ', '.')
			torrents = files.get('data').get('movies')[0].get('torrents')
		except:
			source_utils.scraper_error('YTSMX')
			return sources
		for torrent in torrents:
			try:
				quality = torrent.get('quality')
				type = torrent.get('type')
				hash = torrent.get('hash')
				name = '%s.[%s].[%s].[YTS.MX]' % (title_long, quality, type)
				url = 'magnet:?xt=urn:btih:%s&dn=%s' % (hash, name)
				if not source_utils.check_title(title, aliases, name, hdlr, year): continue
				name_info = source_utils.info_from_name(name, title, year, hdlr)
				if source_utils.remove_lang(name_info): continue
				try:
					seeders = torrent.get('seeds')
					if self.min_seeders > seeders: continue
				except: seeders = 0

				quality, info = source_utils.get_release_quality(name_info, url)
				try:
					size = torrent.get('size')
					dsize, isize = source_utils._size(size)
					info.insert(0, isize)
				except: dsize = 0
				info = ' | '.join(info)

				sources.append({'provider': 'ytsmx', 'source': 'torrent', 'seeders': seeders, 'hash': hash, 'name': name, 'name_info': name_info,
											'quality': quality, 'language': 'en', 'url': url, 'info': info, 'direct': False, 'debridonly': True, 'size': dsize})
			except:
				source_utils.scraper_error('YTSMX')
		return sources
Code Example #18
 def ororo_tvcache(self, user):
     try:
         url = urljoin(self.base_link, self.tvsearch_link)
         r = client.request(url, headers=self.headers)
         r = jsloads(r)['shows']
         r = [(str(i['id']), str(i['imdb_id'])) for i in r]
         r = [(i[0], 'tt' + re.sub(r'[^0-9]', '', i[1])) for i in r]
         return r
     except:
         source_utils.scraper_error('ORORO')
         return
Code Example #19
 def tvshow(self, imdb, tvdb, tvshowtitle, aliases, year):
     try:
         return urlencode({
             'imdb': imdb,
             'tvdb': tvdb,
             'tvshowtitle': tvshowtitle,
             'year': year
         })
     except:
         source_utils.scraper_error('LIBRARY')
         return
Code Example #20
    def get_items(self, url):
        try:
            headers = {'User-Agent': client.agent()}
            r = client.request(url, headers=headers, timeout='10')
            if not r or '<tbody' not in r: return
            posts = client.parseDOM(r, 'tbody')[0]
            posts = client.parseDOM(posts, 'tr')
        except:
            source_utils.scraper_error('1337X')
            return
        for post in posts:
            try:
                data = client.parseDOM(post, 'a', ret='href')[1]
                link = urljoin(self.base_link, data)

                name = client.parseDOM(post, 'a')[1]
                name = source_utils.clean_name(unquote_plus(name))
                if not source_utils.check_title(self.title, self.aliases, name, self.hdlr, self.year): continue
                name_info = source_utils.info_from_name(name, self.title, self.year, self.hdlr, self.episode_title)
                if source_utils.remove_lang(name_info): continue

                if not self.episode_title:  # filter for eps returned in movie query (rare but movie and show exists for Run in 2020)
                    ep_strings = [r'[.-]s\d{2}e\d{2}([.-]?)', r'[.-]s\d{2}([.-]?)', r'[.-]season[.-]?\d{1,2}[.-]?']
                    if any(re.search(item, name.lower()) for item in ep_strings): continue
                try:
                    seeders = int(client.parseDOM(post, 'td', attrs={'class': 'coll-2 seeds'})[0].replace(',', ''))
                    if self.min_seeders > seeders: continue
                except: seeders = 0
                try:
                    size = re.findall(r'((?:\d+\,\d+\.\d+|\d+\.\d+|\d+\,\d+|\d+)\s*(?:GB|GiB|Gb|MB|MiB|Mb))', post)[0]
                    dsize, isize = source_utils._size(size)
                except:
                    isize = '0'
                    dsize = 0
                self.items.append((name, name_info, link, isize, dsize, seeders))
            except:
                source_utils.scraper_error('1337X')
Code Example #21
    def sources_packs(self, url, hostDict, search_series=False, total_seasons=None, bypass_filter=False):
        self.sources = []
        if not url: return self.sources
        try:
            self.search_series = search_series
            self.total_seasons = total_seasons
            self.bypass_filter = bypass_filter

            data = parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

            self.title = data['tvshowtitle'].replace('&', 'and').replace('Special Victims Unit', 'SVU')
            self.aliases = data['aliases']
            self.imdb = data['imdb']
            self.year = data['year']
            self.season_x = data['season']
            self.season_xx = self.season_x.zfill(2)

            query = re.sub(r'[^A-Za-z0-9\s\.-]+', '', self.title)
            queries = [
                self.search_link.format(query[0].lower(), cleantitle.geturl(query + ' S%s' % self.season_xx)),
                self.search_link.format(query[0].lower(), cleantitle.geturl(query + ' Season %s' % self.season_x))]
            if search_series:
                queries = [
                    self.search_link.format(query[0].lower(), cleantitle.geturl(query + ' Season')),
                    self.search_link.format(query[0].lower(), cleantitle.geturl(query + ' Complete'))]
            threads = []
            for url in queries:
                link = urljoin(self.base_link, url)
                threads.append(workers.Thread(self.get_sources_packs, link))
            [i.start() for i in threads]
            [i.join() for i in threads]
            return self.sources
        except:
            source_utils.scraper_error('MAGNETDL')
            return self.sources
Code Example #22
 def tvshow(self, imdb, tvdb, tvshowtitle, aliases, year):
     try:
         if (self.user == '' or self.password == ''): return
         url = cache.get(self.ororo_tvcache, 120, self.user)
         if not url: return
         url = [i[0] for i in url if imdb == i[1]]
         if not url: return
         url = self.show_link % url[0]
         return url
     except:
         source_utils.scraper_error('ORORO')
         return
Code Example #23
 def get_pack_items(self, url):
     try:
         r = client.request(url, timeout='5')
         if not r: return
         links = re.findall(r'<a\s*href\s*=\s*["\'](.+?torrent.html)["\']', r, re.I)
         for link in links:
             url = '%s%s' % (self.base_link, link)
             self.items.append(url)
         return self.items
     except:
         source_utils.scraper_error('TORRENTPROJECT2')
Code Example #24
    def sources(self, data, hostDict):
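        # Resolve the IMDb id to an Ororo show id, match the requested episode by season/episode or air date, then fetch its direct stream URL.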
        sources = []
        if not data: return sources
        try:
            if (self.user == '' or self.password == ''): return sources

            url = cache.get(self.ororo_tvcache, 120, self.user)
            if not url: return sources
            url = [i[0] for i in url if data['imdb'] == i[1]]
            if not url: return sources
            url = self.show_link % url[0]

            url = urljoin(self.base_link, url)
            r = client.request(url, headers=self.headers)
            r = jsloads(r)['episodes']
            r = [(str(i['id']), str(i['season']), str(i['number']), str(i['airdate'])) for i in r]
            url = [i for i in r if data['season'] == i[1] and data['episode'] == i[2]]
            url += [i for i in r if data['premiered'] == i[3]]
            if not url: return sources
            url = self.episode_link % url[0][0]

            url = urljoin(self.base_link, url)
            url = client.request(url, headers=self.headers)
            if not url: return sources
            url = jsloads(url)['url']
            # log_utils.log('url = %s' % url, __name__)

            name = re.sub(r'(.*?)\/video/file/(.*?)/', '', url).split('.smil')[0].split('-')[0]
            quality, info = source_utils.get_release_quality(name)
            info = ' | '.join(info)

            sources.append({
                'provider': 'ororo',
                'source': 'direct',
                'name': name,
                'quality': quality,
                'language': 'en',
                'url': url,
                'info': info,
                'direct': True,
                'debridonly': False,
                'size': 0
            })  # Ororo does not return a file size
            return sources
        except:
            source_utils.scraper_error('ORORO')
            return sources
Code Example #25
 def movie(self, imdb, title, aliases, year):
     try:
         url = {
             'imdb': imdb,
             'title': title,
             'aliases': aliases,
             'year': year
         }
         url = urlencode(url)
         return url
     except:
         source_utils.scraper_error('300MBFILMS')
         return
Code Example #26
 def movie(self, imdb, title, aliases, year):  # seems Ororo does not provide Movies
     try:
         if (self.user == '' or self.password == ''): return
         url = cache.get(self.ororo_moviecache, 60, self.user)
         if not url: return
         url = [i[0] for i in url if imdb == i[1]]
         if not url: return
         url = self.movie_link % url[0]
         return url
     except:
         source_utils.scraper_error('ORORO')
         return
Code Example #27
 def episode(self, url, imdb, tvdb, title, premiered, season, episode):
     try:
         if not url: return
         url = parse_qs(url)
         url = dict([(i, url[i][0]) if url[i] else (i, '') for i in url])
         url.update({
             'premiered': premiered,
             'season': season,
             'episode': episode
         })
         return urlencode(url)
     except:
         source_utils.scraper_error('LIBRARY')
         return
Code Example #28
 def __get_base_url(self, fallback):
     for domain in self.domains:
         try:
             url = 'https://%s' % domain
             result = client.request(url, limit=1, timeout='5')
             try:
                 result = re.findall(r'<title>(.+?)</title>', result, re.DOTALL)[0]
             except:
                 result = None
             if result and 'Kickass' in result: return url
         except:
             source_utils.scraper_error('KICKASS2')
     return fallback
Code Example #29
 def tvshow(self, imdb, tvdb, tvshowtitle, aliases, year):
     try:
         url = {
             'imdb': imdb,
             'tvdb': tvdb,
             'tvshowtitle': tvshowtitle,
             'aliases': aliases,
             'year': year
         }
         url = urlencode(url)
         return url
     except:
         source_utils.scraper_error('RLSBB')
         return
Code Example #30
File: bitlord.py Project: deangrice07/dg.github.io
	def _get_token_and_cookies(self):
		headers = None
		try:
			# returned from client (result, response_code, response_headers, headers, cookie)
			post = client.request(self.base_link, output='extended', timeout='10')
			if not post: return headers
			token_id = re.findall(r'token\: (.*)\n', post[0])[0]
			token = ''.join(re.findall(token_id + r" ?\+?\= ?'(.*)'", post[0]))
			headers = post[3]
			headers.update({'Cookie': post[4].replace('SameSite=Lax, ', ''), 'X-Request-Token': token})
			return headers
		except:
			source_utils.scraper_error('BITLORD')
			return headers