Example #1
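A Furk.net API scraper. It builds an @name/@files search query from the title and season/episode, separates single videos from multi-file packs, and returns direct sources with quality and size info.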
	def sources(self, data, hostDict):
		sources = []
		if not data: return sources
		api_key = self.get_api()
		if not api_key: return sources
		try:
			title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
			title = title.replace('&', 'and').replace('Special Victims Unit', 'SVU')
			aliases = data['aliases'] # not used atm
			episode_title = data['title'] if 'tvshowtitle' in data else None
			year = data['year']
			hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else year

			content_type = 'episode' if 'tvshowtitle' in data else 'movie'
			match = 'extended'
			moderated = 'no' if content_type == 'episode' else 'yes'
			search_in = ''

			if content_type == 'movie':
				years = '%s+|+%s+|+%s' % (str(int(year) - 1), year, str(int(year) + 1))
				query = '@name+%s+%s' % (title, years)

			elif content_type == 'episode':
				season = int(data['season'])
				episode = int(data['episode'])
				seasEpList = self._seas_ep_query_list(season, episode)
				query = '@name+%s+@files+%s+|+%s+|+%s+|+%s+|+%s' % (title, seasEpList[0], seasEpList[1], seasEpList[2], seasEpList[3], seasEpList[4])

			s = requests.Session()
			link = self.base_link + self.search_link % (api_key, query, match, moderated, search_in)

			p = s.get(link)
			p = jsloads(p.text)
			if p.get('status') != 'ok': return sources

			files = p.get('files')
			if not files: return sources
			for i in files:
				if i['is_ready'] == '1' and i['type'] == 'video':
					try:
						source = 'direct SINGLE'
						if int(i['files_num_video']) > 3:
							source = 'direct PACK (x%02d)' % int(i['files_num_video'])
						file_name = i['name']
						name = source_utils.clean_name(file_name)
						name_info = source_utils.info_from_name(name, title, year, hdlr, episode_title)

						file_id = i['id']
						file_dl = i['url_dl']

						if content_type == 'episode':
							url = jsdumps({'content': 'episode', 'file_id': file_id, 'season': season, 'episode': episode})
						else:
							url = jsdumps({'content': 'movie', 'file_id': file_id, 'title': title, 'year': year})

						quality, info = source_utils.get_release_quality(name_info, file_dl)
						try:
							size = float(i['size'])
							if 'PACK' in source:
								size = float(size) / int(i['files_num_video'])
							dsize, isize = source_utils.convert_size(size, to='GB')
							if isize: info.insert(0, isize)
						except:
							source_utils.scraper_error('FURK')
							dsize = 0
						info = ' | '.join(info)

						sources.append({'provider': 'furk', 'source': source, 'name': name, 'name_info': name_info, 'quality': quality, 'language': "en", 'url': url,
													'info': info, 'direct': True, 'debridonly': False, 'size': dsize})
					except:
						source_utils.scraper_error('FURK')
			return sources
		except:
			source_utils.scraper_error('FURK')
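
For context, every `sources(self, data, hostDict)` method in these examples destructures the same `data` dict. A minimal sketch of its shape, inferred from the key lookups above and below (the concrete values are made up):

# Illustrative only: keys inferred from the lookups in these examples.
episode_data = {
    'tvshowtitle': 'Law and Order Special Victims Unit',  # present only for episode searches
    'title': 'Pilot',  # holds the episode title when 'tvshowtitle' is present
    'aliases': [],  # alternate titles consulted by the title checks
    'year': '1999', 'season': '1', 'episode': '1', 'imdb': 'tt0203259',
}
movie_data = {'title': 'Run', 'aliases': [], 'year': '2020', 'imdb': 'tt8266310'}

# hdlr becomes 'SxxExx' for episodes and the year string for movies:
hdlr = 'S%02dE%02d' % (int(episode_data['season']), int(episode_data['episode']))  # 'S01E01'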
Example #2
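A Premiumize.me cloud scraper. It walks the account's cloud files, filters them by title, video extension, and extras keywords, collapses M2TS folders to their largest file, and returns direct debrid sources.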
    def sources(self, data, hostDict):
        sources = []
        if not data: return sources
        try:
            title = data['tvshowtitle'] if 'tvshowtitle' in data else data[
                'title']
            title = title.replace('&', 'and').replace('Special Victims Unit',
                                                      'SVU')
            aliases = data['aliases']
            episode_title = data['title'] if 'tvshowtitle' in data else None
            self.year = data['year']
            hdlr = 'S%02dE%02d' % (int(data['season']), int(
                data['episode'])) if 'tvshowtitle' in data else self.year
            self.season = str(
                data['season']) if 'tvshowtitle' in data else None
            self.episode = str(
                data['episode']) if 'tvshowtitle' in data else None
            query_list = self.episode_query_list(
            ) if 'tvshowtitle' in data else self.year_query_list()
            # log_utils.log('query_list = %s' % query_list)
            cloud_files = premiumize.Premiumize().my_files_all()
            if not cloud_files: return sources
            cloud_files = [
                i for i in cloud_files if i['path'].lower().endswith(
                    tuple(supported_video_extensions()))
            ]  # keeps only entries whose path ends in a known video extension
            if not cloud_files: return sources
            ignoreM2ts = getSetting('pm_cloud.ignore.m2ts') == 'true'
            extras_filter = cloud_utils.extras_filter()
        except:
            from resources.lib.modules import log_utils
            log_utils.error('PM_CLOUD: ')
            return sources

        for item in cloud_files:
            is_m2ts = False
            try:
                name = item.get('name', '')
                invalids = ('.img', '.bin', '.dat', '.mpls', '.mpl', '.bdmv',
                            '.bdm', '.disc')
                if name.lower().endswith(invalids): continue

                path = item.get('path', '').lower()
                if not cloud_utils.cloud_check_title(title, aliases, path):
                    continue
                rt = cloud_utils.release_title_format(name)
                if any(value in rt for value in extras_filter): continue

                if name.endswith('m2ts'):
                    if ignoreM2ts: continue
                    name = item.get('path', '').split('/')[0]
                    if name in str(sources): continue
                    if all(not bool(re.search(i, rt)) for i in query_list):
                        continue  # TODO: verify this check does not drop movie titles that lack the year
                    is_m2ts = True
                    m2ts_files = [
                        i for i in cloud_files if name in i.get('path')
                    ]
                    largest = sorted(m2ts_files,
                                     key=lambda k: k['size'],
                                     reverse=True)[0]
                    url_id = largest.get('id', '')
                    size = largest.get('size', '')
                else:
                    if all(not bool(re.search(i, rt)) for i in query_list):
                        if 'tvshowtitle' in data:
                            season_folder_list = self.season_folder_list()
                            if all(not bool(re.search(i, path))
                                   for i in season_folder_list):
                                continue
                            episode_list = self.episode_list()
                            if all(not bool(re.search(i, rt))
                                   for i in episode_list):
                                continue
                        else:
                            if all(not bool(re.search(i, path))
                                   for i in query_list):
                                continue
                            name = item.get('path', '').split('/')[0]
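                    # 52428800 bytes = 50 MiB; drop files smaller than that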
                            if item.get('size') < 52428800: continue
                    url_id = item.get('id', '')
                    size = item.get('size', '')

                name_info = fs_utils.info_from_name(name, title, self.year,
                                                    hdlr, episode_title)
                quality, info = fs_utils.get_release_quality(name_info, name)
                try:
                    dsize, isize = fs_utils.convert_size(size, to='GB')
                    info.insert(0, isize)
                except:
                    dsize = 0
                if is_m2ts: info.append('M2TS')
                info = ' | '.join(info)

                sources.append({
                    'provider': 'pm_cloud',
                    'source': 'cloud',
                    'debrid': 'Premiumize.me',
                    'seeders': '',
                    'hash': '',
                    'name': name,
                    'name_info': name_info,
                    'quality': quality,
                    'language': 'en',
                    'url': url_id,
                    'info': info,
                    'direct': True,
                    'debridonly': True,
                    'size': dsize
                })
            except:
                from resources.lib.modules import log_utils
                log_utils.error('PM_CLOUD: ')
                return sources
        return sources
Example #3
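A Real-Debrid cloud scraper. It iterates the account's downloaded torrents, matches each selected file against episode/year query lists, maps the file's index to its Real-Debrid link, and returns direct debrid sources.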
	def sources(self, data, hostDict):
		sources = []
		if not data: return sources
		try:
			title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
			title = title.replace('&', 'and').replace('Special Victims Unit', 'SVU')
			aliases = data['aliases']
			episode_title = data['title'] if 'tvshowtitle' in data else None
			self.year = data['year']
			hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else self.year
			self.season = str(data['season']) if 'tvshowtitle' in data else None
			self.episode = str(data['episode']) if 'tvshowtitle' in data else None
			query_list = self.episode_query_list() if 'tvshowtitle' in data else self.year_query_list()
			# log_utils.log('query_list = %s' % query_list)
			cloud_folders = realdebrid.RealDebrid().user_torrents()
			if not cloud_folders: return sources
			cloud_folders = [i for i in cloud_folders if i['status'] == 'downloaded']
			if not cloud_folders: return sources
			ignoreM2ts = getSetting('rd_cloud.ignore.m2ts') == 'true'
			extras_filter = cloud_utils.extras_filter()
		except:
			from resources.lib.modules import log_utils
			log_utils.error('RD_CLOUD: ')
			return sources

		for folder in cloud_folders:
			is_m2ts = False
			try:
				folder_name = folder.get('filename', '')
				if not cloud_utils.cloud_check_title(title, aliases, folder_name): continue
				id = folder.get('id', '')
				torrent_info = realdebrid.RealDebrid().torrent_info(id)
				folder_files = torrent_info['files']
				folder_files = [i for i in folder_files if i['selected'] == 1]
			except:
				from resources.lib.modules import log_utils
				log_utils.error('RD_CLOUD: ')
				return sources

			for file in folder_files:
				try:
					name = file.get('path').lstrip('/')
					rt = cloud_utils.release_title_format(name)
					if not name.lower().endswith(tuple(supported_video_extensions())): continue
					if any(value in rt for value in extras_filter): continue

					if name.endswith('m2ts'):
						if ignoreM2ts: continue
						name = folder_name
						rt = cloud_utils.release_title_format(name)
						if name in str(sources): continue
						if all(not bool(re.search(i, rt)) for i in query_list): continue  # TODO: verify this check does not drop movie titles that lack the year
						is_m2ts = True
						largest = sorted(folder_files, key=lambda k: k['bytes'], reverse=True)[0]
						index_pos = folder_files.index(largest)
						size = largest['bytes']
						try: link = torrent_info['links'][index_pos]
						except: link = torrent_info['links'][0]
					else:
						if all(not bool(re.search(i, rt)) for i in query_list):
							if 'tvshowtitle' in data:
								season_folder_list = self.season_folder_list()
								nl = name.lower()
								if all(not bool(re.search(i, nl)) for i in season_folder_list): continue
								episode_list = self.episode_list()
								if all(not bool(re.search(i, rt)) for i in episode_list): continue
							else:
								if all(not bool(re.search(i, folder_name)) for i in query_list): continue
								name = folder_name
								if file.get('bytes') < 52428800: continue

						name = name.split('/')[-1]
						index_pos = folder_files.index(file)
						link = torrent_info['links'][index_pos]
						size = file.get('bytes', '')

					name_info = fs_utils.info_from_name(name, title, self.year, hdlr, episode_title)
					hash = folder.get('hash', '')
					quality, info = fs_utils.get_release_quality(name_info, name)
					try:
						dsize, isize = fs_utils.convert_size(size, to='GB')
						info.insert(0, isize)
					except: dsize = 0
					if is_m2ts: info.append('M2TS')
					info = ' / '.join(info)

					sources.append({'provider': 'rd_cloud', 'source': 'cloud', 'debrid': 'Real-Debrid', 'seeders': '', 'hash': hash, 'name': name, 'name_info': name_info,
												'quality': quality, 'language': 'en', 'url': link, 'info': info, 'direct': True, 'debridonly': True, 'size': dsize})
				except:
					from resources.lib.modules import log_utils
					log_utils.error('RD_CLOUD: ')
					return sources
		return sources
Example #4
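A TorrentParadise pack scraper. It fetches JSON results, filters season or show packs (unless filtering is bypassed), and appends magnet sources tagged with the pack type to self.sources.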
    def get_sources_packs(self, link):
        try:
            # log_utils.log('link = %s' % str(link), __name__, log_utils.LOGDEBUG)
            rjson = client.request(link, timeout='5')
            if not rjson or rjson == 'null' or any(
                    value in rjson for value in [
                        '521 Origin Down', 'No results returned',
                        'Connection Time-out', 'Database maintenance'
                    ]):
                return
            files = jsloads(rjson)
        except:
            source_utils.scraper_error('TORRENTPARADISE')
            return
        for file in files:
            try:
                hash = file['id']
                name = source_utils.clean_name(file['text'])

                if not self.search_series:
                    if not self.bypass_filter:
                        if not source_utils.filter_season_pack(
                                self.title, self.aliases, self.year,
                                self.season_x, name):
                            continue
                    package = 'season'

                elif self.search_series:
                    if not self.bypass_filter:
                        valid, last_season = source_utils.filter_show_pack(
                            self.title, self.aliases, self.imdb, self.year,
                            self.season_x, name, self.total_seasons)
                        if not valid: continue
                    else:
                        last_season = self.total_seasons
                    package = 'show'

                name_info = source_utils.info_from_name(name,
                                                        self.title,
                                                        self.year,
                                                        season=self.season_x,
                                                        pack=package)
                if source_utils.remove_lang(name_info): continue

                url = 'magnet:?xt=urn:btih:%s&dn=%s' % (hash, name)
                try:
                    seeders = int(file['s'])
                    if self.min_seeders > seeders: continue
                except:
                    seeders = 0

                quality, info = source_utils.get_release_quality(
                    name_info, url)
                try:
                    dsize, isize = source_utils.convert_size(float(
                        file["len"]),
                                                             to='GB')
                    info.insert(0, isize)
                except:
                    dsize = 0
                info = ' | '.join(info)

                item = {
                    'provider': 'torrentparadise',
                    'source': 'torrent',
                    'seeders': seeders,
                    'hash': hash,
                    'name': name,
                    'name_info': name_info,
                    'quality': quality,
                    'language': 'en',
                    'url': url,
                    'info': info,
                    'direct': False,
                    'debridonly': True,
                    'size': dsize,
                    'package': package
                }
                if self.search_series:
                    item.update({'last_season': last_season})
                self.sources.append(item)
            except:
                source_utils.scraper_error('TORRENTPARADISE')
Example #5
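A TorrentParadise scraper for single titles. It queries by title and handler (SxxExx or year), validates release names, filters episode releases out of movie queries, and returns magnet sources.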
    def sources(self, url, hostDict):
        sources = []
        if not url: return sources
        try:
            data = parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            title = data['tvshowtitle'] if 'tvshowtitle' in data else data[
                'title']
            title = title.replace('&', 'and').replace('Special Victims Unit',
                                                      'SVU')
            aliases = data['aliases']
            episode_title = data['title'] if 'tvshowtitle' in data else None
            year = data['year']
            hdlr = 'S%02dE%02d' % (int(data['season']), int(
                data['episode'])) if 'tvshowtitle' in data else year

            query = '%s %s' % (title, hdlr)
            query = re.sub(r'[^A-Za-z0-9\s\.-]+', '', query)
            url = self.search_link % quote_plus(query)
            url = urljoin(self.base_link, url)
            # log_utils.log('url = %s' % url)

            rjson = client.request(url, timeout='5')
            if not rjson or rjson == 'null' or any(
                    value in rjson for value in [
                        '521 Origin Down', 'No results returned',
                        'Connection Time-out', 'Database maintenance'
                    ]):
                return sources
            files = jsloads(rjson)
        except:
            source_utils.scraper_error('TORRENTPARADISE')
            return sources
        for file in files:
            try:
                hash = file['id']
                name = source_utils.clean_name(file['text'])

                if not source_utils.check_title(title, aliases, name, hdlr,
                                                year):
                    continue
                name_info = source_utils.info_from_name(
                    name, title, year, hdlr, episode_title)
                if source_utils.remove_lang(name_info): continue

                url = 'magnet:?xt=urn:btih:%s&dn=%s' % (hash, name)

                if not episode_title:  # filter out episodes returned by a movie query (rare, but a movie and a show both exist for Run in 2020)
                    ep_strings = [
                        r'(?:\.|\-)s\d{2}e\d{2}(?:\.|\-|$)',
                        r'(?:\.|\-)s\d{2}(?:\.|\-|$)',
                        r'(?:\.|\-)season(?:\.|\-)\d{1,2}(?:\.|\-|$)'
                    ]
                    if any(
                            re.search(item, name.lower())
                            for item in ep_strings):
                        continue
                try:
                    seeders = int(file['s'])
                    if self.min_seeders > seeders: continue
                except:
                    seeders = 0

                quality, info = source_utils.get_release_quality(
                    name_info, url)
                try:
                    dsize, isize = source_utils.convert_size(float(
                        file["len"]),
                                                             to='GB')
                    info.insert(0, isize)
                except:
                    dsize = 0
                info = ' | '.join(info)

                sources.append({
                    'provider': 'torrentparadise',
                    'source': 'torrent',
                    'seeders': seeders,
                    'hash': hash,
                    'name': name,
                    'name_info': name_info,
                    'quality': quality,
                    'language': 'en',
                    'url': url,
                    'info': info,
                    'direct': False,
                    'debridonly': True,
                    'size': dsize
                })
            except:
                source_utils.scraper_error('TORRENTPARADISE')
        return sources
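
The ep_strings filter above is easy to exercise on its own. A quick standalone check (patterns copied from the example; the sample release name is invented):

import re

ep_strings = [
    r'(?:\.|\-)s\d{2}e\d{2}(?:\.|\-|$)',
    r'(?:\.|\-)s\d{2}(?:\.|\-|$)',
    r'(?:\.|\-)season(?:\.|\-)\d{1,2}(?:\.|\-|$)'
]
name = 'run.2020.s01e04.1080p.web'  # an episode a movie query for Run (2020) could return
if any(re.search(p, name.lower()) for p in ep_strings):
    print('skipped: release name carries an episode marker')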
Example #6
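A torrentapi season-pack scraper. It searches by IMDb id with a cached API token, pauses between requests for the API's rate limit, and returns season-pack magnet sources; torrentapi has no show packs.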
    def sources_packs(self,
                      data,
                      hostDict,
                      search_series=False,
                      total_seasons=None,
                      bypass_filter=False):
        sources = []
        if not data: return sources
        if search_series:  # torrentapi does not have showPacks
            return sources
        try:
            self.scraper = cfscrape.create_scraper()
            self.key = cache.get(self._get_token,
                                 0.2)  # token is valid for ~800 seconds

            self.bypass_filter = bypass_filter

            self.title = data['tvshowtitle'].replace('&', 'and').replace(
                'Special Victims Unit', 'SVU')
            self.aliases = data['aliases']
            self.year = data['year']
            self.season_x = data['season']
            self.season_xx = self.season_x.zfill(2)
            search_link = self.tvshowsearch.format(self.key, data['imdb'],
                                                   'S%s' % self.season_xx)
            # log_utils.log('search_link = %s' % str(search_link))
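            # brief pause; the torrentapi endpoint rate-limits rapid requests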
            sleep(2.1)
            rjson = self.scraper.get(search_link).content
            if not rjson or 'torrent_results' not in str(rjson): return sources
            files = jsloads(rjson)['torrent_results']
        except:
            source_utils.scraper_error('TORRENTAPI')
            return sources
        for file in files:
            try:
                url = file["download"].split('&tr')[0]
                hash = re.search(r'btih:(.*?)&', url, re.I).group(1)
                name = source_utils.clean_name(unquote_plus(file["title"]))

                if not self.bypass_filter:
                    if not source_utils.filter_season_pack(
                            self.title, self.aliases, self.year, self.season_x,
                            name):
                        continue
                package = 'season'

                name_info = source_utils.info_from_name(name,
                                                        self.title,
                                                        self.year,
                                                        season=self.season_x,
                                                        pack=package)
                if source_utils.remove_lang(name_info): continue
                try:
                    seeders = int(file["seeders"])
                    if self.min_seeders > seeders: continue
                except:
                    seeders = 0

                quality, info = source_utils.get_release_quality(
                    name_info, url)
                try:
                    dsize, isize = source_utils.convert_size(file["size"],
                                                             to='GB')
                    info.insert(0, isize)
                except:
                    dsize = 0
                info = ' | '.join(info)

                sources.append({
                    'provider': 'torrentapi',
                    'source': 'torrent',
                    'seeders': seeders,
                    'hash': hash,
                    'name': name,
                    'name_info': name_info,
                    'quality': quality,
                    'language': 'en',
                    'url': url,
                    'info': info,
                    'direct': False,
                    'debridonly': True,
                    'size': dsize,
                    'package': package
                })
            except:
                source_utils.scraper_error('TORRENTAPI')
        return sources
Example #7
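A torrentapi scraper for single titles: the same cached token and rate-limit pause, an IMDb-based search, and episode filtering for movie queries.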
    def sources(self, data, hostDict):
        sources = []
        if not data: return sources
        try:
            self.scraper = cfscrape.create_scraper()
            self.key = cache.get(self._get_token,
                                 0.2)  # token is valid for ~800 seconds

            title = data['tvshowtitle'] if 'tvshowtitle' in data else data[
                'title']
            title = title.replace('&', 'and').replace('Special Victims Unit',
                                                      'SVU')
            aliases = data['aliases']
            episode_title = data['title'] if 'tvshowtitle' in data else None
            year = data['year']
            hdlr = 'S%02dE%02d' % (int(data['season']), int(
                data['episode'])) if 'tvshowtitle' in data else year

            query = '%s %s' % (title, hdlr)
            query = re.sub(r'[^A-Za-z0-9\s\.-]+', '', query)
            if 'tvshowtitle' in data:
                search_link = self.tvshowsearch.format(self.key, data['imdb'],
                                                       hdlr)
            else:
                search_link = self.msearch.format(self.key, data['imdb'])
            sleep(2.1)
            rjson = self.scraper.get(search_link).content
            if not rjson or 'torrent_results' not in str(rjson): return sources
            files = jsloads(rjson)['torrent_results']
        except:
            source_utils.scraper_error('TORRENTAPI')
            return sources
        for file in files:
            try:
                url = file["download"].split('&tr')[0]
                hash = re.search(r'btih:(.*?)&', url, re.I).group(1)
                name = source_utils.clean_name(unquote_plus(file["title"]))

                if not source_utils.check_title(title, aliases, name, hdlr,
                                                year):
                    continue
                name_info = source_utils.info_from_name(
                    name, title, year, hdlr, episode_title)
                if source_utils.remove_lang(name_info): continue

                if not episode_title:  # filter out episodes returned by a movie query (rare, but a movie and a show both exist for Run in 2020)
                    ep_strings = [
                        r'[.-]s\d{2}e\d{2}([.-]?)', r'[.-]s\d{2}([.-]?)',
                        r'[.-]season[.-]?\d{1,2}[.-]?'
                    ]
                    if any(
                            re.search(item, name.lower())
                            for item in ep_strings):
                        continue
                try:
                    seeders = int(file["seeders"])
                    if self.min_seeders > seeders: continue
                except:
                    seeders = 0

                quality, info = source_utils.get_release_quality(
                    name_info, url)
                try:
                    dsize, isize = source_utils.convert_size(file["size"],
                                                             to='GB')
                    info.insert(0, isize)
                except:
                    dsize = 0
                info = ' | '.join(info)

                sources.append({
                    'provider': 'torrentapi',
                    'source': 'torrent',
                    'seeders': seeders,
                    'hash': hash,
                    'name': name,
                    'name_info': name_info,
                    'quality': quality,
                    'language': 'en',
                    'url': url,
                    'info': info,
                    'direct': False,
                    'debridonly': True,
                    'size': dsize
                })
            except:
                source_utils.scraper_error('TORRENTAPI')
        return sources
Example #8
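A FilePursuit scraper. It queries the RapidAPI endpoint with the user's API key and returns direct file links; the per-link validity check is left commented out because it is too slow.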
    def sources(self, data, hostDict):
        sources = []
        if not data: return sources
        try:
            api_key = getSetting('filepursuit.api')
            if api_key == '': return sources
            headers = {
                "x-rapidapi-host": "filepursuit.p.rapidapi.com",
                "x-rapidapi-key": api_key
            }

            title = data['tvshowtitle'] if 'tvshowtitle' in data else data[
                'title']
            title = title.replace('&', 'and').replace('Special Victims Unit',
                                                      'SVU')
            aliases = data['aliases']
            episode_title = data['title'] if 'tvshowtitle' in data else None
            year = data['year']
            hdlr = 'S%02dE%02d' % (int(data['season']), int(
                data['episode'])) if 'tvshowtitle' in data else year

            query = '%s %s' % (title, hdlr)
            query = re.sub(r'(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', '', query)
            url = '%s%s' % (self.base_link,
                            self.search_link % quote_plus(query))
            # log_utils.log('url = %s' % url, log_utils.LOGDEBUG)

            r = client.request(url, headers=headers)
            if not r: return sources
            r = jsloads(r)
            if 'not_found' in r['status']: return sources
            results = r['files_found']
        except:
            source_utils.scraper_error('FILEPURSUIT')
            return sources
        for item in results:
            try:
                url = item['file_link']
                try:
                    size = int(item['file_size_bytes'])
                except:
                    size = 0
                try:
                    name = item['file_name']
                except:
                    name = item['file_link'].split('/')[-1]
                name = source_utils.clean_name(name)

                if not source_utils.check_title(title, aliases, name, hdlr,
                                                year):
                    continue
                name_info = source_utils.info_from_name(
                    name, title, year, hdlr, episode_title)
                if source_utils.remove_lang(name_info): continue

                # link_header = client.request(url, output='headers', timeout='5') # to slow to check validity of links
                # if not any(value in str(link_header) for value in ['stream', 'video/mkv']): continue

                quality, info = source_utils.get_release_quality(
                    name_info, url)
                try:
                    dsize, isize = source_utils.convert_size(size, to='GB')
                    if isize: info.insert(0, isize)
                except:
                    dsize = 0
                info = ' | '.join(info)

                sources.append({
                    'provider': 'filepursuit',
                    'source': 'direct',
                    'quality': quality,
                    'name': name,
                    'name_info': name_info,
                    'language': "en",
                    'url': url,
                    'info': info,
                    'direct': True,
                    'debridonly': False,
                    'size': dsize
                })
            except:
                source_utils.scraper_error('FILEPURSUIT')
        return sources
Example #9
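A SolidTorrents scraper. It parses JSON results, normalizes magnet links whose &tr= parameters precede &dn=, validates titles, and appends torrent sources to self.sources.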
    def get_sources(self, url):
        try:
            r = client.request(url, timeout='5')
            if not r: return
            results = jsloads(r)['results']
        except:
            source_utils.scraper_error('SOLIDTORRENTS')
            return
        for item in results:
            try:
                url = unquote_plus(item['magnet']).replace(' ', '.')
                url = re.sub(
                    r'(&tr=.+)&dn=', '&dn=',
                    url)  # some SolidTorrents links put &tr= before &dn=
                url = source_utils.strip_non_ascii_and_unprintable(url)
                hash = item['infohash'].lower()
                if url in str(self.sources): continue

                name = source_utils.clean_name(item['title'])
                if not source_utils.check_title(self.title, self.aliases, name,
                                                self.hdlr, self.year):
                    continue
                name_info = source_utils.info_from_name(
                    name, self.title, self.year, self.hdlr, self.episode_title)
                if source_utils.remove_lang(name_info): continue

                if not self.episode_title:  # filter out episodes returned by a movie query (rare, but a movie and a show both exist for Run in 2020)
                    ep_strings = [
                        r'[.-]s\d{2}e\d{2}([.-]?)', r'[.-]s\d{2}([.-]?)',
                        r'[.-]season[.-]?\d{1,2}[.-]?'
                    ]
                if any(
                        re.search(ep, name.lower())
                        for ep in ep_strings):
                        continue
                try:
                    seeders = int(item['swarm']['seeders'])
                    if self.min_seeders > seeders: continue
                except:
                    seeders = 0

                quality, info = source_utils.get_release_quality(
                    name_info, url)
                try:
                    dsize, isize = source_utils.convert_size(item["size"],
                                                             to='GB')
                    info.insert(0, isize)
                except:
                    dsize = 0
                info = ' | '.join(info)

                self.sources.append({
                    'provider': 'solidtorrents',
                    'source': 'torrent',
                    'seeders': seeders,
                    'hash': hash,
                    'name': name,
                    'name_info': name_info,
                    'quality': quality,
                    'language': 'en',
                    'url': url,
                    'info': info,
                    'direct': False,
                    'debridonly': True,
                    'size': dsize
                })
            except:
                source_utils.scraper_error('SOLIDTORRENTS')
Example #10
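A SolidTorrents pack scraper: the same magnet normalization as above, plus season/show pack filtering and a last_season field for show packs.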
    def get_sources_packs(self, link):
        # log_utils.log('link = %s' % str(link))
        try:
            r = client.request(link, timeout='5')
            if not r: return
            results = jsloads(r)['results']
        except:
            source_utils.scraper_error('SOLIDTORRENTS')
            return
        for item in results:
            try:
                url = unquote_plus(item['magnet']).replace(' ', '.')
                url = re.sub(
                    r'(&tr=.+)&dn=', '&dn=',
                    url)  # some SolidTorrents links put &tr= before &dn=
                url = source_utils.strip_non_ascii_and_unprintable(url)
                hash = item['infohash'].lower()
                if url in str(self.sources): continue

                name = source_utils.clean_name(item['title'])
                if not self.search_series:
                    if not self.bypass_filter:
                        if not source_utils.filter_season_pack(
                                self.title, self.aliases, self.year,
                                self.season_x, name):
                            continue
                    package = 'season'

                elif self.search_series:
                    if not self.bypass_filter:
                        valid, last_season = source_utils.filter_show_pack(
                            self.title, self.aliases, self.imdb, self.year,
                            self.season_x, name, self.total_seasons)
                        if not valid: continue
                    else:
                        last_season = self.total_seasons
                    package = 'show'

                name_info = source_utils.info_from_name(name,
                                                        self.title,
                                                        self.year,
                                                        season=self.season_x,
                                                        pack=package)
                if source_utils.remove_lang(name_info): continue
                try:
                    seeders = int(item['swarm']['seeders'])
                    if self.min_seeders > seeders: continue
                except:
                    seeders = 0

                quality, info = source_utils.get_release_quality(
                    name_info, url)
                try:
                    dsize, isize = source_utils.convert_size(item["size"],
                                                             to='GB')
                    info.insert(0, isize)
                except:
                    dsize = 0
                info = ' | '.join(info)

                item = {
                    'provider': 'solidtorrents',
                    'source': 'torrent',
                    'seeders': seeders,
                    'hash': hash,
                    'name': name,
                    'name_info': name_info,
                    'quality': quality,
                    'language': 'en',
                    'url': url,
                    'info': info,
                    'direct': False,
                    'debridonly': True,
                    'size': dsize,
                    'package': package
                }
                if self.search_series:
                    item.update({'last_season': last_season})
                self.sources.append(item)
            except:
                source_utils.scraper_error('SOLIDTORRENTS')
Example #11
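An AllDebrid cloud scraper. It iterates finished cloud magnets, filters their files by extension, extras keywords, and query lists, handles M2TS folders, and returns direct debrid sources.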
    def sources(self, data, hostDict):
        sources = []
        if not data: return sources
        try:
            title = data['tvshowtitle'] if 'tvshowtitle' in data else data[
                'title']
            title = title.replace('&', 'and').replace('Special Victims Unit',
                                                      'SVU')
            aliases = data['aliases']
            episode_title = data['title'] if 'tvshowtitle' in data else None
            self.year = data['year']
            hdlr = 'S%02dE%02d' % (int(data['season']), int(
                data['episode'])) if 'tvshowtitle' in data else self.year
            self.season = str(
                data['season']) if 'tvshowtitle' in data else None
            self.episode = str(
                data['episode']) if 'tvshowtitle' in data else None
            query_list = self.episode_query_list(
            ) if 'tvshowtitle' in data else self.year_query_list()
            # log_utils.log('query_list = %s' % query_list)
            try:
                cloud_folders = alldebrid.AllDebrid().user_cloud()['magnets']
            except:
                return sources
            if not cloud_folders: return sources
            cloud_folders = [i for i in cloud_folders if i['statusCode'] == 4]
            if not cloud_folders: return sources
            ignoreM2ts = getSetting('ad_cloud.ignore.m2ts') == 'true'
            extras_filter = cloud_utils.extras_filter()
        except:
            from resources.lib.modules import log_utils
            log_utils.error('AD_CLOUD: ')
            return sources

        for folder in cloud_folders:
            is_m2ts = False
            try:
                folder_name = folder.get('filename')
                if not cloud_utils.cloud_check_title(title, aliases,
                                                     folder_name):
                    continue
                files = folder.get('links', '')
                # files = [i for i in files if i['filename'].lower().endswith(tuple(supported_video_extensions()))]
                if not files: continue
            except:
                from resources.lib.modules import log_utils
                log_utils.error('AD_CLOUD: ')
                return sources

            for file in files:
                try:
                    name = file.get('filename', '')
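                    # invalid_extensions: presumably a module-level tuple of non-video extensions, defined elsewhere in this scraper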
                    if name.lower().endswith(invalid_extensions): continue
                    path = folder.get('filename', '').lower()
                    rt = cloud_utils.release_title_format(name)
                    if any(value in rt for value in extras_filter): continue

                    if '.m2ts' in str(file.get('files')):
                        if ignoreM2ts: continue
                        if name in str(sources): continue
                        if all(not bool(re.search(i, rt)) for i in query_list):
                            continue  # TODO: verify this check does not drop movie titles that lack the year
                        is_m2ts = True
                        m2ts_files = [
                            i for i in files if name == i.get('filename')
                        ]
                        largest = sorted(m2ts_files,
                                         key=lambda k: k['size'],
                                         reverse=True)[0]
                        link = largest.get('link', '')
                        size = largest.get('size', '')
                    else:
                        if all(not bool(re.search(i, rt)) for i in query_list):
                            if 'tvshowtitle' in data:
                                season_folder_list = self.season_folder_list()
                                if all(not bool(re.search(i, path))
                                       for i in season_folder_list):
                                    continue
                                episode_list = self.episode_list()
                                if all(not bool(re.search(i, rt))
                                       for i in episode_list):
                                    continue
                            else:
                                if all(not bool(re.search(i, path))
                                       for i in query_list):
                                    continue
                                name = folder.get('filename', '')
                        link = file.get('link', '')
                        size = file.get('size', '')

                    name_info = fs_utils.info_from_name(
                        name, title, self.year, hdlr, episode_title)
                    hash = folder.get('hash', '')
                    seeders = folder.get('seeders', '')
                    quality, info = fs_utils.get_release_quality(
                        name_info, name)
                    try:
                        dsize, isize = fs_utils.convert_size(size, to='GB')
                        info.insert(0, isize)
                    except:
                        dsize = 0
                    if is_m2ts: info.append('M2TS')
                    info = ' / '.join(info)

                    sources.append({
                        'provider': 'ad_cloud',
                        'source': 'cloud',
                        'debrid': 'AllDebrid',
                        'seeders': seeders,
                        'hash': hash,
                        'name': name,
                        'name_info': name_info,
                        'quality': quality,
                        'language': 'en',
                        'url': link,
                        'info': info,
                        'direct': True,
                        'debridonly': True,
                        'size': dsize
                    })
                except:
                    from resources.lib.modules import log_utils
                    log_utils.error('AD_CLOUD: ')
                    return sources
        return sources
Example #12
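An EasyNews scraper. It runs an authenticated search, screens each post for audio language, duration, password, virus, and type flags, and builds an Authorization-tagged stream URL for each surviving post.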
    def sources(self, data, hostDict):
        sources = []
        if not data: return sources
        auth = self._get_auth()
        if not auth: return sources
        try:
            title_chk = getSetting('easynews.title.chk') == 'true'

            title = data['tvshowtitle'] if 'tvshowtitle' in data else data[
                'title']
            title = title.replace('&', 'and').replace('Special Victims Unit',
                                                      'SVU')
            aliases = data['aliases']

            episode_title = data['title'] if 'tvshowtitle' in data else None
            year = data['year']
            years = [str(year), str(int(year) + 1), str(int(year) - 1)] if 'tvshowtitle' not in data else None
            hdlr = 'S%02dE%02d' % (int(data['season']), int(
                data['episode'])) if 'tvshowtitle' in data else year

            query = self._query(data)
            url, params = self._translate_search(query)
            headers = {'Authorization': auth}
            results = requests.get(url,
                                   params=params,
                                   headers=headers,
                                   timeout=15).json()
            down_url = results.get('downURL')
            dl_farm = results.get('dlFarm')
            dl_port = results.get('dlPort')
            files = results.get('data', [])
        except:
            source_utils.scraper_error('EASYNEWS')
            return sources
        for item in files:
            try:
                post_hash, post_title, ext, duration = item['0'], item['10'], item['11'], item['14']
                # log_utils.log('post_title = %s' % post_title, __name__, log_utils.LOGDEBUG)
                checks = [False] * 6  # six flags; indices 1-5 are set below
                if 'alangs' in item and item['alangs'] and 'eng' not in item[
                        'alangs']:
                    checks[1] = True
                if re.match(r'^\d+s', duration) or re.match(r'^[0-5]m', duration):
                    checks[2] = True
                if 'passwd' in item and item['passwd']: checks[3] = True
                if 'virus' in item and item['virus']: checks[4] = True
                if 'type' in item and item['type'].upper() != 'VIDEO':
                    checks[5] = True
                if any(checks): continue

                stream_url = down_url + quote(
                    '/%s/%s/%s%s/%s%s' %
                    (dl_farm, dl_port, post_hash, ext, post_title, ext))
                name = source_utils.clean_name(post_title)
                # log_utils.log('name = %s' % name, __name__, log_utils.LOGDEBUG)
                name_chk = name
                if 'tvshowtitle' in data:
                    name_chk = re.sub(r'S\d+([.-])E\d+', hdlr, name_chk, 1,
                                      re.I)
                    name_chk = re.sub(r'^tvp[.-]', '', name_chk, 1, re.I)
                name_chk = re.sub(r'disney[.-]gallery[.-]star[.-]wars[.-]', '',
                                  name_chk, 0, re.I)
                name_chk = re.sub(r'marvels[.-]', '', name_chk, 0, re.I)
                if title_chk:
                    if not source_utils.check_title(title, aliases, name_chk,
                                                    hdlr, year, years):
                        continue

                name_info = source_utils.info_from_name(
                    name_chk, title, year, hdlr, episode_title)
                if source_utils.remove_lang(name_info): continue

                file_dl = stream_url + '|Authorization=%s' % (quote(auth))

                quality, info = source_utils.get_release_quality(
                    name_info, file_dl)
                try:
                    size = float(int(item['rawSize']))
                    dsize, isize = source_utils.convert_size(size, to='GB')
                    if isize: info.insert(0, isize)
                except:
                    dsize = 0
                info = ' | '.join(info)

                sources.append({
                    'provider': 'easynews',
                    'source': 'direct',
                    'name': name,
                    'name_info': name_info,
                    'quality': quality,
                    'language': "en",
                    'url': file_dl,
                    'info': info,
                    'direct': True,
                    'debridonly': False,
                    'size': dsize
                })
            except:
                source_utils.scraper_error('EASYNEWS')
        return sources
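
Every example converts raw byte counts with convert_size(size, to='GB') and expects a numeric value plus a display label back. A hypothetical stand-in with that contract (not the actual source_utils/fs_utils implementation):

# Hypothetical sketch: shows only the contract the examples rely on,
# a float for the 'size' field and a display string for the info column.
def convert_size(size_bytes, to='GB'):
    power = {'KB': 1, 'MB': 2, 'GB': 3, 'TB': 4}[to]
    dsize = round(float(size_bytes) / (1024 ** power), 2)
    isize = '%.2f %s' % (dsize, to)
    return dsize, isize

dsize, isize = convert_size(1610612736)  # -> (1.5, '1.50 GB')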