def sources(self, url, hostDict, hostprDict):
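    # Token-based torrent JSON API scraper (the token/tvsearch/msearch pattern used by
    # torrentapi); maps each 'torrent_results' entry to a debrid-only torrent source.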
    try:
        sources = []
        if url is None:
            return sources
        if debrid.status() is False:
            raise Exception()

        data = urlparse.parse_qs(url)
        data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

        query = ('%s S%02dE%02d' % (data['tvshowtitle'], int(data['season']), int(data['episode']))
                 if 'tvshowtitle' in data else data['imdb'])
        query = re.sub(r'(\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)

        token = client.request(self.token)
        token = json.loads(token)["token"]

        if 'tvshowtitle' in data:
            search_link = self.tvsearch.format(token, urllib.quote_plus(query), 'format=json_extended')
        else:
            search_link = self.msearch.format(token, data['imdb'], 'format=json_extended')

        rjson = client.request(search_link)
        files = json.loads(rjson)['torrent_results']

        for file in files:
            name = file["title"]
            quality, info = source_utils.get_release_quality(name, name)
            size = source_utils.convert_size(file["size"])
            info.append(size)
            info = ' | '.join(info)

            url = file["download"]
            url = url.split('&tr')[0]  # strip tracker parameters from the magnet link

            sources.append({'source': 'Torrent', 'quality': quality, 'language': 'en', 'url': url,
                            'info': info, 'direct': False, 'debridonly': True})
        return sources
    except Exception:
        return sources
Example #2
	def get_sources(self, url):
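		# SolidTorrents scraper: normalizes each magnet link and release name, filters by
		# title match and minimum seeders, then appends debrid-only torrent sources.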
		try:
			r = client.request(url)
			if not r or r == str([]):  # empty response or the literal string "[]"
				return
			r = json.loads(r)
			results = r['results']

			for item in results:
				try:
					url = urllib.unquote_plus(item['magnet']).replace(' ', '.')
					url = re.sub(r'(&tr=.+)&dn=', '&dn=', url) # some links on solidtorrents &tr= before &dn=
					hash = item['infohash']

					name = item['title']
					name = re.sub('[^A-Za-z0-9]+', '.', name).lstrip('.')
					if name.startswith('www'):
						try:
							name = re.sub(r'www(.*?)\W{2,10}', '', name)
						except:
							name = name.split('-.', 1)[1].lstrip()
					if source_utils.remove_lang(name):
						continue

					match = source_utils.check_title(self.title, name, self.hdlr, self.year)
					if not match:
						continue

					if url in str(self.sources):
						continue

					try:
						seeders = int(item['swarm']['seeders'])
						if self.min_seeders > seeders:
							continue
					except:
						seeders = 0
						pass

					quality, info = source_utils.get_release_quality(name, url)

					try:
						dsize, isize = source_utils.convert_size(item["size"], to='GB')
						info.insert(0, isize)
					except:
						dsize = 0
						pass

					info = ' | '.join(info)

					self.sources.append({'source': 'torrent', 'seeders': seeders, 'hash': hash, 'name': name, 'quality': quality,
												'language': 'en', 'url': url, 'info': info, 'direct': False, 'debridonly': True, 'size': dsize})
				except:
					source_utils.scraper_error('SOLIDTORRENTS')
					pass
		except:
			source_utils.scraper_error('SOLIDTORRENTS')
			pass
Example #3
    def _get_items(self, r):
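        # Extracts one torrent from a TorrentDownloads XML fragment (seeders, info_hash,
        # title, size), builds a magnet link, and appends it to self._sources if it
        # passes the title and seeder filters.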
        try:
            try:
                seeders = int(
                    re.search(r'<seeders>([\d]+)</seeders>',
                              r).groups()[0].replace(',', ''))
                if seeders < self.min_seeders:
                    return
            except:
                seeders = 0
                pass

            hash = re.search(r'<info_hash>([a-zA-Z0-9]+)</info_hash>',
                             r).groups()[0]
            name = re.search(r'<title>(.+?)</title>', r).groups()[0]
            name = urllib.unquote_plus(name)
            name = re.sub('[^A-Za-z0-9]+', '.', name).lstrip('.')
            if source_utils.remove_lang(name):
                return

            match = source_utils.check_title(self.title, name, self.hdlr,
                                             self.year)
            if not match:
                return

            url = 'magnet:?xt=urn:btih:%s&dn=%s' % (hash, name)

            quality, info = source_utils.get_release_quality(name, url)

            try:
                size = re.search(r'<size>([\d]+)</size>', r).groups()[0]
                dsize, isize = source_utils.convert_size(float(size), to='GB')
                info.insert(0, isize)
            except:
                dsize = 0
                pass

            info = ' | '.join(info)

            self._sources.append({
                'source': 'torrent',
                'seeders': seeders,
                'hash': hash,
                'name': name,
                'quality': quality,
                'language': 'en',
                'url': url,
                'info': info,
                'direct': False,
                'debridonly': True,
                'size': dsize
            })
        except:
            source_utils.scraper_error('TORRENTDOWNLOADS')
            pass
Example #4
    def sources_packs(self,
                      url,
                      hostDict,
                      hostprDict,
                      search_series=False,
                      total_seasons=None,
                      bypass_filter=False):
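        # Season-pack search against torrentapi: queries '<title> Sxx', filters out
        # non-matching packs, and tags each source with package='season'.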
        sources = []
        self.bypass_filter = bypass_filter

        if search_series:  # torrentapi does not have showPacks
            return sources
        try:
            if url is None:
                return sources
            if debrid.status() is False:
                return sources

            data = parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            self.title = data['tvshowtitle'].replace('&', 'and').replace(
                'Special Victims Unit', 'SVU')
            self.aliases = data['aliases']
            self.year = data['year']
            self.season_x = data['season']
            self.season_xx = self.season_x.zfill(2)

            query = re.sub(r'[^A-Za-z0-9\s\.-]+', '', self.title)
            search_link = self.tvsearch.format(
                self.key, quote_plus(query + ' S%s' % self.season_xx))
            # log_utils.log('search_link = %s' % str(search_link), __name__, log_utils.LOGDEBUG)

            time.sleep(2.1)
            rjson = client.request(search_link, error=True)
            if not rjson or 'torrent_results' not in str(rjson):
                return sources

            files = json.loads(rjson)['torrent_results']
            for file in files:
                url = file["download"]
                url = url.split('&tr')[0]
                hash = re.compile('btih:(.*?)&').findall(url)[0]

                name = file["title"]
                name = unquote_plus(name)
                name = source_utils.clean_name(self.title, name)
                if source_utils.remove_lang(name):
                    continue

                if not self.bypass_filter:
                    if not source_utils.filter_season_pack(
                            self.title, self.aliases, self.year, self.season_x,
                            name):
                        continue
                package = 'season'

                try:
                    seeders = int(file["seeders"])
                    if self.min_seeders > seeders:
                        continue
                except:
                    seeders = 0
                    pass

                quality, info = source_utils.get_release_quality(name, name)

                try:
                    dsize, isize = source_utils.convert_size(file["size"],
                                                             to='GB')
                    info.insert(0, isize)
                except:
                    dsize = 0
                    pass

                info = ' | '.join(info)

                sources.append({
                    'source': 'torrent',
                    'seeders': seeders,
                    'hash': hash,
                    'name': name,
                    'quality': quality,
                    'language': 'en',
                    'url': url,
                    'info': info,
                    'direct': False,
                    'debridonly': True,
                    'size': dsize,
                    'package': package
                })
            return sources
        except:
            source_utils.scraper_error('TORRENTAPI')
            return sources
Example #5
    def sources(self, url, hostDict, hostprDict):
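        # torrentapi episode/movie search: tvsearch by query or msearch by IMDb id,
        # with title checks and single-episode filtering before building sources.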
        sources = []
        try:
            if url is None:
                return sources

            if debrid.status() is False:
                return sources

            data = parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            title = data['tvshowtitle'] if 'tvshowtitle' in data else data[
                'title']
            title = title.replace('&', 'and').replace('Special Victims Unit',
                                                      'SVU')
            aliases = data['aliases']
            episode_title = data['title'] if 'tvshowtitle' in data else None
            hdlr = 'S%02dE%02d' % (int(data['season']), int(
                data['episode'])) if 'tvshowtitle' in data else data['year']

            query = '%s %s' % (title, hdlr)
            query = re.sub(r'[^A-Za-z0-9\s\.-]+', '', query)

            if 'tvshowtitle' in data:
                search_link = self.tvsearch.format(self.key, quote_plus(query))
            else:
                search_link = self.msearch.format(self.key, data['imdb'])
            # log_utils.log('search_link = %s' % search_link, log_utils.LOGDEBUG)

            time.sleep(2.1)
            rjson = client.request(search_link, error=True)
            if not rjson or 'torrent_results' not in str(rjson):
                return sources

            files = json.loads(rjson)['torrent_results']

            for file in files:
                url = file["download"]
                url = url.split('&tr')[0]
                hash = re.compile('btih:(.*?)&').findall(url)[0]

                name = file["title"]
                name = unquote_plus(name)
                name = source_utils.clean_name(title, name)
                if source_utils.remove_lang(name, episode_title):
                    continue

                if not source_utils.check_title(title, aliases, name, hdlr,
                                                data['year']):
                    continue

                # filter for episode multi packs (ex. S01E01-E17 is also returned in query)
                if episode_title:
                    if not source_utils.filter_single_episodes(hdlr, name):
                        continue

                try:
                    seeders = int(file["seeders"])
                    if self.min_seeders > seeders:
                        continue
                except:
                    seeders = 0
                    pass

                quality, info = source_utils.get_release_quality(name, name)

                try:
                    dsize, isize = source_utils.convert_size(file["size"],
                                                             to='GB')
                    info.insert(0, isize)
                except:
                    dsize = 0
                    pass

                info = ' | '.join(info)

                sources.append({
                    'source': 'torrent',
                    'seeders': seeders,
                    'hash': hash,
                    'name': name,
                    'quality': quality,
                    'language': 'en',
                    'url': url,
                    'info': info,
                    'direct': False,
                    'debridonly': True,
                    'size': dsize
                })
            return sources
        except:
            source_utils.scraper_error('TORRENTAPI')
            return sources
Example #6
    def sources(self, url, hostDict, hostprDict):
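        # Older urllib/urlparse (Python 2 style) variant of the torrentapi scraper;
        # matches titles via cleantitle instead of source_utils.check_title.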
        try:
            sources = []

            if url is None:
                return sources

            if debrid.status() is False:
                return sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            title = data['tvshowtitle'] if 'tvshowtitle' in data else data[
                'title']
            title = title.replace('&', 'and').replace('Special Victims Unit',
                                                      'SVU')

            hdlr = 'S%02dE%02d' % (int(data['season']), int(
                data['episode'])) if 'tvshowtitle' in data else data['year']

            query = '%s %s' % (title, hdlr)
            query = re.sub(r'(\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', '', query)

            token = client.request(self.token)
            token = json.loads(token)["token"]

            if 'tvshowtitle' in data:
                search_link = self.tvsearch.format(token,
                                                   urllib.quote_plus(query),
                                                   'format=json_extended')
            else:
                search_link = self.msearch.format(token, data['imdb'],
                                                  'format=json_extended')
            # log_utils.log('search_link = %s' % search_link, log_utils.LOGDEBUG)

            time.sleep(2)

            rjson = client.request(search_link, error=True)
            if not rjson or 'No results found' in rjson:
                return sources

            files = json.loads(rjson)['torrent_results']

            for file in files:
                url = file["download"]
                url = url.split('&tr')[0]

                name = file["title"]
                name = urllib.unquote_plus(name).replace(' ', '.')
                if source_utils.remove_lang(name):
                    continue

                t = name.split(hdlr)[0].replace(data['year'], '').replace(
                    '(', '').replace(')', '').replace('&', 'and').replace(
                        '.US.', '.').replace('.us.', '.')
                if cleantitle.get(t) != cleantitle.get(title):
                    continue

                if hdlr not in name:
                    continue

                quality, info = source_utils.get_release_quality(name, name)

                size = source_utils.convert_size(file["size"])
                info.insert(0, size)
                info = ' | '.join(info)

                sources.append({
                    'source': 'torrent',
                    'quality': quality,
                    'language': 'en',
                    'url': url,
                    'info': info,
                    'direct': False,
                    'debridonly': True
                })
            return sources

        except:
            source_utils.scraper_error('TORRENTAPI')
            return sources
Example #7
    def sources(self, url, hostDict, hostprDict):
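        # FilePursuit search via its RapidAPI endpoint; results are plain file links,
        # so sources are marked direct=True and debridonly=False.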
        sources = []
        try:
            api_key = control.setting('filepursuit.api')
            if api_key == '':
                return sources
            headers = {
                "x-rapidapi-host": "filepursuit.p.rapidapi.com",
                "x-rapidapi-key": api_key
            }

            if url is None:
                return sources

            data = parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            self.title = data[
                'tvshowtitle'] if 'tvshowtitle' in data else data['title']
            self.title = self.title.replace('&', 'and').replace(
                'Special Victims Unit', 'SVU')
            self.aliases = data['aliases']
            self.hdlr = 'S%02dE%02d' % (
                int(data['season']), int(data['episode'])
            ) if 'tvshowtitle' in data else data['year']
            self.year = data['year']

            query = '%s %s' % (self.title, self.hdlr)
            query = re.sub(r'(\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', '', query)

            url = self.search_link % quote_plus(query)
            url = urljoin(self.base_link, url)
            # log_utils.log('url = %s' % url, log_utils.LOGDEBUG)

            r = client.request(url, headers=headers)
            r = json.loads(r)

            if 'not_found' in r['status']:
                return sources

            results = r['files_found']
            for item in results:
                try:
                    size = int(item['file_size_bytes'])
                except:
                    size = 0

                try:
                    name = item['file_name']
                except:
                    name = item['file_link'].split('/')[-1]

                if source_utils.remove_lang(name):
                    continue

                if not source_utils.check_title(self.title, self.aliases, name,
                                                self.hdlr, self.year):
                    continue

                url = item['file_link']

                quality, info = source_utils.get_release_quality(name, url)
                try:
                    dsize, isize = source_utils.convert_size(size, to='GB')
                    if isize:
                        info.insert(0, isize)
                except:
                    source_utils.scraper_error('FILEPURSUIT')
                    dsize = 0
                    pass

                info = ' | '.join(info)

                sources.append({
                    'source': 'direct',
                    'quality': quality,
                    'name': name,
                    'language': "en",
                    'url': url,
                    'info': info,
                    'direct': True,
                    'debridonly': False,
                    'size': dsize
                })
            return sources
        except:
            source_utils.scraper_error('FILEPURSUIT')
            return sources
Example #8
    def get_sources_packs(self, link):
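        # SolidTorrents pack scraper: handles both season packs and full show packs,
        # recording last_season for show packs.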
        # log_utils.log('link = %s' % str(link), __name__, log_utils.LOGDEBUG)
        try:
            r = client.request(link)
            if not r:
                return
            r = json.loads(r)
            results = r['results']

            for item in results:
                try:
                    url = unquote_plus(item['magnet']).replace(' ', '.')
                    url = re.sub(
                        r'(&tr=.+)&dn=', '&dn=',
                        url)  # some links on solidtorrents &tr= before &dn=
                    url = source_utils.strip_non_ascii_and_unprintable(url)
                    hash = item['infohash'].lower()

                    name = item['title']
                    name = source_utils.clean_name(self.title, name)
                    if source_utils.remove_lang(name):
                        continue

                    if not self.search_series:
                        if not self.bypass_filter:
                            if not source_utils.filter_season_pack(
                                    self.title, self.aliases, self.year,
                                    self.season_x, name):
                                continue
                        package = 'season'

                    elif self.search_series:
                        if not self.bypass_filter:
                            valid, last_season = source_utils.filter_show_pack(
                                self.title, self.aliases, self.imdb, self.year,
                                self.season_x, name, self.total_seasons)
                            if not valid:
                                continue
                        else:
                            last_season = self.total_seasons
                        package = 'show'

                    if url in str(self.sources):
                        continue

                    try:
                        seeders = int(item['swarm']['seeders'])
                        if self.min_seeders > seeders:
                            continue
                    except:
                        seeders = 0
                        pass

                    quality, info = source_utils.get_release_quality(name, url)

                    try:
                        dsize, isize = source_utils.convert_size(item["size"],
                                                                 to='GB')
                        info.insert(0, isize)
                    except:
                        dsize = 0
                        pass

                    info = ' | '.join(info)

                    item = {
                        'source': 'torrent',
                        'seeders': seeders,
                        'hash': hash,
                        'name': name,
                        'quality': quality,
                        'language': 'en',
                        'url': url,
                        'info': info,
                        'direct': False,
                        'debridonly': True,
                        'size': dsize,
                        'package': package
                    }
                    if self.search_series:
                        item.update({'last_season': last_season})
                    self.sources.append(item)
                except:
                    source_utils.scraper_error('SOLIDTORRENTS')
                    pass
        except:
            source_utils.scraper_error('SOLIDTORRENTS')
            pass
Example #9
    def get_sources(self, url):
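        # SolidTorrents episode/movie scraper: same magnet normalization as the pack
        # variant, plus title checks and single-episode filtering.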
        try:
            r = client.request(url)
            if not r:
                return

            r = json.loads(r)
            results = r['results']

            for item in results:
                try:
                    url = unquote_plus(item['magnet']).replace(' ', '.')
                    url = re.sub(
                        r'(&tr=.+)&dn=', '&dn=',
                        url)  # some links on solidtorrents &tr= before &dn=
                    url = source_utils.strip_non_ascii_and_unprintable(url)
                    hash = item['infohash'].lower()

                    name = item['title']
                    name = source_utils.clean_name(self.title, name)
                    if source_utils.remove_lang(name, self.episode_title):
                        continue

                    if not source_utils.check_title(self.title, self.aliases,
                                                    name, self.hdlr,
                                                    self.year):
                        continue

                    # filter for episode multi packs (ex. S01E01-E17 is also returned in query)
                    if self.episode_title:
                        if not source_utils.filter_single_episodes(
                                self.hdlr, name):
                            continue

                    if url in str(self.sources):
                        continue

                    try:
                        seeders = int(item['swarm']['seeders'])
                        if self.min_seeders > seeders:
                            continue
                    except:
                        seeders = 0
                        pass

                    quality, info = source_utils.get_release_quality(name, url)

                    try:
                        dsize, isize = source_utils.convert_size(item["size"],
                                                                 to='GB')
                        info.insert(0, isize)
                    except:
                        dsize = 0
                        pass

                    info = ' | '.join(info)

                    self.sources.append({
                        'source': 'torrent',
                        'seeders': seeders,
                        'hash': hash,
                        'name': name,
                        'quality': quality,
                        'language': 'en',
                        'url': url,
                        'info': info,
                        'direct': False,
                        'debridonly': True,
                        'size': dsize
                    })
                except:
                    source_utils.scraper_error('SOLIDTORRENTS')
                    pass
        except:
            source_utils.scraper_error('SOLIDTORRENTS')
            pass
Example #10
	def sources(self, url, hostDict, hostprDict):
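		# PirateBay JSON API scraper: builds magnet links from info_hash/name and
		# filters by title, single episodes, and minimum seeders.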
		sources = []
		try:
			if not url:
				return sources
			if debrid.status() is False:
				return sources

			data = parse_qs(url)
			data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

			title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
			title = title.replace('&', 'and').replace('Special Victims Unit', 'SVU')
			aliases = data['aliases']
			hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else data['year']
			episode_title = data['title'] if 'tvshowtitle' in data else None

			query = '%s %s' % (title, hdlr)
			query = re.sub(r'[^A-Za-z0-9\s\.-]+', '', query)

			url = self.search_link % quote(query)
			url = urljoin(self.base_link, url)
			# log_utils.log('url = %s' % url, log_utils.LOGDEBUG)

			rjson = client.request(url, error=True)
			if not rjson or 'No results returned' in str(rjson) or 'Connection Time-out' in str(rjson):
				return sources

			files = json.loads(rjson)
			for file in files:
				try:
					hash = file['info_hash']
					name = file['name']
					name = source_utils.clean_name(title, name)
					if source_utils.remove_lang(name, episode_title):
						continue

					url = 'magnet:?xt=urn:btih:%s&dn=%s' % (hash, name)

					if not source_utils.check_title(title, aliases, name, hdlr, data['year']):
						continue

					# filter for episode multi packs (ex. S01E01-E17 is also returned in query)
					if episode_title:
						if not source_utils.filter_single_episodes(hdlr, name):
							continue

					try:
						seeders = int(file['seeders'])  # may arrive as a string; cast so the comparison below works
						if self.min_seeders > seeders:
							continue
					except:
						seeders = 0
						pass

					quality, info = source_utils.get_release_quality(name, url)
					try:
						dsize, isize = source_utils.convert_size(float(file["size"]), to='GB')
						info.insert(0, isize)
					except:
						dsize = 0
						pass

					info = ' | '.join(info)

					sources.append({'source': 'torrent', 'seeders': seeders, 'hash': hash, 'name': name, 'quality': quality,
												'language': 'en', 'url': url, 'info': info, 'direct': False, 'debridonly': True, 'size': dsize})
				except:
					source_utils.scraper_error('PIRATEBAY')
					continue
			return sources
		except:
			source_utils.scraper_error('PIRATEBAY')
			return sources
Example #11
	def get_sources_packs(self, link):
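		# PirateBay pack scraper: same magnet construction, with season-pack or
		# show-pack filtering depending on self.search_series.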
		try:
			# log_utils.log('link = %s' % str(link), __name__, log_utils.LOGDEBUG)
			rjson = client.request(link, error=True)
			if not rjson or 'No results returned' in str(rjson) or 'Connection Time-out' in str(rjson):
				return
			files = json.loads(rjson)
		except:
			source_utils.scraper_error('PIRATEBAY')
			return

		for file in files:
			try:
				hash = file['info_hash']
				name = file['name']
				name = source_utils.clean_name(self.title, name)
				if source_utils.remove_lang(name):
					continue

				url = 'magnet:?xt=urn:btih:%s&dn=%s' % (hash, name)

				if not self.search_series:
					if not self.bypass_filter:
						if not source_utils.filter_season_pack(self.title, self.aliases, self.year, self.season_x, name):
							continue
					package = 'season'

				elif self.search_series:
					if not self.bypass_filter:
						valid, last_season = source_utils.filter_show_pack(self.title, self.aliases, self.imdb, self.year, self.season_x, name, self.total_seasons)
						if not valid:
							continue
					else:
						last_season = self.total_seasons
					package = 'show'

				try:
					seeders = int(file['seeders'])  # may arrive as a string; cast so the comparison below works
					if self.min_seeders > seeders:
						continue
				except:
					seeders = 0
					pass

				quality, info = source_utils.get_release_quality(name, url)
				try:
					dsize, isize = source_utils.convert_size(float(file["size"]), to='GB')
					info.insert(0, isize)
				except:
					dsize = 0
					pass

				info = ' | '.join(info)

				item = {'source': 'torrent', 'seeders': seeders, 'hash': hash, 'name': name, 'quality': quality,
							'language': 'en', 'url': url, 'info': info, 'direct': False, 'debridonly': True, 'size': dsize, 'package': package}
				if self.search_series:
					item.update({'last_season': last_season})
				self.sources.append(item)
			except:
				source_utils.scraper_error('PIRATEBAY')
				continue
Example #12
	def sources(self, url, hostDict, hostprDict):
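		# torrentapi scraper (limit=100 variant) using the shared token; results are
		# filtered by title and seeders, with sizes converted to GB.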
		sources = []
		try:
			if url is None:
				return sources

			if debrid.status() is False:
				return sources

			data = parse_qs(url)
			data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

			title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
			title = title.replace('&', 'and').replace('Special Victims Unit', 'SVU')

			hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else data['year']

			query = '%s %s' % (title, hdlr)
			query = re.sub(r'(\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', '', query)

			token = client.request(self.token)
			token = json.loads(token)["token"]

			if 'tvshowtitle' in data:
				search_link = self.tvsearch.format(token, quote_plus(query), 'limit=100&format=json_extended')
			else:
				search_link = self.msearch.format(token, data['imdb'], 'limit=100&format=json_extended')
			# log_utils.log('search_link = %s' % search_link, log_utils.LOGDEBUG)

			time.sleep(2)
			rjson = client.request(search_link, error=True)
			if not rjson or 'No results found' in rjson:
				return sources

			files = json.loads(rjson)['torrent_results']

			for file in files:
				url = file["download"]
				url = url.split('&tr')[0]
				hash = re.compile('btih:(.*?)&').findall(url)[0]

				name = file["title"]
				name = unquote_plus(name)
				name = re.sub('[^A-Za-z0-9]+', '.', name).lstrip('.')
				if source_utils.remove_lang(name):
					continue

				match = source_utils.check_title(title, name, hdlr, data['year'])
				if not match:
					continue

				try:
					seeders = int(file["seeders"])
					if self.min_seeders > seeders:
						continue
				except:
					seeders = 0
					pass

				quality, info = source_utils.get_release_quality(name, name)

				try:
					dsize, isize = source_utils.convert_size(file["size"], to='GB')
					info.insert(0, isize)
				except:
					dsize = 0
					pass

				info = ' | '.join(info)

				sources.append({'source': 'torrent', 'seeders': seeders, 'hash': hash, 'name': name, 'quality': quality,
										'language': 'en', 'url': url, 'info': info, 'direct': False, 'debridonly': True, 'size': dsize})
			return sources
		except:
			source_utils.scraper_error('TORRENTAPI')
			return sources