def sources_packs(self,
                      url,
                      hostDict,
                      search_series=False,
                      total_seasons=None,
                      bypass_filter=False):
        self.sources = []
        self.items = []
        if not url: return self.sources
        try:
            self.search_series = search_series
            self.total_seasons = total_seasons
            self.bypass_filter = bypass_filter

            data = parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            self.title = data['tvshowtitle'].replace('&', 'and').replace(
                'Special Victims Unit', 'SVU')
            self.aliases = data['aliases']
            self.imdb = data['imdb']
            self.year = data['year']
            self.season_x = data['season']
            self.season_xx = self.season_x.zfill(2)

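            # pack queries: try both 'S01' and 'Season 1' forms; series mode broadens to 'Season'/'Complete'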
            query = re.sub(r'[^A-Za-z0-9\s\.-]+', '', self.title)
            queries = [
                self.search_link % quote_plus(query + ' S%s' % self.season_xx),
                self.search_link %
                quote_plus(query + ' Season %s' % self.season_x)
            ]
            if search_series:
                queries = [
                    self.search_link % quote_plus(query + ' Season'),
                    self.search_link % quote_plus(query + ' Complete')
                ]
            threads = []
            for url in queries:
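                # quote_plus encodes spaces as '+'; the replace below swaps them for '-', which this site's search paths appear to expect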
                link = urljoin(self.base_link, url).replace('+', '-')
                threads.append(workers.Thread(self.get_pack_items, link))
            [i.start() for i in threads]
            [i.join() for i in threads]

            threads2 = []
            for i in self.items:
                threads2.append(workers.Thread(self.get_pack_sources, i))
            [i.start() for i in threads2]
            [i.join() for i in threads2]
            return self.sources
        except:
            source_utils.scraper_error('YOURBITTORRENT')
            return self.sources
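Every scraper in this listing follows the same fan-out pattern: one workers.Thread per search URL, start() them all, join() them all, and let each worker append into the shared self.sources list. A minimal stdlib sketch of that pattern, assuming workers.Thread behaves like threading.Thread (workers, client, and source_utils are add-on internals, so every name below is a stand-in):

    import threading

    sources = []             # shared result list, like self.sources
    lock = threading.Lock()  # list appends are atomic in CPython, but a lock keeps intent explicit

    def get_pack_items(link):
        found = [{'url': link}]  # stand-in for the real fetch + parse
        with lock:
            sources.extend(found)

    queries = ['show-S01', 'show-Season-1']
    threads = [threading.Thread(target=get_pack_items, args=(q,)) for q in queries]
    for t in threads:
        t.start()
    for t in threads:
        t.join()             # results are complete only after every join
    print(sources)           # two items; order depends on thread timing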
Example #2
    def sources(self, data, hostDict):
        self.sources = []
        if not data: return self.sources
        try:
            self.title = data[
                'tvshowtitle'] if 'tvshowtitle' in data else data['title']
            self.title = self.title.replace('&', 'and').replace(
                'Special Victims Unit', 'SVU')
            self.aliases = data['aliases']
            self.episode_title = data[
                'title'] if 'tvshowtitle' in data else None
            self.year = data['year']
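            # hdlr: 'SxxEyy' episode tag for shows, the release year for movies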
            self.hdlr = 'S%02dE%02d' % (
                int(data['season']),
                int(data['episode'])) if 'tvshowtitle' in data else self.year

            query = '%s %s' % (self.title, self.hdlr)
            query = re.sub(r'[^A-Za-z0-9\s\.-]+', '', query)
            urls = []
            url = '%s%s' % (self.base_link,
                            self.search_link % quote_plus(query))
            urls.append(url)
            urls.append(url + '&p=2')
            # log_utils.log('urls = %s' % urls, log_utils.LOGDEBUG)
            threads = []
            for url in urls:
                threads.append(workers.Thread(self.get_sources, url))
            [i.start() for i in threads]
            [i.join() for i in threads]
            return self.sources
        except:
            source_utils.scraper_error('BTSCENE')
            return self.sources
Example #3
	def sources_packs(self, data, hostDict, search_series=False, total_seasons=None, bypass_filter=False):
		self.sources = []
		if not data: return self.sources
		try:
			self.search_series = search_series
			self.total_seasons = total_seasons
			self.bypass_filter = bypass_filter

			self.title = data['tvshowtitle'].replace('&', 'and').replace('Special Victims Unit', 'SVU')
			self.aliases = data['aliases']
			self.imdb = data['imdb']
			self.year = data['year']
			self.season_x = data['season']
			self.season_xx = self.season_x.zfill(2)

			query = re.sub(r'[^A-Za-z0-9\s\.-]+', '', self.title)
			queries = [
						self.search_link % quote_plus(query + ' S%s' % self.season_xx),
						self.search_link % quote_plus(query + ' Season %s' % self.season_x)]
			if search_series:
				queries = [
						self.search_link % quote_plus(query + ' Season'),
						self.search_link % quote_plus(query + ' Complete')]
			threads = []
			for url in queries:
				link = '%s%s' % (self.base_link, url)
				threads.append(workers.Thread(self.get_sources_packs, link))
			[i.start() for i in threads]
			[i.join() for i in threads]
			return self.sources
		except:
			source_utils.scraper_error('GLODLS')
			return self.sources
Example #4
	def sources(self, url, hostDict):
		self.sources = []
		if not url: return self.sources
		try:
			data = parse_qs(url)
			data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

			self.title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
			self.title = self.title.replace('&', 'and').replace('Special Victims Unit', 'SVU')
			self.aliases = data['aliases']
			self.episode_title = data['title'] if 'tvshowtitle' in data else None
			self.year = data['year']
			self.hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else self.year

			query = '%s %s' % (self.title, self.hdlr)
			query = re.sub(r'[^A-Za-z0-9\s\.-]+', '', query)
			url = self.search_link % quote_plus(query)
			url = urljoin(self.base_link, url)
			# log_utils.log('url = %s' % url, log_utils.LOGDEBUG)

			r = client.request(url, timeout='10')
			if not r: return self.sources
			links = client.parseDOM(r, "td", attrs={"nowrap": "nowrap"})
			threads = []
			for link in links:
				threads.append(workers.Thread(self.get_sources, link))
			[i.start() for i in threads]
			[i.join() for i in threads]
			return self.sources
		except:
			source_utils.scraper_error('ETTV')
			return self.sources
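client.parseDOM here is an add-on helper, not a standard library call. A rough stdlib equivalent of the td[nowrap] extraction above, assuming simple well-formed markup as in basic result tables (a real HTML parser is safer in general):

    import re

    def parse_td_nowrap(html):
        # crude stand-in for client.parseDOM(r, "td", attrs={"nowrap": "nowrap"});
        # assumes the <td> cells are not nested
        pattern = r'<td[^>]*\bnowrap\s*=\s*["\']?nowrap["\']?[^>]*>(.*?)</td>'
        return re.findall(pattern, html, re.DOTALL | re.I)

    sample = '<tr><td nowrap="nowrap"><a href="/t/1">x</a></td></tr>'
    print(parse_td_nowrap(sample))  # ['<a href="/t/1">x</a>']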
Example #5
    def sources(self, data, hostDict):
        self.sources = []
        if not data: return self.sources
        try:
            self.title = data[
                'tvshowtitle'] if 'tvshowtitle' in data else data['title']
            self.title = self.title.replace('&', 'and').replace(
                'Special Victims Unit', 'SVU')
            self.aliases = data['aliases']
            self.episode_title = data[
                'title'] if 'tvshowtitle' in data else None
            self.year = data['year']
            self.hdlr = 'S%02dE%02d' % (
                int(data['season']),
                int(data['episode'])) if 'tvshowtitle' in data else self.year

            query = '%s %s' % (self.title, self.hdlr)
            query = re.sub(r'[^A-Za-z0-9\s\.-]+', '', query)
            url = '%s%s' % (self.base_link,
                            self.search_link % quote_plus(query))
            # log_utils.log('url = %s' % url)

            result = client.request(url, timeout='5')
            if not result or '<tbody' not in result: return self.sources
            table = client.parseDOM(result, 'tbody')[0]
            rows = client.parseDOM(table, 'tr')
            threads = []
            for row in rows:
                threads.append(workers.Thread(self.get_sources, row))
            [i.start() for i in threads]
            [i.join() for i in threads]
            return self.sources
        except:
            source_utils.scraper_error('ISOHUNT2')
            return self.sources
Example #6
    def sources(self, data, hostDict):
        self.sources = []
        if not data: return self.sources
        try:
            self.title = data[
                'tvshowtitle'] if 'tvshowtitle' in data else data['title']
            self.title = self.title.replace('&', 'and').replace(
                'Special Victims Unit', 'SVU')
            self.aliases = data['aliases']
            self.episode_title = data[
                'title'] if 'tvshowtitle' in data else None
            self.year = data['year']
            self.hdlr = 'S%02dE%02d' % (
                int(data['season']),
                int(data['episode'])) if 'tvshowtitle' in data else self.year

            query = '%s %s' % (self.title, self.hdlr)
            query = re.sub(r'[^A-Za-z0-9\s\.-]+', '', query)
            url = self.search_link % quote_plus(query)
            url = '%s%s' % (self.base_link, url)
            # log_utils.log('url = %s' % url)

            r = client.request(url, timeout='10')
            if not r: return self.sources
            links = re.findall(r'<a\s*href\s*=\s*(/torrent/.+?)>', r,
                               re.DOTALL | re.I)
            threads = []
            for link in links:
                threads.append(workers.Thread(self.get_sources, link))
            [i.start() for i in threads]
            [i.join() for i in threads]
            return self.sources
        except:
            source_utils.scraper_error('TORLOCK')
            return self.sources
Example #7
	def sources(self, url, hostDict):
		self.sources = []
		if not url: return self.sources
		try:
			data = parse_qs(url)
			data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

			self.title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
			self.title = self.title.replace('&', 'and').replace('Special Victims Unit', 'SVU')
			self.aliases = data['aliases']
			self.episode_title = data['title'] if 'tvshowtitle' in data else None
			self.year = data['year']
			self.hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else self.year

			query = '%s %s' % (self.title, self.hdlr)
			query = re.sub(r'[^A-Za-z0-9\s\.-]+', '', query)
			url = self.search_link % quote_plus(query)
			url = urljoin(self.base_link, url)
			# log_utils.log('url = %s' % url, log_utils.LOGDEBUG)

			r = client.request(url, timeout='5')
			if not r: return self.sources
			r = client.parseDOM(r, 'table', attrs={'class': 'tmain'})[0]
			links = re.findall(r'<a\s*href\s*=\s*["\'](/torrent/.+?)["\']>(.+?)</a>', r, re.DOTALL | re.I)
			threads = []
			for link in links:
				threads.append(workers.Thread(self.get_sources, link))
			[i.start() for i in threads]
			[i.join() for i in threads]
			return self.sources
		except:
			source_utils.scraper_error('TORRENTFUNK')
			return self.sources
Example #8
	def sources(self, url, hostDict):
		self.sources = []
		if not url: return self.sources
		try:
			data = parse_qs(url)
			data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

			self.title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
			self.title = self.title.replace('&', 'and').replace('Special Victims Unit', 'SVU')
			self.aliases = data['aliases']
			self.episode_title = data['title'] if 'tvshowtitle' in data else None
			self.year = data['year']
			self.hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else self.year

			query = '%s %s' % (self.title, self.hdlr)
			query = re.sub(r'[^A-Za-z0-9\s\.-]+', '', query)
			urls = []
			url = self.search_link % quote(query + ' -soundtrack') # filter
			url = urljoin(self.base_link, url)
			urls.append(url)
			urls.append(url + '&page=2')
			# log_utils.log('urls = %s' % urls, __name__, log_utils.LOGDEBUG)

			threads = []
			for url in urls:
				threads.append(workers.Thread(self.get_sources, url))
			[i.start() for i in threads]
			[i.join() for i in threads]
			return self.sources
		except:
			source_utils.scraper_error('BTDB')
			return self.sources
Example #9
    def sources(self, url, hostDict):
        self.sources = []
        self.items = []
        if not url: return self.sources
        try:
            data = parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            self.title = data[
                'tvshowtitle'] if 'tvshowtitle' in data else data['title']
            self.title = self.title.replace('&', 'and').replace(
                'Special Victims Unit', 'SVU')
            self.aliases = data['aliases']
            self.episode_title = data[
                'title'] if 'tvshowtitle' in data else None
            self.year = data['year']
            self.hdlr = 'S%02dE%02d' % (
                int(data['season']),
                int(data['episode'])) if 'tvshowtitle' in data else self.year

            query = '%s %s' % (self.title, self.hdlr)
            query = re.sub(r'[^A-Za-z0-9\s\.-]+', '', query)
            urls = []
            if 'tvshowtitle' in data:
                urls.append(self.tvsearch % (quote(query)))
            else:
                urls.append(self.moviesearch % (quote(query)))
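            # results page 2: the search templates appear to embed the page number as /1/, /2/, ...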
            url2 = urls[0].replace('/1/', '/2/')
            urls.append(url2)
            # log_utils.log('urls = %s' % urls, log_utils.LOGDEBUG)

            threads = []
            for url in urls:
                threads.append(workers.Thread(self.get_items, url))
            [i.start() for i in threads]
            [i.join() for i in threads]

            threads2 = []
            for i in self.items:
                threads2.append(workers.Thread(self.get_sources, i))
            [i.start() for i in threads2]
            [i.join() for i in threads2]
            return self.sources
        except:
            source_utils.scraper_error('1337X')
            return self.sources
Example #10
    def get(self, netloc, ua, timeout):
        threads = []
        for i in range(15):
            threads.append(workers.Thread(self.get_cookie, netloc, ua, timeout))
        [i.start() for i in threads]
        for i in range(30):
            if self.cookie is not None:
                return self.cookie
            sleep(1)
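The method above races 15 get_cookie workers and then polls self.cookie once a second for up to 30 seconds. A sketch of the same idea with threading.Event instead of sleep-polling, so the waiter wakes as soon as any worker succeeds (the solver below is a hypothetical stand-in for get_cookie):

    import threading

    class CookieGetter:
        def __init__(self):
            self.cookie = None
            self._ready = threading.Event()

        def _get_cookie(self, netloc, ua, timeout):
            self.cookie = 'session=abc'  # pretend the challenge was solved
            self._ready.set()            # wake the waiter immediately

        def get(self, netloc, ua, timeout):
            for _ in range(15):
                threading.Thread(target=self._get_cookie,
                                 args=(netloc, ua, timeout), daemon=True).start()
            # wait up to 30 s, returning as soon as any worker sets the event
            if self._ready.wait(timeout=30):
                return self.cookie
            return None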
Example #11
    def sources_packs(self,
                      data,
                      hostDict,
                      search_series=False,
                      total_seasons=None,
                      bypass_filter=False):
        self.sources = []
        if not data: return self.sources
        try:
            self.search_series = search_series
            self.total_seasons = total_seasons
            self.bypass_filter = bypass_filter

            self.title = data['tvshowtitle'].replace('&', 'and').replace(
                'Special Victims Unit', 'SVU')
            if self.title == 'The End of the F*****g World' or self.title == 'The End of the F***ing World':
                self.title = 'The End of the Fxxxing World'
            self.aliases = data['aliases']
            self.imdb = data['imdb']
            self.year = data['year']
            self.season_x = data['season']
            self.season_xx = self.season_x.zfill(2)

            query = re.sub(r'[^A-Za-z0-9\s\.-]+', '', self.title)
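            # the search template appears to take the title's first letter (an index bucket) plus the slugged query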
            queries = [
                self.search_link.format(
                    query[0].lower(),
                    cleantitle.geturl(query + ' S%s' % self.season_xx)),
                self.search_link.format(
                    query[0].lower(),
                    cleantitle.geturl(query + ' Season %s' % self.season_x))
            ]
            if search_series:
                queries = [
                    self.search_link.format(
                        query[0].lower(),
                        cleantitle.geturl(query + ' Season')),
                    self.search_link.format(
                        query[0].lower(),
                        cleantitle.geturl(query + ' Complete'))
                ]
            threads = []
            for url in queries:
                link = '%s%s' % (self.base_link, url)
                threads.append(workers.Thread(self.get_sources_packs, link))
            [i.start() for i in threads]
            [i.join() for i in threads]
            return self.sources
        except:
            source_utils.scraper_error('MAGNETDL')
            return self.sources
Example #12
    def sources_packs(self,
                      url,
                      hostDict,
                      search_series=False,
                      total_seasons=None,
                      bypass_filter=False):
        self.sources = []
        if not url: return self.sources
        try:
            self.search_series = search_series
            self.total_seasons = total_seasons
            self.bypass_filter = bypass_filter

            data = parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            self.title = data['tvshowtitle'].replace('&', 'and').replace(
                'Special Victims Unit', 'SVU')
            self.aliases = data['aliases']
            self.imdb = data['imdb']
            self.year = data['year']
            self.season_x = data['season']
            self.season_xx = self.season_x.zfill(2)
            category = '+category%3ATV'

            # query = re.sub(r'(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', '', self.title)
            query = re.sub(r'[^A-Za-z0-9\s\.-]+', '', self.title)
            queries = [
                self.search_link % quote_plus(query + ' S%s' % self.season_xx),
                self.search_link %
                quote_plus(query + ' Season %s' % self.season_x)
            ]
            if self.search_series:
                queries = [
                    self.search_link % quote_plus(query + ' Season'),
                    self.search_link % quote_plus(query + ' Complete')
                ]
            threads = []
            for url in queries:
                link = urljoin(self.base_link, url) + category + '&v=t&s=sz&sd=d'
                threads.append(workers.Thread(self.get_sources_packs, link))
            [i.start() for i in threads]
            [i.join() for i in threads]
            return self.sources
        except:
            source_utils.scraper_error('ZOOQLE')
            return self.sources
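A note on the Zooqle query pieces above: the category fragment is pre-encoded (%3A is a colon), and the trailing &v=t&s=sz&sd=d parameters appear to pick a view and sort results by size, descending. A quick check with the standard library:

    from urllib.parse import unquote
    print(unquote('+category%3ATV'))  # -> '+category:TV'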
Example #13
    def sources(self, url, hostDict):
        self.sources = []
        if not url: return self.sources
        try:
            scraper = cfscrape.create_scraper()
            data = parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            self.title = data[
                'tvshowtitle'] if 'tvshowtitle' in data else data['title']
            self.title = self.title.replace('&', 'and').replace(
                'Special Victims Unit', 'SVU')
            self.aliases = data['aliases']
            self.episode_title = data[
                'title'] if 'tvshowtitle' in data else None
            self.year = data['year']
            self.hdlr = 'S%02dE%02d' % (
                int(data['season']),
                int(data['episode'])) if 'tvshowtitle' in data else self.year

            query = '%s %s' % (self.title, self.hdlr)
            query = re.sub(r'[^A-Za-z0-9\s\.-]+', '', query)
            urls = []
            url = self.search_link % quote_plus(query)
            url = urljoin(self.base_link, url)
            urls.append(url)
            # urls.append('%s%s' % (url, '&page=2')) # next page is not working atm
            # urls.append('%s%s' % (url, '&page=3'))
            # log_utils.log('urls = %s' % urls, log_utils.LOGDEBUG)

            links = []
            for x in urls:
                r = py_tools.ensure_str(scraper.get(x).content,
                                        errors='replace')
                if not r: continue
                rows = client.parseDOM(r, 'tr', attrs={'class': 'tlr'})
                rows += client.parseDOM(r, 'tr', attrs={'class': 'tlz'})
                links.extend(rows)
            threads = []
            for link in links:
                threads.append(workers.Thread(self.get_sources, link))
            [i.start() for i in threads]
            [i.join() for i in threads]
            return self.sources
        except:
            source_utils.scraper_error('EXTRATORRENT')
            return self.sources
Example #14
    def sources(self, url, hostDict):
        self.sources = []
        if not url: return self.sources
        try:
            data = parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            self.title = data[
                'tvshowtitle'] if 'tvshowtitle' in data else data['title']
            self.title = self.title.replace('&', 'and').replace(
                'Special Victims Unit', 'SVU')
            self.aliases = data['aliases']
            self.episode_title = data[
                'title'] if 'tvshowtitle' in data else None
            self.year = data['year']
            self.hdlr = 'S%02dE%02d' % (
                int(data['season']),
                int(data['episode'])) if 'tvshowtitle' in data else self.year

            query = '%s %s' % (self.title, self.hdlr)
            query = re.sub(r'[^A-Za-z0-9\s\.-]+', '', query)
            urls = []
            if 'tvshowtitle' in data:
                url = self.tvsearch.format(quote_plus(query))
            else:
                url = self.moviesearch.format(quote_plus(query))
            url = urljoin(self.base_link, url)
            urls.append(url)

            if url.endswith('field=size&sorder=desc'):
                url2 = url.rsplit("/", 1)[0] + '/2/'
                urls.append(url2)
            else:
                url2 = url + '/2/'
                urls.append(url2)
            threads = []
            for url in urls:
                threads.append(workers.Thread(self.get_sources, url))
            [i.start() for i in threads]
            [i.join() for i in threads]
            return self.sources
        except:
            source_utils.scraper_error('KICKASS2')
            return self.sources
Example #15
    def sources(self, data, hostDict):
        self.sources = []
        if not data: return self.sources
        try:
            self.scraper = cfscrape.create_scraper()

            self.title = data[
                'tvshowtitle'] if 'tvshowtitle' in data else data['title']
            self.title = self.title.replace('&', 'and').replace(
                'Special Victims Unit', 'SVU')
            self.aliases = data['aliases']
            self.episode_title = data[
                'title'] if 'tvshowtitle' in data else None
            self.year = data['year']
            self.hdlr = 'S%02dE%02d' % (
                int(data['season']),
                int(data['episode'])) if 'tvshowtitle' in data else self.year

            query = '%s %s' % (self.title, self.hdlr)
            query = re.sub(r'[^A-Za-z0-9\s\.-]+', '', query)
            urls = []
            if 'tvshowtitle' in data:
                url = self.tvsearch_link % quote_plus(query)
            else:
                url = self.msearch_link % quote_plus(query)
            url = '%s%s' % (self.base_link, url)
            urls.append(url)
            urls.append('%s%s' % (url, '&page=2'))  # next page seems to be working once again
            # urls.append('%s%s' % (url, '&page=3'))
            # log_utils.log('urls = %s' % urls)

            threads = []
            for url in urls:
                threads.append(workers.Thread(self.get_sources, url))
            [i.start() for i in threads]
            [i.join() for i in threads]
            return self.sources
        except:
            source_utils.scraper_error('EXTRATORRENT')
            return self.sources
Example #16
    def sources(self, url, hostDict):
        self.sources = []
        if not url: return self.sources
        try:
            data = parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            self.title = data[
                'tvshowtitle'] if 'tvshowtitle' in data else data['title']
            self.title = self.title.replace('&', 'and').replace(
                'Special Victims Unit', 'SVU')
            self.aliases = data['aliases']
            self.episode_title = data[
                'title'] if 'tvshowtitle' in data else None
            self.year = data['year']
            self.hdlr = 'S%02dE%02d' % (
                int(data['season']),
                int(data['episode'])) if 'tvshowtitle' in data else self.year

            category = '+category%3ATV' if 'tvshowtitle' in data else '+category%3AMovies'
            query = '%s %s' % (self.title, self.hdlr)
            query = re.sub(r'[^A-Za-z0-9\s\.-]+', '', query)
            urls = []
            url = self.search_link % quote_plus(query)
            url = urljoin(self.base_link, url) + category + '&v=t&s=sz&sd=d'
            urls.append(url)
            urls.append(url.replace('pg=1', 'pg=2'))
            # log_utils.log('urls = %s' % urls, log_utils.LOGDEBUG)
            threads = []
            for url in urls:
                threads.append(workers.Thread(self.get_sources, url))
            [i.start() for i in threads]
            [i.join() for i in threads]
            return self.sources
        except:
            source_utils.scraper_error('ZOOQLE')
            return self.sources
Example #17
    def sources(self, data, hostDict):
        self.sources = []
        if not data: return self.sources
        try:
            self.scraper = cfscrape.create_scraper()

            self.title = data[
                'tvshowtitle'] if 'tvshowtitle' in data else data['title']
            self.title = self.title.replace('&', 'and').replace(
                'Special Victims Unit', 'SVU')
            self.aliases = data['aliases']
            self.episode_title = data[
                'title'] if 'tvshowtitle' in data else None
            self.year = data['year']
            self.hdlr = 'S%02dE%02d' % (
                int(data['season']),
                int(data['episode'])) if 'tvshowtitle' in data else self.year

            query = '%s %s' % (self.title, self.hdlr)
            query = re.sub(r'[^A-Za-z0-9\s\.-]+', '', query)
            urls = []
            if 'tvshowtitle' in data:
                url = self.tvsearch.format(quote_plus(query))
            else:
                url = self.moviesearch.format(quote_plus(query))
            urls.append(url)

            url2 = url.replace('/1/', '/2/')
            urls.append(url2)
            threads = []
            for url in urls:
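                # swap quote_plus's '+' for '-'; LimeTorrents-style search paths appear to use hyphens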
                link = ('%s%s' % (self.base_link, url)).replace('+', '-')
                threads.append(workers.Thread(self.get_sources, link))
            [i.start() for i in threads]
            [i.join() for i in threads]
            return self.sources
        except:
            source_utils.scraper_error('LIMETORRENTS')
            return self.sources
Example #18
    def sources(self, data, hostDict):
        self.sources = []
        if not data: return self.sources
        try:
            self.hostDict = hostDict

            self.title = data[
                'tvshowtitle'] if 'tvshowtitle' in data else data['title']
            self.title = self.title.replace('&', 'and').replace(
                'Special Victims Unit', 'SVU')
            self.aliases = data['aliases']
            self.episode_title = data[
                'title'] if 'tvshowtitle' in data else None
            self.year = data['year']
            self.hdlr = 'S%02dE%02d' % (
                int(data['season']),
                int(data['episode'])) if 'tvshowtitle' in data else self.year
            imdb = data['imdb']

            url = self.search(self.title, self.year)
            # log_utils.log('url = %s' % url, log_utils.LOGDEBUG)
            if not url: return self.sources

            # result = self.scraper.get(url, headers=self.headers).content
            result = py_tools.ensure_str(self.scraper.get(url, headers=self.headers).content, errors='replace')
            if not result: return self.sources
            r_pack = None
            if 'tvshowtitle' in data:
                r = dom_parser.parse_dom(result, 'ul', {'id': 'episodes'})
                # r_pack = dom_parser.parse_dom(result, 'ul', {'id': 'packs'}) # Rapidmoviez has pack files, needs more work
            else:
                r = dom_parser.parse_dom(result, 'ul', {'id': 'releases'})

            if not r and not r_pack: return self.sources
            if r:
                r = dom_parser.parse_dom(r[0].content, 'a', req=['href'])
                r = [(i.content, urljoin(self.base_link, i.attrs['href']))
                     for i in r if i and i.content != 'Watch']
                r = [(i[0], i[1]) for i in r if self.hdlr in i[0].upper()]

            # if r_pack:
            # r_pack = dom_parser.parse_dom(r_pack[0].content, 'a', req=['href'])
            # r_pack = [(i.content, urljoin(self.base_link, i.attrs['href'])) for i in r_pack if i and i.content != 'Watch']
            # r += [(i[0], i[1]) for i in r_pack if 'S%02d' % int(data['season']) in i[0].upper()]
            # r += [(i[0], i[1]) for i in r_pack if 'SEASON %02d' % int(data['season']) in i[0].upper()]

            # log_utils.log('r = %s' % r, log_utils.LOGDEBUG)
            threads = []
            for i in r:
                threads.append(workers.Thread(self.get_sources, i[0], i[1]))
            [i.start() for i in threads]
            while any(x.is_alive() for x in threads):
                time.sleep(0.1)
            return self.sources
        except:
            source_utils.scraper_error('RAPIDMOVIEZ')
            return self.sources
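Unlike the earlier examples, this scraper waits for its workers with an is_alive() poll instead of joining them; both end up blocking until every thread has finished. A minimal join-based sketch of the same wait, with a hypothetical fetch standing in for get_sources:

    import threading
    import time

    def fetch(name, url):
        time.sleep(0.2)  # stand-in for self.get_sources(name, url)

    threads = [threading.Thread(target=fetch, args=('rel%d' % i, 'link%d' % i))
               for i in range(3)]
    for t in threads:
        t.start()
    for t in threads:
        t.join()  # blocks without polling; returns once the thread finishes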