    def __init__(self):
        self.priority = 24
        self.language = ['en']
        self.domains = ['rmz.cr', 'rapidmoviez.site']
        self.base_link = 'http://rmz.cr/'
        self.search_link = 'search/%s'
        # self.base_link = 'http://rapidmoviez.cr/' # cloudflare IUAM challenge failure
        self.scraper = cfscrape.create_scraper()
        self.headers = {'User-Agent': client.agent()}
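    # NOTE: these examples are method bodies lifted from Kodi scraper classes.
    # A hedged sketch of the imports they assume, inferred from the calls
    # below; cache, client, py_tools, source_utils and workers are the
    # addon's own helper modules (exact package paths vary by addon):
    #   import re
    #   import cfscrape
    #   from json import loads as jsloads
    #   from time import sleep
    #   from urllib.parse import parse_qs, quote_plus, unquote_plus, urljoin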
    def sources(self, url, hostDict):
        self.sources = []
        if not url: return self.sources
        try:
            scraper = cfscrape.create_scraper()
            data = parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            self.title = data[
                'tvshowtitle'] if 'tvshowtitle' in data else data['title']
            self.title = self.title.replace('&', 'and').replace(
                'Special Victims Unit', 'SVU')
            self.aliases = data['aliases']
            self.episode_title = data[
                'title'] if 'tvshowtitle' in data else None
            self.year = data['year']
            self.hdlr = 'S%02dE%02d' % (
                int(data['season']),
                int(data['episode'])) if 'tvshowtitle' in data else self.year

            query = '%s %s' % (self.title, self.hdlr)
            query = re.sub(r'[^A-Za-z0-9\s\.-]+', '', query)
            urls = []
            url = self.search_link % quote_plus(query)
            url = urljoin(self.base_link, url)
            urls.append(url)
            # urls.append('%s%s' % (url, '&page=2')) # next page is not working atm
            # urls.append('%s%s' % (url, '&page=3'))
            # log_utils.log('urls = %s' % urls, log_utils.LOGDEBUG)

            links = []
            for x in urls:
                r = py_tools.ensure_str(scraper.get(x).content,
                                        errors='replace')
                if not r: continue
                rows = client.parseDOM(r, 'tr', attrs={'class': 'tlr'})
                rows += client.parseDOM(r, 'tr', attrs={'class': 'tlz'})
                links.extend(rows)
            threads = []
            for link in links:
                threads.append(workers.Thread(self.get_sources, link))
            [i.start() for i in threads]
            [i.join() for i in threads]
            return self.sources
        except:
            source_utils.scraper_error('RAPIDMOVIEZ')  # this class scrapes rmz.cr (RapidMoviez), so label errors accordingly
            return self.sources
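    # Hedged usage sketch: 'url' is a urlencoded query string, which parse_qs
    # unpacks above. A hypothetical caller (names and values are illustrative
    # only; 'scraper_instance' stands for an instance of this class):
    #   from urllib.parse import urlencode
    #   url = urlencode({'tvshowtitle': 'Law and Order Special Victims Unit',
    #                    'aliases': '', 'title': 'Payback', 'year': '1999',
    #                    'season': '1', 'episode': '1'})
    #   results = scraper_instance.sources(url, hostDict=[])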
    def sources_packs(self,
                      url,
                      hostDict,
                      search_series=False,
                      total_seasons=None,
                      bypass_filter=False):
        self.sources = []
        if not url: return self.sources
        try:
            self.scraper = cfscrape.create_scraper()
            self.search_series = search_series
            self.total_seasons = total_seasons
            self.bypass_filter = bypass_filter

            data = parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            self.title = data['tvshowtitle'].replace('&', 'and').replace(
                'Special Victims Unit', 'SVU')
            self.aliases = data['aliases']
            self.imdb = data['imdb']
            self.year = data['year']
            self.season_x = data['season']
            self.season_xx = self.season_x.zfill(2)

            query = re.sub(r'[^A-Za-z0-9\s\.-]+', '', self.title)
            queries = [
                self.search_link % quote_plus(query + ' S%s' % self.season_xx),
                self.search_link %
                quote_plus(query + ' Season %s' % self.season_x)
            ]
            if self.search_series:
                queries = [
                    self.search_link % quote_plus(query + ' Season'),
                    self.search_link % quote_plus(query + ' Complete')
                ]
            threads = []
            for url in queries:
                link = urljoin(self.base_link, url)
                threads.append(workers.Thread(self.get_sources_packs, link))
            [i.start() for i in threads]
            [i.join() for i in threads]
            return self.sources
        except:
            source_utils.scraper_error('RAPIDMOVIEZ')
            return self.sources

# Example 4 (LIMETORRENTS sources_packs)
    def sources_packs(self,
                      data,
                      hostDict,
                      search_series=False,
                      total_seasons=None,
                      bypass_filter=False):
        self.sources = []
        if not data: return self.sources
        try:
            self.scraper = cfscrape.create_scraper()
            self.search_series = search_series
            self.total_seasons = total_seasons
            self.bypass_filter = bypass_filter

            self.title = data['tvshowtitle'].replace('&', 'and').replace(
                'Special Victims Unit', 'SVU')
            self.aliases = data['aliases']
            self.imdb = data['imdb']
            self.year = data['year']
            self.season_x = data['season']
            self.season_xx = self.season_x.zfill(2)

            query = re.sub(r'[^A-Za-z0-9\s\.-]+', '', self.title)
            queries = [
                self.tvsearch.format(
                    quote_plus(query + ' S%s' % self.season_xx)),
                self.tvsearch.format(
                    quote_plus(query + ' Season %s' % self.season_x))
            ]
            if self.search_series:
                queries = [
                    self.tvsearch.format(quote_plus(query + ' Season')),
                    self.tvsearch.format(quote_plus(query + ' Complete'))
                ]
            threads = []
            for url in queries:
                link = ('%s%s' % (self.base_link, url)).replace('+', '-')
                threads.append(workers.Thread(self.get_sources_packs, link))
            [i.start() for i in threads]
            [i.join() for i in threads]
            return self.sources
        except:
            source_utils.scraper_error('LIMETORRENTS')
            return self.sources

# Example 5 (EXTRATORRENT sources)
    def sources(self, data, hostDict):
        self.sources = []
        if not data: return self.sources
        try:
            self.scraper = cfscrape.create_scraper()

            self.title = data[
                'tvshowtitle'] if 'tvshowtitle' in data else data['title']
            self.title = self.title.replace('&', 'and').replace(
                'Special Victims Unit', 'SVU')
            self.aliases = data['aliases']
            self.episode_title = data[
                'title'] if 'tvshowtitle' in data else None
            self.year = data['year']
            self.hdlr = 'S%02dE%02d' % (
                int(data['season']),
                int(data['episode'])) if 'tvshowtitle' in data else self.year

            query = '%s %s' % (self.title, self.hdlr)
            query = re.sub(r'[^A-Za-z0-9\s\.-]+', '', query)
            urls = []
            url = self.tvsearch_link % quote_plus(
                query
            ) if 'tvshowtitle' in data else self.msearch_link % quote_plus(
                query)
            url = '%s%s' % (self.base_link, url)
            urls.append(url)
            urls.append(
                '%s%s' %
                (url, '&page=2'))  # next page seems to be working once again
            # urls.append('%s%s' % (url, '&page=3'))
            # log_utils.log('urls = %s' % urls)

            threads = []
            for url in urls:
                threads.append(workers.Thread(self.get_sources, url))
            [i.start() for i in threads]
            [i.join() for i in threads]
            return self.sources
        except:
            source_utils.scraper_error('EXTRATORRENT')
            return self.sources

# Example 6 (LIMETORRENTS sources)
    def sources(self, data, hostDict):
        self.sources = []
        if not data: return self.sources
        try:
            self.scraper = cfscrape.create_scraper()

            self.title = data[
                'tvshowtitle'] if 'tvshowtitle' in data else data['title']
            self.title = self.title.replace('&', 'and').replace(
                'Special Victims Unit', 'SVU')
            self.aliases = data['aliases']
            self.episode_title = data[
                'title'] if 'tvshowtitle' in data else None
            self.year = data['year']
            self.hdlr = 'S%02dE%02d' % (
                int(data['season']),
                int(data['episode'])) if 'tvshowtitle' in data else self.year

            query = '%s %s' % (self.title, self.hdlr)
            query = re.sub(r'[^A-Za-z0-9\s\.-]+', '', query)
            urls = []
            if 'tvshowtitle' in data:
                url = self.tvsearch.format(quote_plus(query))
            else:
                url = self.moviesearch.format(quote_plus(query))
            urls.append(url)

            url2 = url.replace('/1/', '/2/')
            urls.append(url2)
            threads = []
            for url in urls:
                link = ('%s%s' % (self.base_link, url)).replace('+', '-')
                threads.append(workers.Thread(self.get_sources, link))
            [i.start() for i in threads]
            [i.join() for i in threads]
            return self.sources
        except:
            source_utils.scraper_error('LIMETORRENTS')
            return self.sources
	def sources(self, url, hostDict):
		sources = []
		if not url: return sources
		try:
			scraper = cfscrape.create_scraper()
			data = parse_qs(url)
			data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

			title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
			title = title.replace('&', 'and').replace('Special Victims Unit', 'SVU')
			aliases = data['aliases']
			episode_title = data['title'] if 'tvshowtitle' in data else None
			year = data['year']
			hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else year

			if 'tvshowtitle' in data:
				query = '%s %s' % (title, hdlr)
				query = re.sub(r'[^A-Za-z0-9\s\.-]+', '', query)
				url = self.search_link % quote_plus(query)
			else:
				url = self.search_link % data['imdb']
			url = urljoin(self.base_link, url)
			# log_utils.log('url = %s' % url, log_utils.LOGDEBUG)
			r = py_tools.ensure_str(scraper.get(url).content, errors='replace')
			posts = client.parseDOM(r, 'div', attrs={'class': 'tgxtable'})
			if not posts: return sources
		except:
			source_utils.scraper_error('TORRENTGALAXY')
			return sources
		for post in posts:
			try:
				links = zip(
							re.findall(r'href\s*=\s*["\'](magnet:[^"\']+)["\']', post, re.DOTALL | re.I),
							re.findall(r'<span\s*class\s*=\s*["\']badge\s*badge-secondary["\']\s*style\s*=\s*["\']border-radius:4px;["\']>(.*?)</span>', post, re.DOTALL | re.I),
							re.findall(r'<span\s*title\s*=\s*["\']Seeders/Leechers["\']>\[<font\s*color\s*=\s*["\']green["\']><b>(.*?)<', post, re.DOTALL | re.I))
				for link in links:
					url = unquote_plus(link[0]).split('&tr')[0].replace(' ', '.')
					url = source_utils.strip_non_ascii_and_unprintable(url)
					hash = re.compile(r'btih:(.*?)&', re.I).findall(url)[0]

					name = url.split('&dn=')[1]
					name = source_utils.clean_name(name)
					if not source_utils.check_title(title, aliases, name, hdlr, year): continue
					name_info = source_utils.info_from_name(name, title, year, hdlr, episode_title)
					if source_utils.remove_lang(name_info): continue

					if not episode_title: # filter out episodes returned by a movie query (rare, but a movie and a show titled 'Run' both exist for 2020)
						ep_strings = [r'[.-]s\d{2}e\d{2}([.-]?)', r'[.-]s\d{2}([.-]?)', r'[.-]season[.-]?\d{1,2}[.-]?']
						if any(re.search(item, name.lower()) for item in ep_strings): continue
					try:
						seeders = int(link[2])
						if self.min_seeders > seeders: continue
					except: seeders = 0

					quality, info = source_utils.get_release_quality(name_info, url)
					try:
						dsize, isize = source_utils._size(link[1])
						info.insert(0, isize)
					except: dsize = 0
					info = ' | '.join(info)

					sources.append({'provider': 'torrentgalaxy', 'source': 'torrent', 'seeders': seeders, 'hash': hash, 'name': name, 'name_info': name_info, 'quality': quality,
												'language': 'en', 'url': url, 'info': info, 'direct': False, 'debridonly': True, 'size': dsize})
			except:
				source_utils.scraper_error('TORRENTGALAXY')
		return sources
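# Hedged sketch of the magnet handling above: a magnet URI carries a btih
# info-hash and a 'dn' display name, and the code keeps everything before the
# first tracker ('&tr') parameter. Illustrative values only:
#   magnet = 'magnet:?xt=urn:btih:HASH&dn=Some.Show.S01E01.720p.WEB&tr=udp://tracker...'
#   url = unquote_plus(magnet).split('&tr')[0]  # 'magnet:?xt=urn:btih:HASH&dn=Some.Show.S01E01.720p.WEB'
#   hash = re.compile(r'btih:(.*?)&', re.I).findall(url)[0]  # 'HASH'
#   name = url.split('&dn=')[1]  # 'Some.Show.S01E01.720p.WEB'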

# Example 8 (TORRENTAPI sources)
    def sources(self, data, hostDict):
        sources = []
        if not data: return sources
        try:
            self.scraper = cfscrape.create_scraper()
            self.key = cache.get(self._get_token,
                                 0.2)  # the token is valid for ~800 secs, so cache it for 0.2 hrs (720 secs)

            title = data['tvshowtitle'] if 'tvshowtitle' in data else data[
                'title']
            title = title.replace('&', 'and').replace('Special Victims Unit',
                                                      'SVU')
            aliases = data['aliases']
            episode_title = data['title'] if 'tvshowtitle' in data else None
            year = data['year']
            hdlr = 'S%02dE%02d' % (int(data['season']), int(
                data['episode'])) if 'tvshowtitle' in data else year

            query = '%s %s' % (title, hdlr)
            query = re.sub(r'[^A-Za-z0-9\s\.-]+', '', query)
            if 'tvshowtitle' in data:
                search_link = self.tvshowsearch.format(self.key, data['imdb'],
                                                       hdlr)
            else:
                search_link = self.msearch.format(self.key, data['imdb'])
            sleep(2.1)  # torrentapi allows roughly one request per 2 seconds
            rjson = self.scraper.get(search_link).content
            if not rjson or 'torrent_results' not in str(rjson): return sources
            files = jsloads(rjson)['torrent_results']
        except:
            source_utils.scraper_error('TORRENTAPI')
            return sources
        for file in files:
            try:
                url = file["download"].split('&tr')[0]
                hash = re.search(r'btih:(.*?)&', url, re.I).group(1)
                name = source_utils.clean_name(unquote_plus(file["title"]))

                if not source_utils.check_title(title, aliases, name, hdlr,
                                                year):
                    continue
                name_info = source_utils.info_from_name(
                    name, title, year, hdlr, episode_title)
                if source_utils.remove_lang(name_info): continue

                if not episode_title:  # filter out episodes returned by a movie query (rare, but a movie and a show titled 'Run' both exist for 2020)
                    ep_strings = [
                        r'[.-]s\d{2}e\d{2}([.-]?)', r'[.-]s\d{2}([.-]?)',
                        r'[.-]season[.-]?\d{1,2}[.-]?'
                    ]
                    if any(
                            re.search(item, name.lower())
                            for item in ep_strings):
                        continue
                try:
                    seeders = int(file["seeders"])
                    if self.min_seeders > seeders: continue
                except:
                    seeders = 0

                quality, info = source_utils.get_release_quality(
                    name_info, url)
                try:
                    dsize, isize = source_utils.convert_size(file["size"],
                                                             to='GB')
                    info.insert(0, isize)
                except:
                    dsize = 0
                info = ' | '.join(info)

                sources.append({
                    'provider': 'torrentapi',
                    'source': 'torrent',
                    'seeders': seeders,
                    'hash': hash,
                    'name': name,
                    'name_info': name_info,
                    'quality': quality,
                    'language': 'en',
                    'url': url,
                    'info': info,
                    'direct': False,
                    'debridonly': True,
                    'size': dsize
                })
            except:
                source_utils.scraper_error('TORRENTAPI')
        return sources
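    # The cache.get(self._get_token, 0.2) call above caches the API token for
    # 0.2 hours. A hypothetical _get_token consistent with that call (the real
    # class defines its own; the endpoint and app_id here are assumptions):
    #   def _get_token(self):
    #       r = self.scraper.get('https://torrentapi.org/pubapi_v2.php?get_token=get_token&app_id=example').content
    #       return jsloads(r)['token']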

# Example 9 (TORRENTAPI sources_packs)
    def sources_packs(self,
                      data,
                      hostDict,
                      search_series=False,
                      total_seasons=None,
                      bypass_filter=False):
        sources = []
        if not data: return sources
        if search_series:  # torrentapi does not have showPacks
            return sources
        try:
            self.scraper = cfscrape.create_scraper()
            self.key = cache.get(self._get_token,
                                 0.2)  # the token is valid for ~800 secs, so cache it for 0.2 hrs (720 secs)

            self.bypass_filter = bypass_filter

            self.title = data['tvshowtitle'].replace('&', 'and').replace(
                'Special Victims Unit', 'SVU')
            self.aliases = data['aliases']
            self.year = data['year']
            self.season_x = data['season']
            self.season_xx = self.season_x.zfill(2)
            search_link = self.tvshowsearch.format(self.key, data['imdb'],
                                                   'S%s' % self.season_xx)
            # log_utils.log('search_link = %s' % str(search_link))
            sleep(2.1)  # torrentapi allows roughly one request per 2 seconds
            rjson = self.scraper.get(search_link).content
            if not rjson or 'torrent_results' not in str(rjson): return sources
            files = jsloads(rjson)['torrent_results']
        except:
            source_utils.scraper_error('TORRENTAPI')
            return sources
        for file in files:
            try:
                url = file["download"].split('&tr')[0]
                hash = re.search(r'btih:(.*?)&', url, re.I).group(1)
                name = source_utils.clean_name(unquote_plus(file["title"]))

                if not self.bypass_filter:
                    if not source_utils.filter_season_pack(
                            self.title, self.aliases, self.year, self.season_x,
                            name):
                        continue
                package = 'season'

                name_info = source_utils.info_from_name(name,
                                                        self.title,
                                                        self.year,
                                                        season=self.season_x,
                                                        pack=package)
                if source_utils.remove_lang(name_info): continue
                try:
                    seeders = int(file["seeders"])
                    if self.min_seeders > seeders: continue
                except:
                    seeders = 0

                quality, info = source_utils.get_release_quality(
                    name_info, url)
                try:
                    dsize, isize = source_utils.convert_size(file["size"],
                                                             to='GB')
                    info.insert(0, isize)
                except:
                    dsize = 0
                info = ' | '.join(info)

                sources.append({
                    'provider': 'torrentapi',
                    'source': 'torrent',
                    'seeders': seeders,
                    'hash': hash,
                    'name': name,
                    'name_info': name_info,
                    'quality': quality,
                    'language': 'en',
                    'url': url,
                    'info': info,
                    'direct': False,
                    'debridonly': True,
                    'size': dsize,
                    'package': package
                })
            except:
                source_utils.scraper_error('TORRENTAPI')
        return sources

# Example 10 (SCENERLS sources)
    def sources(self, url, hostDict):
        sources = []
        if not url: return sources
        try:
            scraper = cfscrape.create_scraper()
            data = parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            title = data['tvshowtitle'] if 'tvshowtitle' in data else data[
                'title']
            title = title.replace('&', 'and').replace('Special Victims Unit',
                                                      'SVU')
            aliases = data['aliases']
            episode_title = data['title'] if 'tvshowtitle' in data else None
            year = data['year']
            hdlr = 'S%02dE%02d' % (int(data['season']), int(
                data['episode'])) if 'tvshowtitle' in data else year

            query = '%s %s' % (title, hdlr)
            query = re.sub(r'(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', '', query)
            url = self.search_link % quote_plus(query)
            url = urljoin(self.base_link, url)
            # log_utils.log('url = %s' % url, log_utils.LOGDEBUG)
            # r = scraper.get(url).content
            r = py_tools.ensure_str(scraper.get(url).content, errors='replace')
            posts = client.parseDOM(r, 'div', attrs={'class': 'post'})
            if not posts: return sources
        except:
            source_utils.scraper_error('SCENERLS')
            return sources
        items = []
        for post in posts:
            try:
                content = client.parseDOM(post,
                                          "div",
                                          attrs={"class": "postContent"})
                size = re.findall(
                    r'((?:\d+\,\d+\.\d+|\d+\.\d+|\d+\,\d+|\d+)\s*(?:GB|GiB|Gb|MB|MiB|Mb))',
                    content[0])[0]
                u = client.parseDOM(content, "h2")
                u = client.parseDOM(u, 'a', ret='href')
                u = [(i.strip('/').split('/')[-1], i, size) for i in u]
                items += u
            except:
                source_utils.scraper_error('SCENERLS')
                return sources
        for item in items:
            try:
                name = item[0]
                name = client.replaceHTMLCodes(name)
                if not source_utils.check_title(title, aliases, name, hdlr,
                                                year):
                    continue
                name_info = source_utils.info_from_name(
                    name, title, year, hdlr, episode_title)
                if source_utils.remove_lang(name_info): continue
                # check year for reboot/remake show issues if year is available-crap shoot
                # if 'tvshowtitle' in data:
                # if re.search(r'([1-3][0-9]{3})', name):
                # if not any(value in name for value in [year, str(int(year)+1), str(int(year)-1)]):
                # continue

                url = py_tools.ensure_text(client.replaceHTMLCodes(str(
                    item[1])),
                                           errors='replace')
                if url in str(sources): continue

                valid, host = source_utils.is_host_valid(url, hostDict)
                if not valid: continue

                quality, info = source_utils.get_release_quality(
                    name_info, url)
                try:
                    dsize, isize = source_utils._size(item[2])
                    info.insert(0, isize)
                except:
                    dsize = 0
                info = ' | '.join(info)

                sources.append({
                    'provider': 'scenerls',
                    'source': host,
                    'name': name,
                    'name_info': name_info,
                    'quality': quality,
                    'language': 'en',
                    'url': url,
                    'info': info,
                    'direct': False,
                    'debridonly': True,
                    'size': dsize
                })
            except:
                source_utils.scraper_error('SCENERLS')
        return sources
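    # Hedged note on the source_utils._size contract assumed above: it appears
    # to take a human-readable size string and return (dsize, isize), where
    # dsize is a float in GB stored in the 'size' field and isize is a display
    # string prepended to 'info'. Illustrative only:
    #   dsize, isize = source_utils._size('1.4 GB')  # -> (1.4, '1.4 GB')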

# Example 11 (RLSBB sources)
    def sources(self, url, hostDict):
        sources = []
        if not url: return sources
        try:
            scraper = cfscrape.create_scraper(delay=5)
            data = parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            title = data['tvshowtitle'] if 'tvshowtitle' in data else data[
                'title']
            title = title.replace('&', 'and').replace('Special Victims Unit',
                                                      'SVU')
            aliases = data['aliases']
            episode_title = data['title'] if 'tvshowtitle' in data else None
            year = data['year']
            hdlr = 'S%02dE%02d' % (int(data['season']), int(
                data['episode'])) if 'tvshowtitle' in data else year

            isSeasonQuery = False
            query = '%s %s' % (title, hdlr)
            query = re.sub(r'(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', '', query)
            # query = re.sub(r'[^A-Za-z0-9\s\.-]+', '', query)
            query = re.sub(r'\s', '-', query)

            if int(year) >= 2021: self.base_link = self.base_new
            else: self.base_link = self.base_old

            url = urljoin(self.base_link, query)
            # log_utils.log('url = %s' % url, log_utils.LOGDEBUG)

            # r = scraper.get(url).content
            r = py_tools.ensure_str(scraper.get(url).content, errors='replace')
            if not r or 'nothing was found' in r:
                if 'tvshowtitle' in data:
                    season = re.search(r'S(.*?)E', hdlr).group(1)
                    query = re.sub(r'(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', '',
                                   title)
                    # query = re.sub(r'[^A-Za-z0-9\s\.-]+', '', title)
                    query = re.sub(r'\s', '-', query)
                    query = query + "-S" + season
                    url = urljoin(self.base_link, query)
                    # r = scraper.get(url).content
                    r = py_tools.ensure_str(scraper.get(url).content,
                                            errors='replace')
                    isSeasonQuery = True
                else:
                    return sources
            if not r or 'nothing was found' in r: return sources
            # may need to add fallback to use self.search_link if nothing found
            posts = client.parseDOM(r, "div", attrs={"class": "content"})
            if not posts: return sources
        except:
            source_utils.scraper_error('RLSBB')
            return sources

        release_title = re.sub(r'[^A-Za-z0-9\s\.-]+', '',
                               title).replace(' ', '.')
        items = []
        count = 0
        for post in posts:
            if count >= 300:
                break  # cap the number of links parsed to keep scrape time reasonable
            try:
                post_titles = re.findall(
                    r'(?:.*>|>\sRelease Name.*|\s)(%s.*?)<' % release_title,
                    post, re.I
                )  #parse all matching release_titles in each post(content) group
                items = []
                if len(post_titles) > 1:
                    index = 0
                    for name in post_titles:
                        start = post_titles[index].replace('[', '\\[').replace(
                            '(', '\\(').replace(')', '\\)').replace(
                                '+', '\\+').replace(' \\ ', ' \\\\ ')
                        end = (post_titles[index + 1].replace(
                            '[', '\\[').replace('(', '\\(').replace(
                                ')', '\\)').replace('+', '\\+')).replace(
                                    ' \\ ', ' \\\\ '
                                ) if index + 1 < len(post_titles) else ''
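                        # Hedged aside: re.escape(post_titles[index]) would
                        # cover this manual bracket/plus escaping in one call.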
                        try:
                            container = re.findall(
                                r'(?:%s)([\S\s]+)(?:%s)' % (start, end), post,
                                re.I
                            )[0]  #parse all data between release_titles in multi post(content) group
                        except:
                            source_utils.scraper_error('RLSBB')
                            continue
                        try:
                            size = re.findall(
                                r'((?:\d+\,\d+\.\d+|\d+\.\d+|\d+\,\d+|\d+)\s*(?:GB|GiB|Gb|MB|MiB|Mb))',
                                container)[0].replace(',', '.')
                        except:
                            size = '0'
                        container = client.parseDOM(container, 'a', ret='href')
                        items.append((name, size, container))
                        index += 1
                elif len(post_titles) == 1:
                    name = post_titles[0]
                    container = client.parseDOM(
                        post, 'a', ret='href'
                    )  #parse all links in a single post(content) group
                    try:
                        size = re.findall(
                            r'((?:\d+\,\d+\.\d+|\d+\.\d+|\d+\,\d+|\d+)\s*(?:GB|GiB|Gb|MB|MiB|Mb))',
                            post)[0].replace(',', '.')
                    except:
                        size = '0'
                    items.append((name, size, container))
                else:
                    continue

                for group_name, size, links in items:
                    for i in links:
                        name = group_name
                        # if isSeasonQuery and hdlr not in name.upper():
                        # name = i.rsplit("/", 1)[-1]
                        # if hdlr not in name.upper(): continue
                        if hdlr not in name.upper():
                            name = i.rsplit("/", 1)[-1]
                            if hdlr not in name.upper(): continue

                        name = client.replaceHTMLCodes(name)
                        name = source_utils.strip_non_ascii_and_unprintable(
                            name)
                        name_info = source_utils.info_from_name(
                            name, title, year, hdlr, episode_title)

                        url = py_tools.ensure_text(client.replaceHTMLCodes(
                            str(i)),
                                                   errors='replace')
                        if url in str(sources): continue
                        if url.endswith(('.rar', '.zip', '.iso', '.part',
                                         '.png', '.jpg', '.bmp', '.gif')):
                            continue

                        valid, host = source_utils.is_host_valid(url, hostDict)
                        if not valid: continue

                        quality, info = source_utils.get_release_quality(
                            name, url)
                        try:
                            if size == '0':
                                try:
                                    size = re.findall(
                                        r'((?:\d+\,\d+\.\d+|\d+\.\d+|\d+\,\d+|\d+)\s*(?:GB|GiB|Gb|MB|MiB|Mb))',
                                        name)[0].replace(',', '.')
                                except:
                                    raise Exception()
                            dsize, isize = source_utils._size(size)
                            info.insert(0, isize)
                        except:
                            dsize = 0
                        info = ' | '.join(info)

                        sources.append({
                            'provider': 'rlsbb',
                            'source': host,
                            'name': name,
                            'name_info': name_info,
                            'quality': quality,
                            'language': 'en',
                            'url': url,
                            'info': info,
                            'direct': False,
                            'debridonly': True,
                            'size': dsize
                        })
                        count += 1
            except:
                source_utils.scraper_error('RLSBB')
        return sources

# Example 12 (MAXRLS sources)
    def sources(self, url, hostDict):
        sources = []
        if not url: return sources
        try:
            scraper = cfscrape.create_scraper(delay=5)
            data = parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])
            title = data['tvshowtitle'] if 'tvshowtitle' in data else data[
                'title']
            title = title.replace('&', 'and').replace('Special Victims Unit',
                                                      'SVU')
            aliases = data['aliases']
            episode_title = data['title'] if 'tvshowtitle' in data else None
            year = data['year']
            hdlr = 'S%02dE%02d' % (int(data['season']), int(
                data['episode'])) if 'tvshowtitle' in data else year

            query = '%s %s' % (title, hdlr)
            query = re.sub(r'[^A-Za-z0-9\s\.-]+', '', query)
            url = self.search_link % quote_plus(query)
            url = urljoin(self.base_link, url).replace('%3A+', '+')
            # log_utils.log('url = %s' % url, log_utils.LOGDEBUG)
            # result = scraper.get(url).content
            result = py_tools.ensure_str(scraper.get(url).content,
                                         errors='replace')

            if not result or "Sorry, but you are looking for something that isn't here" in str(
                    result):
                return sources
            posts = client.parseDOM(result, "div", attrs={"class": "post"})
            if not posts: return sources
        except:
            source_utils.scraper_error('MAXRLS')
            return sources
        for post in posts:
            try:
                post_title = client.parseDOM(post,
                                             "h2",
                                             attrs={"class": "postTitle"})
                post_title = client.parseDOM(post_title, 'a')[0]
                if not source_utils.check_title(title, aliases, post_title,
                                                hdlr, year):
                    continue
                content = client.parseDOM(post,
                                          "div",
                                          attrs={"class": "postContent"})
                ltr = client.parseDOM(content, "p", attrs={"dir": "ltr"})
                if not ltr: continue

                for i in ltr:
                    if '<strong>' not in i or 'imdb.com' in i: continue
                    name = re.search(r'<strong>(.*?)<', i).group(1)
                    name = re.sub(r'(<span.*?>)', '',
                                  name).replace('</span>', '')
                    if title not in name:
                        continue  # 'IMDB' or 'Links:' strings can appear in name, so require a title match
                    name_info = source_utils.info_from_name(
                        name, title, year, hdlr, episode_title)
                    if source_utils.remove_lang(name_info): continue

                    links = client.parseDOM(i, "a", ret="href")
                    size = re.findall(
                        r'((?:\d+\,\d+\.\d+|\d+\.\d+|\d+\,\d+|\d+)\s*(?:GB|GiB|Gb|MB|MiB|Mb))',
                        i, re.DOTALL)

                    for link in links:
                        url = link
                        if url in str(sources): continue
                        valid, host = source_utils.is_host_valid(url, hostDict)
                        if not valid: continue

                        quality, info = source_utils.get_release_quality(
                            name_info, url)
                        try:
                            dsize, isize = source_utils._size(size[0])
                            info.insert(0, isize)
                        except:
                            dsize = 0
                        info = ' | '.join(info)

                        sources.append({
                            'provider': 'maxrls',
                            'source': host,
                            'name': name,
                            'name_info': name_info,
                            'quality': quality,
                            'language': 'en',
                            'url': url,
                            'info': info,
                            'direct': False,
                            'debridonly': True,
                            'size': dsize
                        })
            except:
                source_utils.scraper_error('MAXRLS')
        return sources
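
# Common shape of the dicts each scraper appends to its results list, collected
# from the examples above (torrent scrapers add 'seeders' and 'hash', season
# packs add 'package', hoster scrapers set 'source' to the resolved host):
#   {'provider': ..., 'source': 'torrent' or host, 'name': name,
#    'name_info': name_info, 'quality': quality, 'language': 'en', 'url': url,
#    'info': info, 'direct': False, 'debridonly': True, 'size': dsize}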