Example #1
    def sources(self, url, hostDict, hostprDict, sc_timeout):
        try:
            sources = []
            hostDict = hostprDict + hostDict

            headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:67.0) Gecko/20100101 Firefox/67.0'}

            timer = control.Time(start=True)

            r = requests.get(url, headers=headers).content
            quality_check = re.compile('class="quality">(.+?)<').findall(r)

            quality = 'SD'
            for q in quality_check:
                quality = '720p' if 'HD' in q else 'SD'

            links = re.compile(r'li class=.+?data-target="\W[A-Za-z]+\d"\sdata-href="(.+?)"').findall(r)
            for url in links:
                # Stop searching 8 seconds before the provider timeout; otherwise the search might keep running, fail to complete in time, and return no links.
                if timer.elapsed() > sc_timeout:
                    log_utils.log('IWannaWatch - Timeout Reached')
                    break

                valid, host = source_utils.is_host_valid(url, hostDict)
                if not valid:
                    continue
                sources.append({'source': host, 'quality': quality, 'language': 'en', 'url': url, 'direct': False, 'debridonly': False})
            return sources
        except Exception:
            failure = traceback.format_exc()
            log_utils.log('IWannaWatch - Exception: \n' + str(failure))
            return sources
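
All of the examples on this page rely on a control.Time stopwatch for the early-timeout check. Its implementation is not part of this listing; a minimal sketch of the behaviour the snippets assume (construction with start=True starts the clock, elapsed() returns seconds since start) might look like this:

import time


class Time:
    # Hypothetical sketch of the control.Time helper assumed above; not the real implementation.
    def __init__(self, start=False):
        self.start_time = None
        if start:
            self.start()

    def start(self):
        # Record the starting instant.
        self.start_time = time.time()

    def elapsed(self):
        # Seconds since start(); the providers compare this against sc_timeout.
        return time.time() - self.start_time if self.start_time else 0.0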
Example #2
    def sources(self, url, hostDict, hostprDict, sc_timeout):
        try:
            sources = []

            timer = control.Time(start=True)

            # headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:65.0) Gecko/20100101 Firefox/65.0'}
            r = self.cfscraper.get(url).content
            give_me = client.parseDOM(r, "div", attrs={"id": "lnk list-downloads"})
            for block in give_me:
                # Stop searching 8 seconds before the provider timeout; otherwise the search might keep running, fail to complete in time, and return no links.
                if timer.elapsed() > sc_timeout:
                    log_utils.log('Shaanig - Timeout Reached')
                    break

                some_links = client.parseDOM(block, 'a', ret='href')
                for url in some_links:
                    quality = source_utils.check_sd_url(url)
                    if '?s=' not in url:
                        continue
                    url = url.split('?s=')[1]
                    final = urlparse.urljoin('http:', url)
                    sources.append({'source': 'Direct', 'quality': quality, 'language': 'en', 'url': final, 'direct': True, 'debridonly': False})

            return sources
        except Exception:
            failure = traceback.format_exc()
            log_utils.log('Shaanig - Exception: \n' + str(failure))
            return sources
Example #3
    def sources(self, url, hostDict, hostprDict, sc_timeout):
        try:
            self._sources = []
            self.items = []

            if url is None:
                return self._sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            self.title = data[
                'tvshowtitle'] if 'tvshowtitle' in data else data['title']
            self.hdlr = 'S%02dE%02d' % (
                int(data['season']), int(data['episode'])
            ) if 'tvshowtitle' in data else data['year']

            query = '%s S%02dE%02d' % (
                data['tvshowtitle'], int(data['season']),
                int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % (
                    data['title'], data['year'])
            query = re.sub(r'(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)
            urls = []
            if 'tvshowtitle' in data:
                urls.append(self.tvsearch % (urllib.quote(query)))
                '''
                Why spam for multiple pages, since it gives plenty on each page?
                urls.append(self.tvsearch.format(urllib.quote(query), '2'))
                urls.append(self.tvsearch.format(urllib.quote(query), '3'))
                '''
            else:
                urls.append(self.moviesearch % (urllib.quote(query)))
                '''
                Why spam for multiple pages, since it gives plenty on each page?
                urls.append(self.moviesearch.format(urllib.quote(query), '2'))
                urls.append(self.moviesearch.format(urllib.quote(query), '3'))
                '''
            threads = []

            self.timer = control.Time(start=True)

            for url in urls:
                threads.append(workers.Thread(self._get_items, url,
                                              sc_timeout))
            [i.start() for i in threads]
            [i.join() for i in threads]

            threads2 = []
            for i in self.items:
                threads2.append(
                    workers.Thread(self._get_sources, i, sc_timeout))
            [i.start() for i in threads2]
            [i.join() for i in threads2]

            return self._sources
        except Exception:
            failure = traceback.format_exc()
            log_utils.log('1337x - Exception: \n' + str(failure))
            return self._sources
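
Example #3 splits the scrape into two threaded phases: _get_items collects candidate result pages, then _get_sources resolves every collected item, with each phase starting and joining its workers in bulk. The workers.Thread wrapper is not shown in this listing; the snippets appear to assume a thin threading.Thread subclass that takes the target and its positional arguments, roughly:

import threading


class Thread(threading.Thread):
    # Hypothetical stand-in for the workers.Thread wrapper used above.
    def __init__(self, target, *args):
        super(Thread, self).__init__()
        self.func = target
        self.func_args = args

    def run(self):
        try:
            self.func(*self.func_args)
        except Exception:
            pass  # a failed worker must not abort the whole scrape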
Example #4
    def sources(self, url, hostDict, hostprDict, sc_timeout):
        try:
            sources = []

            if url is None:
                return sources

            hostDict = hostDict + hostprDict

            timer = control.Time(start=True)

            url = url + 'watch/'
            r = client.request(url)
            qual = re.findall('class="quality">(.+?)<', r)

            quality, info = 'SD', []
            for i in qual:
                quality, info = source_utils.get_release_quality(i, i)

            r = client.parseDOM(r, "div", attrs={"id": "list-eps"})
            for i in r:
                # Stop searching 8 seconds before the provider timeout; otherwise the search might keep running, fail to complete in time, and return no links.
                if timer.elapsed() > sc_timeout:
                    log_utils.log('CMoviesHD - Timeout Reached')
                    break

                t = re.findall('<a href="(.+?)"', i, re.DOTALL)
                for url in t:
                    # Stop searching 8 seconds before the provider timeout; otherwise the search might keep running, fail to complete in time, and return no links.
                    if timer.elapsed() > sc_timeout:
                        log_utils.log('CMoviesHD - Timeout Reached')
                        break
                    t = client.request(url)
                    t = client.parseDOM(t,
                                        "div",
                                        attrs={"id": "content-embed"})
                    for u in t:
                        i = re.findall('src="(.+?)"',
                                       u)[0].replace('load_player.html?e=',
                                                     'episode/embed/')
                        i = client.request(i).replace("\\", "")
                        u = re.findall('"(https.+?)"', i)
                        for url in u:
                            valid, host = source_utils.is_host_valid(
                                url, hostDict)
                            if not valid:
                                continue
                            sources.append({
                                'source': host,
                                'quality': quality,
                                'language': 'en',
                                'info': info,
                                'url': url,
                                'direct': False,
                                'debridonly': False
                            })
            return sources
        except Exception:
            failure = traceback.format_exc()
            log_utils.log('CMoviesHD - Exception: \n' + str(failure))
            return sources
Example #5
    def sources(self, url, hostDict, hostprDict, sc_timeout):
        try:
            sources = []

            timer = control.Time(start=True)

            r = client.request(url)
            try:
                # Stop searching 8 seconds before the provider timeout; otherwise the search might keep running, fail to complete in time, and return no links.
                if timer.elapsed() > sc_timeout:
                    log_utils.log('MyProjectFreeTV - Timeout Reached')
                    return sources

                data = re.compile(r"callvalue\('.+?','.+?','(.+?)://(.+?)/(.+?)'\)", re.DOTALL).findall(r)
                for http, host, url in data:
                    url = '%s://%s/%s' % (http, host, url)
                    sources.append({
                        'source': host,
                        'quality': 'SD',
                        'language': 'en',
                        'url': url,
                        'direct': False,
                        'debridonly': False
                    })
            except Exception:
                pass
            return sources
        except Exception:
            failure = traceback.format_exc()
            log_utils.log('MyProjectFreeTV - Exception: \n' + str(failure))
            return sources
Example #6
    def sources(self, url, hostDict, hostprDict, sc_timeout):
        sources = []
        hostDict = hostprDict + hostDict
        try:
            if url is None:
                return sources
            urldata = urlparse.parse_qs(url)
            urldata = dict((i, urldata[i][0]) for i in urldata)
            imdb = urldata['imdb']
            title = urldata['title']
            year = urldata['year']

            search = imdb.lower()
            url = urlparse.urljoin(
                self.base_link, self.search_link % (search.replace(' ', '+')))

            headers = {
                'User-Agent':
                'Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:67.0) Gecko/20100101 Firefox/67.0'
            }

            timer = control.Time(start=True)

            request = self.scraper.get(url).content

            regex = re.compile(
                r'<h2\s+\w{5}="\w{5}-\w{5}"><\w\shref=(.+?)\stitle="(.+?)"',
                re.DOTALL).findall(request)
            for page_url, page_title in regex:
                # Stop searching 8 seconds before the provider timeout; otherwise the search might keep running, fail to complete in time, and return no links.
                if timer.elapsed() > sc_timeout:
                    log_utils.log('UWatchFree - Timeout Reached')
                    break

                if title.lower() in page_title.lower():
                    if year in str(page_title):
                        if 'hindi' in page_title.lower():
                            continue
                        r = client.request(page_url, headers=headers)

                        links = re.compile(
                            r'<h2\s+c\w{4}\W"d\w{7}-l\w{4}"><a\s\w{4}=(.+?)\s[a-z]{6}\W',
                            re.DOTALL).findall(r)

                        for link in links:
                            sources.append({
                                'source': 'Direct',
                                'quality': '720p',
                                'language': 'en',
                                'url': link,
                                'direct': True,
                                'debridonly': False
                            })
            return sources
        except Exception:
            failure = traceback.format_exc()
            log_utils.log('UWatchFree - Exception: \n' + str(failure))
            return sources
Example #7
    def sources(self, data, hostDict, hostprDict, sc_timeout):
        try:
            sources = []
            session = self._createSession(data['UA'], data['cookies'], data['referer'])
            pageURL = data['pageURL']

            xbmc.sleep(1000)

            timer = control.Time(start=True)

            r = self._sessionGET(pageURL, session)
            if not r.ok:
                log_utils.log('PrimewireGR - Sources page request failed: \n' + str(data['pageURL']))
                return sources


            soup = BeautifulSoup(r.content, 'html.parser')
            mainDIV = soup.find('div', class_='actual_tab')
            for hostBlock in mainDIV.findAll('tbody'):
                # Stop searching 8 seconds before the provider timeout; otherwise the search might keep running, fail to complete in time, and return no links.
                if timer.elapsed() > sc_timeout:
                    log_utils.log('PrimewireGR - Timeout Reached')
                    break

                # All valid host links always have an 'onclick' attribute.
                if 'onclick' in hostBlock.a.attrs:
                    onClick = hostBlock.a['onclick']
                    if 'Promo' in onClick:
                        continue  # Ignore ad links.

                    hostName = re.search('''['"](.*?)['"]''', onClick).group(1)
                    qualityClass = hostBlock.span['class']
                    quality = 'SD' if ('cam' not in qualityClass and 'ts' not in qualityClass) else 'CAM'

                    # Send data for the resolve() function below to use later, when the user plays an item.
                    unresolvedData = {
                        'pageURL': self.BASE_URL + hostBlock.a['href'],  # Not yet usable, see resolve().
                        'UA': data['UA'],
                        'cookies': session.cookies.get_dict(),
                        'referer': pageURL
                    }
                    sources.append(
                        {
                            'source': hostName,
                            'quality': quality,
                            'language': 'en',
                            'url': unresolvedData,
                            'direct': False,
                            'debridonly': False
                        }
                    )
            return sources
        except Exception:
            failure = traceback.format_exc()
            log_utils.log('PrimewireGR - Exception: \n' + str(failure))
            return sources
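
The unresolvedData dict built above is consumed later by a resolve() method that is not included in this listing. Purely as a hedged illustration of the hand-off (the page structure, and everything beyond the _createSession/_sessionGET helpers already used above, is an assumption), such a method might look like:

    def resolve(self, url):
        # 'url' here is the unresolvedData dict assembled in sources() above.
        try:
            session = self._createSession(url['UA'], url['cookies'], url['referer'])
            r = self._sessionGET(url['pageURL'], session)
            if not r.ok:
                return None
            # Hypothetical: assume the intermediate page exposes the outbound
            # host link as its first anchor with an href.
            soup = BeautifulSoup(r.content, 'html.parser')
            link = soup.find('a', href=True)
            return link['href'] if link else None
        except Exception:
            return None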
Example #8
    def sources(self, url, hostDict, hostprDict, sc_timeout):
        try:
            sources = []
            if url is None:
                return sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
            query = '%s %s' % (data['title'], data['year'])
            url = self.search_link % urllib.quote(query)
            url = urlparse.urljoin(self.base_link, url).replace('%20', '-')

            timer = control.Time(start=True)

            html = client.request(url)
            if html is None:
                return sources

            try:
                results = client.parseDOM(html, 'div', attrs={'class': 'ava1'})
                if results is None:
                    return sources
            except Exception:
                return sources

            for torrent in results:
                # Stop searching 8 seconds before the provider timeout; otherwise the search might keep running, fail to complete in time, and return no links.
                if timer.elapsed() > sc_timeout:
                    log_utils.log('YifiDDL - Timeout Reached')
                    return sources

                links = re.findall(r'a data-torrent-id=".+?" href="(magnet:.+?)" class=".+?" title="(.+?)"', torrent, re.DOTALL)
                for link, name in links:
                    link = str(client.replaceHTMLCodes(link).split('&tr')[0])
                    if link in str(sources):
                        continue

                    quality, info = source_utils.get_release_quality(name, name)
                    try:
                        size = re.findall(r'((?:\d+\.\d+|\d+\,\d+|\d+)\s*(?:GB|GiB|MB|MiB))', torrent)[-1]
                        div = 1 if size.endswith(('GB', 'GiB')) else 1024
                        size = float(re.sub('[^0-9|/.|/,]', '', size)) / div
                        size = '%.2f GB' % size
                        info.append(size)
                    except Exception:
                        pass

                    info = ' | '.join(info)

                    sources.append({'source': 'Torrent', 'quality': quality, 'language': 'en', 'url': link, 'info': info, 'direct': False, 'debridonly': True})
            return sources
        except Exception:
            failure = traceback.format_exc()
            log_utils.log('YifiDDL - Exception: \n' + str(failure))
            return sources
Example #9
    def sources(self, url, hostDict, hostprDict, sc_timeout):
        try:
            sources = []

            if url is None:
                return sources

            headers = {
                'User-Agent':
                'Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:67.0) Gecko/20100101 Firefox/67.0'
            }
            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])
            title = data['title']
            year = data['year']
            url = self.base_link + self.search_link % year

            timer = control.Time(start=True)

            html = client.request(url, headers=headers)
            if html is None:
                return sources

            # this method guarantees only results matching our formatted title get pulled out of the html
            regex_string = r'<tr><td class="link"><a href="{0}(.+?)"'.format(
                re.escape(title))
            results = re.compile(regex_string).findall(html)
            for link in results:
                # Stop searching 8 seconds before the provider timeout; otherwise the search might keep running, fail to complete in time, and return no links.
                if timer.elapsed() > sc_timeout:
                    log_utils.log('DL7Lavin - Timeout Reached')
                    break

                if 'Trailer' in link:
                    continue
                if 'Dubbed' in link:
                    continue
                url = self.base_link + self.search_link % year + title + link

                quality = source_utils.check_sd_url(url)
                sources.append({
                    'source': 'Direct',
                    'quality': quality,
                    'language': 'en',
                    'url': url,
                    'direct': True,
                    'debridonly': False
                })
            return sources
        except Exception:
            failure = traceback.format_exc()
            log_utils.log('DL7Lavin - Exception: \n' + str(failure))
            return sources
Example #10
    def sources(self, url, hostDict, hostprDict, sc_timeout):
        try:
            sources = []

            timer = control.Time(start=True)

            r = client.request(url)
            try:
                match = re.compile(
                    'class="search-title may-blank" >(.+?)</a>.+?<span class="search-result-icon search-result-icon-external"></span><a href="(.+?)://(.+?)/(.+?)" class="search-link may-blank" >'
                ).findall(r)
                for info, http, host, ext in match:
                    # Stop searching 8 seconds before the provider timeout; otherwise the search might keep running, fail to complete in time, and return no links.
                    if timer.elapsed() > sc_timeout:
                        log_utils.log('Reddit - Timeout Reached')
                        break

                    if '2160' in info:
                        quality = '4K'
                    elif '1080' in info:
                        quality = '1080p'
                    elif '720' in info:
                        quality = 'HD'
                    else:
                        quality = 'SD'

                    url = '%s://%s/%s' % (http, host, ext)
                    if 'google' in host.lower():
                        host = 'GDrive'

                    sources.append({
                        'source': host,
                        'quality': quality,
                        'language': 'en',
                        'url': url,
                        'info': info,
                        'direct': False,
                        'debridonly': False
                    })
            except Exception:
                failure = traceback.format_exc()
                log_utils.log('Reddit - Exception: \n' + str(failure))
                return sources
        except Exception:
            failure = traceback.format_exc()
            log_utils.log('Reddit - Exception: \n' + str(failure))
            return sources
        return sources
Example #11
    def sources(self, url, hostDict, hostprDict, sc_timeout):
        try:
            sources = []
            if url is None:
                return sources

            hostDict = hostprDict + hostDict
            headers = {
                'Referer':
                url,
                'User-Agent':
                'Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:65.0) Gecko/20100101 Firefox/65.0'
            }

            timer = control.Time(start=True)

            r = requests.get(url, headers=headers).content
            links = re.compile(r"data-href='(.+?)'\s+data",
                               re.DOTALL).findall(r)
            for link in links:
                # Stop searching 8 seconds before the provider timeout; otherwise the search might keep running, fail to complete in time, and return no links.
                if timer.elapsed() > sc_timeout:
                    log_utils.log('StreamDreams - Timeout Reached')
                    break

                if 'BDRip' in link:
                    quality = '720p'
                elif 'HD' in link:
                    quality = '720p'
                else:
                    quality = 'SD'

                info = source_utils.get_release_quality(link)[1]
                valid, host = source_utils.is_host_valid(link, hostDict)
                if not valid:
                    continue
                sources.append({
                    'source': host,
                    'quality': quality,
                    'language': 'en',
                    'url': link,
                    'info': info,
                    'direct': False,
                    'debridonly': False
                })
            return sources
        except Exception:
            failure = traceback.format_exc()
            log_utils.log('StreamDreams - Exception: \n' + str(failure))
            return sources
Example #12
    def sources(self, url, hostDict, hostprDict, sc_timeout):
        try:
            sources = []
            headers = {
                'User-Agent':
                'Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:67.0) Gecko/20100101 Firefox/67.0'
            }

            timer = control.Time(start=True)

            r = self.cfscraper.get(url, headers=headers).content
            quality_matches = re.compile(
                r'<strong>Quality:\s+</strong>\s+<span class="quality">(.+?)</span>',
                re.DOTALL).findall(r)

            quality = 'SD'
            for q in quality_matches:
                if 'HD' in q:
                    quality = '720p'
                elif 'CAM' in q:
                    quality = 'CAM'
                else:
                    quality = 'SD'

            match = re.compile('<iframe.+?src="(.+?)"').findall(r)
            for url in match:
                # Stop searching 8 seconds before the provider timeout; otherwise the search might keep running, fail to complete in time, and return no links.
                if timer.elapsed() > sc_timeout:
                    log_utils.log('HackIMDB - Timeout Reached')
                    break

                if 'youtube' in url:
                    continue
                valid, hoster = source_utils.is_host_valid(url, hostDict)
                if not valid:
                    continue
                sources.append({
                    'source': hoster,
                    'quality': quality,
                    'language': 'en',
                    'url': url,
                    'direct': False,
                    'debridonly': False
                })

            return sources
        except Exception:
            failure = traceback.format_exc()
            log_utils.log('HackIMDB - Exception: \n' + str(failure))
            return sources
Example #13
    def sources(self, url, hostDict, hostprDict, sc_timeout):
        try:
            self._sources = []
            if url is None:
                return self._sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            query = '%s S%02dE%02d' % (
                data['tvshowtitle'], int(data['season']),
                int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % (
                    data['title'], data['year'])
            query = re.sub(r'(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)

            query = cleantitle.geturl(query)
            url = urlparse.urljoin(self.base_link, query)

            self.timer = control.Time(start=True)

            shell = requests.Session()

            headers = {
                'Referer':
                url,
                'User-Agent':
                'Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:65.0) Gecko/20100101 Firefox/65.0'
            }
            # Don't follow the redirect automatically; the real page URL is in the Location header.
            r = shell.get(url, headers=headers, allow_redirects=False)
            r = r.headers['Location']
            r = shell.get(r).content
            posts = dom_parser2.parse_dom(r, 'li', {
                'class': re.compile('.+?'),
                'id': re.compile('comment-.+?')
            })
            self.hostDict = hostDict + hostprDict
            threads = []

            for i in posts:
                threads.append(
                    workers.Thread(self._get_sources, i.content, sc_timeout))
            [i.start() for i in threads]
            [i.join() for i in threads]

            return self._sources
        except Exception:
            return self._sources
Example #14
    def sources(self, url, hostDict, hostprDict, sc_timeout):
        sources = []
        try:
            headers = {
                'User-Agent':
                'Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:67.0) Gecko/20100101 Firefox/67.0'
            }

            timer = control.Time(start=True)

            r = client.request(url, headers=headers)
            match = re.compile('data-video="(.+?)">').findall(r)
            for url in match:
                # Stop searching 8 seconds before the provider timeout; otherwise the search might keep running, fail to complete in time, and return no links.
                if timer.elapsed() > sc_timeout:
                    log_utils.log('WatchSeriesRU - Timeout Reached')
                    break

                if 'vidcloud' in url:
                    url = urlparse.urljoin('https:', url)
                    r = client.request(url, headers=headers)
                    regex = re.compile("file: '(.+?)'").findall(r)
                    for direct_links in regex:
                        sources.append({
                            'source': 'cdn',
                            'quality': 'SD',
                            'language': 'en',
                            'url': direct_links,
                            'direct': False,
                            'debridonly': False
                        })

                else:
                    valid, host = source_utils.is_host_valid(url, hostDict)
                    if not valid:
                        continue
                    sources.append({
                        'source': host,
                        'quality': 'SD',
                        'language': 'en',
                        'url': url,
                        'direct': False,
                        'debridonly': False
                    })
            return sources
        except Exception:
            failure = traceback.format_exc()
            log_utils.log('WatchSeriesRU - Exception: \n' + str(failure))
            return sources
Example #15
    def sources(self, url, hostDict, hostprDict, sc_timeout):
        try:
            sources = []
            if url is None:
                return sources

            timer = control.Time(start=True)

            r = self.scraper.get(url).content
            quality = re.findall(r">(\w+)<\/p", r)
            if quality and quality[0] == "HD":
                quality = "720p"
            else:
                quality = "SD"
            r = dom_parser2.parse_dom(r, 'div', {'id': 'servers-list'})
            r = [dom_parser2.parse_dom(i, 'a', req=['href']) for i in r if i]

            hostDict = hostprDict + hostDict
            if not r:
                return sources
            for i in r[0]:
                # Stop searching 8 seconds before the provider timeout; otherwise the search might keep running, fail to complete in time, and return no links.
                if timer.elapsed() > sc_timeout:
                    log_utils.log('FreeFMovies - Timeout Reached')
                    break

                url = {
                    'url': i.attrs['href'],
                    'data-film': i.attrs['data-film'],
                    'data-server': i.attrs['data-server'],
                    'data-name': i.attrs['data-name']
                }
                url = urllib.urlencode(url)
                valid, host = source_utils.is_host_valid(i.content, hostDict)
                if valid:
                    sources.append({
                        'source': host,
                        'quality': quality,
                        'language': 'en',
                        'url': url,
                        'direct': False,
                        'debridonly': False
                    })
            return sources
        except Exception:
            failure = traceback.format_exc()
            log_utils.log('FreeFMovies - Exception: \n' + str(failure))
            return sources
Example #16
    def sources(self, url, hostDict, hostprDict, sc_timeout):
        try:
            sources = []

            if url is None:
                return sources

            url = urlparse.urljoin(self.base_link, url)

            timer = control.Time(start=True)

            r = proxy.request(url, 'tv shows')

            links = client.parseDOM(r, 'a', ret='href', attrs={'target': '.+?'})
            links = [x for y, x in enumerate(links) if x not in links[:y]]

            for i in links:
                # Stop searching 8 seconds before the provider timeout; otherwise the search might keep running, fail to complete in time, and return no links.
                if timer.elapsed() > sc_timeout:
                    log_utils.log('XWatchSeries - Timeout Reached')
                    break

                try:
                    url = i
                    url = proxy.parse(url)
                    url = urlparse.parse_qs(urlparse.urlparse(url).query)['r'][0]
                    url = url.decode('base64')
                    url = client.replaceHTMLCodes(url)
                    url = url.encode('utf-8')

                    host = re.findall(r'([\w]+[.][\w]+)$', urlparse.urlparse(url.strip().lower()).netloc)[0]
                    if host not in hostDict:
                        raise Exception()
                    host = host.encode('utf-8')

                    sources.append({'source': host, 'quality': 'SD', 'language': 'en',
                                    'url': url, 'direct': False, 'debridonly': False})
                except Exception:
                    pass

            return sources
        except Exception:
            failure = traceback.format_exc()
            log_utils.log('XWatchSeries - Exception: \n' + str(failure))
            return sources
Example #17
    def sources(self, url, hostDict, hostprDict, sc_timeout):
        try:
            sources = []

            if url is None:
                return sources

            headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:67.0) Gecko/20100101 Firefox/67.0'}
            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
            hldr = 'S%02dE%02d' % (int(data['season']), int(data['episode']))
            season = 'S%02d/' % int(data['season'])
            title = data['tvshowtitle']

            '''
            Check for season directory, no need for extra checks. Path is there or it's not
            '''
            url = urlparse.urljoin(self.base_link, self.search_link % (title, season))

            timer = control.Time(start=True)

            results = client.request(url, headers=headers)
            if results is None:
                return sources

            results = re.compile('<a href="(.+?)"').findall(results)
            for link in results:
                # Stop searching 8 seconds before the provider timeout; otherwise the search might keep running, fail to complete in time, and return no links.
                if timer.elapsed() > sc_timeout:
                    log_utils.log('DL3F2M - Timeout Reached')
                    break

                if link.startswith('.') or link.startswith('?'):
                    continue
                if hldr in link:
                    link = urlparse.urljoin(url, link)
                    quality = source_utils.check_sd_url(link)
                    sources.append({'source': 'Direct', 'quality': quality, 'language': 'en',
                                    'url': link, 'direct': True, 'debridonly': False})
            return sources
        except Exception:
            failure = traceback.format_exc()
            log_utils.log('DL3F2M - Exception: \n' + str(failure))
            return sources
Example #18
    def sources(self, url, hostDict, hostprDict, sc_timeout):
        try:
            sources = []

            if url is None:
                return sources

            hostDict = hostDict + hostprDict

            timer = control.Time(start=True)

            html = client.request(url)
            links = re.compile('<iframe.+?src="(.+?)"',
                               re.DOTALL).findall(html)
            for link in links:
                # Stop searching 8 seconds before the provider timeout; otherwise the search might keep running, fail to complete in time, and return no links.
                if timer.elapsed() > sc_timeout:
                    log_utils.log('CMovies - Timeout Reached')
                    break

                if not link.startswith('http'):
                    link = "https:" + link
                valid, host = source_utils.is_host_valid(link, hostDict)
                if not valid:
                    continue

                quality, info = source_utils.get_release_quality(link, link)
                sources.append({
                    'source': host,
                    'quality': quality,
                    'language': 'en',
                    'url': link,
                    'info': info,
                    'direct': False,
                    'debridonly': False
                })
            return sources
        except Exception:
            failure = traceback.format_exc()
            log_utils.log('CMovies - Exception: \n' + str(failure))
            return sources
Example #19
    def sources(self, url, hostDict, hostprDict, sc_timeout):
        try:
            sources = []
            if url is None:
                return sources
            hostDict = hostprDict + hostDict
            # headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36'}

            timer = control.Time(start=True)

            r = self.scraper.get(url).content
            qual = re.compile('<span class="calidad2">(.+?)</span>').findall(r)
            quality, info = 'SD', []
            for qcheck in qual:
                quality, info = source_utils.get_release_quality(
                    qcheck, qcheck)

            links = re.compile('<iframe src="(.+?)"', re.DOTALL).findall(r)

            for link in links:
                # Stop searching 8 seconds before the provider timeout; otherwise the search might keep running, fail to complete in time, and return no links.
                if timer.elapsed() > sc_timeout:
                    log_utils.log('Movie4kis - Timeout Reached')
                    break

                valid, host = source_utils.is_host_valid(link, hostDict)
                if not valid:
                    continue
                sources.append({
                    'source': host,
                    'quality': quality,
                    'language': 'en',
                    'url': link,
                    'direct': False,
                    'debridonly': False
                })
            return sources
        except Exception:
            failure = traceback.format_exc()
            log_utils.log('Movie4kis - Exception: \n' + str(failure))
            return sources
Example #20
    def sources(self, url, hostDict, hostprDict, sc_timeout):
        try:
            self._sources = []
            if url is None:
                return self._sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])
            self.title = data[
                'tvshowtitle'] if 'tvshowtitle' in data else data['title']
            self.hdlr = 'S%02dE%02d' % (
                int(data['season']), int(data['episode'])
            ) if 'tvshowtitle' in data else data['year']
            query = '%s S%02dE%02d' % (data['tvshowtitle'], int(data['season']), int(data['episode'])) \
                if 'tvshowtitle' in data else '%s %s' % (data['title'], data['year'])
            query = re.sub(r'(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)
            if 'tvshowtitle' in data:
                url = self.search.format('8', urllib.quote(query))
            else:
                url = self.search.format('4', urllib.quote(query))
            self.hostDict = hostDict + hostprDict

            self.timer = control.Time(start=True)

            html = self.scraper.get(url).content
            if html is None:
                log_utils.log('TorrentsDL - Website Timed Out')
                return self._sources

            threads = []
            for i in re.findall(r'<item>(.+?)</item>', html, re.DOTALL):
                threads.append(workers.Thread(self._get_items, i, sc_timeout))
            [i.start() for i in threads]
            [i.join() for i in threads]
            return self._sources
        except Exception:
            failure = traceback.format_exc()
            log_utils.log('TorrentsDL - Exception: \n' + str(failure))
            return self._sources
Example #21
    def sources(self, url, hostDict, hostprDict, sc_timeout):
        try:
            self._sources = []
            self.items = []
            if url is None:
                return self._sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])
            self.title = data[
                'tvshowtitle'] if 'tvshowtitle' in data else data['title']
            self.hdlr = 'S%02dE%02d' % (
                int(data['season']), int(data['episode'])
            ) if 'tvshowtitle' in data else data['year']
            query = '%s S%02dE%02d' % (
                data['tvshowtitle'], int(data['season']),
                int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % (
                    data['title'], data['year'])
            query = re.sub(r'(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)
            if 'tvshowtitle' in data:
                url = self.tvsearch.format(urllib.quote(query))
                url = urlparse.urljoin(self.base_link, url)
            else:
                url = self.moviesearch.format(urllib.quote(query))
                url = urlparse.urljoin(self.base_link, url)

            self.timer = control.Time(start=True)

            self._get_items(url, sc_timeout)
            self.hostDict = hostDict + hostprDict
            threads = []
            for i in self.items:
                threads.append(workers.Thread(self._get_sources, i,
                                              sc_timeout))
            [i.start() for i in threads]
            [i.join() for i in threads]
            return self._sources
        except Exception:
            return self._sources
Example #22
    def sources(self, url, hostDict, hostprDict, sc_timeout):
        hostDict = hostprDict + hostDict
        try:
            sources = []
            headers = {
                'User-Agent':
                'Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:67.0) Gecko/20100101 Firefox/67.0'
            }

            timer = control.Time(start=True)

            r = client.request(url, headers=headers)
            if r is None:
                return sources
            match = re.findall('<td align="center"><strong><a href="(.+?)"', r,
                               re.DOTALL)
            for url in match:
                # Stop searching 8 seconds before the provider timeout; otherwise the search might keep running, fail to complete in time, and return no links.
                if timer.elapsed() > sc_timeout:
                    log_utils.log('CoolMovieZone - Timeout Reached')
                    break

                quality = source_utils.check_sd_url(url)
                valid, host = source_utils.is_host_valid(url, hostDict)
                if not valid:
                    continue
                sources.append({
                    'source': host,
                    'quality': quality,
                    'language': 'en',
                    'url': url,
                    'direct': False,
                    'debridonly': False
                })
        except Exception:
            failure = traceback.format_exc()
            log_utils.log('CoolMovieZone - Exception: \n' + str(failure))
            return sources
        return sources
Example #23
    def sources(self, url, hostDict, hostprDict, sc_timeout):
        try:
            sources = []

            if url is None:
                return sources

            timer = control.Time(start=True)

            r = client.request(url, cookie='check=2')

            m = dom_parser.parse_dom(r, 'table', attrs={'class':
                                                        'show_links'})[0]
            links = re.findall('k">(.*?)<.*?f="(.*?)"', m.content)
            for link in links:
                # Stop searching 8 seconds before the provider timeout; otherwise the search might keep running, fail to complete in time, and return no links.
                if timer.elapsed() > sc_timeout:
                    log_utils.log('TVBox - Timeout Reached')
                    break

                try:
                    sources.append({
                        'source': link[0],
                        'quality': 'SD',
                        'language': 'en',
                        'url': link[1],
                        'direct': False,
                        'debridonly': False
                    })
                except Exception:
                    pass

            return sources
        except Exception:
            failure = traceback.format_exc()
            log_utils.log('TVBox - Exception: \n' + str(failure))
            return sources
Example #24
    def sources(self, url, hostDict, hostprDict, sc_timeout):
        try:
            sources = []
            headers = {
                'User-Agent':
                'Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:65.0) Gecko/20100101 Firefox/65.0'
            }

            timer = control.Time(start=True)

            r = client.request(url, headers=headers)
            try:
                match = re.compile(
                    '<iframe src="(.+?)://(.+?)/(.+?)"').findall(r)
                for http, host, url in match:
                    # Stop searching 8 seconds before the provider timeout; otherwise the search might keep running, fail to complete in time, and return no links.
                    if timer.elapsed() > sc_timeout:
                        log_utils.log('PutLocker - Timeout Reached')
                        break

                    url = '%s://%s/%s' % (http, host, url)
                    sources.append({
                        'source': host,
                        'quality': 'HD',
                        'language': 'en',
                        'url': url,
                        'direct': False,
                        'debridonly': False
                    })
            except Exception:
                return sources
        except Exception:
            failure = traceback.format_exc()
            log_utils.log('PutLocker - Exception: \n' + str(failure))
            return sources
        return sources
Example #25
    def sources(self, url, hostDict, hostprDict, sc_timeout):
        try:
            sources = []

            if url is None:
                return sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            query = '%s Season %d Episode %d' % (
                data['tvshowtitle'], int(data['season']), int(data['episode'])
            ) if 'tvshowtitle' in data else '%s' % (data['title'])
            query = re.sub(r'(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)
            year = data['year']
            search_title = cleantitle.getsearch(query.lower())
            url = urlparse.urljoin(
                self.base_link,
                self.search_link % (search_title.replace(' ', '+')))

            timer = control.Time(start=True)

            shell = requests.Session()
            headers = {
                'User-Agent':
                'Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:67.0) Gecko/20100101 Firefox/67.0'
            }
            r = shell.get(url, headers=headers).content

            scrape = re.compile(
                r'<div data-movie-id=.+?class="ml-item">\s+<a href="(.+?)" data-url="" class="ml-mask jt".+?oldtitle="(.+?)"'
            ).findall(r)

            for url, title_data in scrape:
                # Stop searching 8 seconds before the provider timeout; otherwise the search might keep running, fail to complete in time, and return no links.
                if timer.elapsed() > sc_timeout:
                    log_utils.log('GoldMovies - Timeout Reached')
                    break

                if cleantitle.getsearch(query).lower() == cleantitle.getsearch(
                        title_data).lower():
                    r = shell.get(url, headers=headers).content
                    year_data = re.compile(
                        r'<strong>Release:\s+</strong>\s+<a href=.+?rel="tag">(.+?)</a>'
                    ).findall(r)
                    if year in str(year_data):
                        if 'tvshowtitle' in data:
                            year = None

                    hidden_links = re.compile(
                        '<input type="hidden" id="link" name="link" value="(.+?)"'
                    ).findall(r)
                    for url in hidden_links:
                        post_link = 'http://instalyser.com/form3.php'
                        payload = {'title': url, 'submit': 'Download'}
                        post_it = shell.post(post_link,
                                             headers=headers,
                                             data=payload)
                        response = post_it.content

                        gold_links = re.findall(
                            r'<[^\d]\s\w+\=\"(.+?)\"\s[^\d]{6}\=\"\w{6}\">',
                            response)
                        for url in gold_links:
                            quality, info = source_utils.get_release_quality(
                                url, url)
                            sources.append({
                                'source': 'Direct',
                                'quality': quality,
                                'language': 'en',
                                'url': url,
                                'info': info,
                                'direct': True,
                                'debridonly': False
                            })

            return sources
        except Exception:
            failure = traceback.format_exc()
            log_utils.log('GoldMovies - Exception: \n' + str(failure))
            return sources
Example #26
    def sources(self, url, hostDict, hostprDict, sc_timeout):
        try:
            sources = []

            if url is None:
                return sources
            # log_utils.log('Filmxy - Sources - url: ' + str(url))
            # PLEASE KEEP THIS FIX PRIVATE, THANKS.
            # cust_headers = {
            #     'Host': 'www.filmxy.live',
            #     'Connection': 'keep-alive',
            #     'Origin': 'https://www.filmxy.live',
            #     'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.100 Safari/537.36',
            #     'Accept': '*/*',
            #     'Referer': 'https://www.filmxy.live/',
            #     'Accept-Encoding': 'gzip, deflate',
            #     'Accept-Language': 'en-US,en;q=0.9'
            # }

            timer = control.Time(start=True)

            r = self.shellscrape.get(url, headers=self.shell_headers).content
            streams = re.compile(
                r'data-player="&lt;[A-Za-z]{6}\s[A-Za-z]{3}=&quot;(.+?)&quot;',
                re.DOTALL).findall(r)

            try:
                link_bin = re.compile(
                    '<div id="tab-download".+?<a href="(.+?)"',
                    re.DOTALL).findall(r)[0]
                link_bin = link_bin.rstrip()
                r = self.shellscrape.get(link_bin,
                                         headers=self.shell_headers).content

                dlinks1080 = client.parseDOM(r,
                                             'div',
                                             attrs={'class':
                                                    'link-panel row'})[1]
                dlinks1080 = client.parseDOM(dlinks1080, 'a', ret='href')

                for links in dlinks1080:
                    if any(x in links for x in ['mirrorace', 'sendit']):
                        continue
                    host = links.split('//')[1].replace('www.', '')
                    host = host.split('/')[0].lower()
                    sources.append({
                        'source': host,
                        'quality': '1080p',
                        'language': 'en',
                        'url': links,
                        'direct': False,
                        'debridonly': False
                    })
                    # Stop searching 8 seconds before the provider timeout; otherwise the search might keep running, fail to complete in time, and return no links.
                    if timer.elapsed() > sc_timeout:
                        log_utils.log('FilmXY - Timeout Reached')
                        break
            except Exception:
                pass

            for link in streams:
                quality = source_utils.check_sd_url(link)
                host = link.split('//')[1].replace('www.', '')
                host = host.split('/')[0].lower()
                '''
                source_utils can't determine the quality of some of these links and will
                drop them down to SD, so bump those up to HD.
                '''
                if quality == 'SD':
                    quality = 'HD'
                sources.append({
                    'source': host,
                    'quality': quality,
                    'language': 'en',
                    'url': link,
                    'direct': False,
                    'debridonly': False
                })
                # Stop searching 8 seconds before the provider timeout; otherwise the search might keep running, fail to complete in time, and return no links.
                if timer.elapsed() > sc_timeout:
                    log_utils.log('FilmXY - Timeout Reached')
                    break
            return sources
        except Exception:
            failure = traceback.format_exc()
            log_utils.log('FilmXY - Exception: \n' + str(failure))
            return sources
Example #27
    def sources(self, url, hostDict, hostprDict, sc_timeout):
        try:
            sources = []

            if url is None:
                return sources

            headers = {
                'User-Agent':
                'Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:67.0) Gecko/20100101 Firefox/67.0'
            }
            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])
            hldr = 'S%02dE%02d' % (int(data['season']), int(data['episode']))
            season = 'S%02d/' % int(data['season'])
            title = data['tvshowtitle']
            '''
            Check for season directory, no need for extra checks. Path is there or it's not
            '''
            url = urlparse.urljoin(self.base_link,
                                   self.search_link % (title, season))

            timer = control.Time(start=True)

            results = client.request(url, headers=headers)
            if results is None:
                return sources
            '''
            All results at this level are now subfolders for resolution (1080p, HD, 2160p, etc)
            '''
            results = re.compile(
                '<tr><td class="link"><a href="(.+?)"').findall(results)
            for dirlink in results:
                # Stop searching 8 seconds before the provider timeout; otherwise the search might keep running, fail to complete in time, and return no links.
                if timer.elapsed() > sc_timeout:
                    log_utils.log('DL4LavinTV - Timeout Reached')
                    break

                if dirlink.startswith('.') or dirlink.startswith('?'):
                    continue
                sublink = urlparse.urljoin(url, dirlink)
                '''
                If the url ends in a / then this is a folder and we need to dig deeper
                to find the season episodes.
                Otherwise, the season episodes are NOT in subfolders for resolution.
                '''
                if dirlink.endswith('/'):
                    subhtml = client.request(sublink, headers=headers)
                    subres = re.compile('<tr><td class="link"><a href="(.+?)"'
                                        ).findall(subhtml)
                    for link in subres:
                        if link.startswith('.') or link.startswith('?'):
                            continue
                        if hldr in link:
                            link = urlparse.urljoin(sublink, link)
                            quality = source_utils.check_sd_url(link)
                            sources.append({
                                'source': 'Direct',
                                'quality': quality,
                                'language': 'en',
                                'url': link,
                                'direct': True,
                                'debridonly': False
                            })
                else:
                    if hldr in dirlink:
                        link = urlparse.urljoin(sublink, dirlink)
                        quality = source_utils.check_sd_url(link)
                        sources.append({
                            'source': 'Direct',
                            'quality': quality,
                            'language': 'en',
                            'url': link,
                            'direct': True,
                            'debridonly': False
                        })
            return sources
        except Exception:
            failure = traceback.format_exc()
            log_utils.log('DL4LavinTV - Exception: \n' + str(failure))
            return sources
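
Every listing in this collection guards its scrape loop with the same stopwatch pattern: start a timer via control.Time(start=True) before the first network call, then break out of the loop once timer.elapsed() exceeds sc_timeout. The control module belongs to the surrounding add-on framework and is not reproduced in these listings; the stand-in below is a minimal sketch inferred only from those call sites (the Time name and the start keyword come from the calls, everything else is an assumption).

import time


class Time:
    # Minimal stopwatch matching the call sites in the listings:
    # timer = control.Time(start=True); timer.elapsed().
    def __init__(self, start=False):
        self._started = None
        if start:
            self.start()

    def start(self):
        # Record the reference point used by elapsed().
        self._started = time.time()

    def elapsed(self):
        # Seconds since start(); 0.0 if the timer was never started.
        if self._started is None:
            return 0.0
        return time.time() - self._started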
Example #28
    def sources(self, url, hostDict, hostprDict, sc_timeout):
        sources = []

        if url is None:
            return sources

        api_key = self.get_api()

        if not api_key:
            return sources

        try:
            content_type = 'episode' if 'tvshowtitle' in url else 'movie'
            match = 'all'
            moderated = 'no' if content_type == 'episode' else self.mod_level
            search_in = ''

            if content_type == 'movie':
                title = url['title'].replace(':', ' ').replace(' ',
                                                               '+').replace(
                                                                   '&', 'and')
                title = title.replace("'", "")
                year = url['year']
                link = '{0}+{1}'.format(title, year)

            elif content_type == 'episode':
                title = url['tvshowtitle'].replace(':', ' ').replace(
                    ' ', '+').replace('&', 'and')
                season = int(url['season'])
                episode = int(url['episode'])
                # season00 = 's%02d' % (season)
                season00_ep00_SE = 's%02de%02d' % (season, episode)
                season0_ep0_SE = 's%de%d' % (season, episode)
                season00_ep00_X = '%02dx%02d' % (season, episode)
                season0_ep0_X = '%dx%d' % (season, episode)
                season0_ep00_X = '%dx%02d' % (season, episode)
                link = '%s+%s' % (title, season00_ep00_SE)

            s = requests.Session()
            link = (
                self.base_link + self.meta_search_link %
                (api_key, link, match, moderated, search_in, self.search_limit)
            )

            timer = control.Time(start=True)

            p = s.get(link)
            p = json.loads(p.text)

            if p['status'] != 'ok':
                return sources

            files = p['files']

            for i in files:
                # Stop searching 8 seconds before the provider timeout; otherwise the
                # scraper may keep searching, fail to finish in time, and return no links.
                if timer.elapsed() > sc_timeout:
                    log_utils.log('Furk - Timeout Reached')
                    break

                if i['is_ready'] == '1' and i['type'] == 'video':
                    try:
                        source = 'SINGLE'
                        if int(i['files_num_video']) > 3:
                            source = 'PACK [B](x%02d)[/B]' % int(
                                i['files_num_video'])
                        file_name = i['name']
                        file_id = i['id']
                        file_dl = i['url_dl']
                        if content_type == 'episode':
                            url = '%s<>%s<>%s<>%s<>%s<>%s' % (
                                file_id, season00_ep00_SE, season0_ep0_SE,
                                season00_ep00_X, season0_ep0_X, season0_ep00_X)
                            details = self.details(file_name, i['size'],
                                                   i['video_info'])
                        else:
                            url = '%s<>%s<>%s+%s' % (file_id, 'movie', title,
                                                     year)
                            details = self.details(file_name, i['size'],
                                                   i['video_info']).split('|')
                            details = details[0] + ' | ' + file_name.replace(
                                '.', ' ')

                        quality = source_utils.get_release_quality(
                            file_name, file_dl)
                        sources.append({
                            'source': source,
                            'quality': quality[0],
                            'language': "en",
                            'url': url,
                            'info': details,
                            'direct': True,
                            'debridonly': False
                        })
                    except Exception:
                        pass
            return sources
        except Exception:
            failure = traceback.format_exc()
            log_utils.log('Furk - Exception: \n' + str(failure))
            return sources
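
The listing above precomputes five spellings of the season/episode tag (s03e07, s3e7, 03x07, 3x7, 3x07) and packs all of them into the '<>'-delimited url payload, so pack contents can later be matched whichever convention the file names use. A standalone illustration of that matching idea follows; episode_tags is a hypothetical helper name, not part of the original code.

def episode_tags(season, episode):
    # The five spellings built in the Furk listing, in the same
    # order they are packed into the '<>'-delimited url payload.
    s, e = int(season), int(episode)
    return ['s%02de%02d' % (s, e),
            's%de%d' % (s, e),
            '%02dx%02d' % (s, e),
            '%dx%d' % (s, e),
            '%dx%02d' % (s, e)]


# A file name matches if any accepted spelling appears in it.
name = 'Some.Show.3x07.720p.x264.mkv'
print(any(tag in name.lower() for tag in episode_tags(3, 7)))  # True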
Example #29
    def sources(self, url, hostDict, hostprDict, sc_timeout):
        try:
            sources = []

            if url is None:
                return sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            title = data['tvshowtitle'] if 'tvshowtitle' in data else data[
                'title']
            title = cleantitle.geturl(title)

            hdlr = 'S%02dE%02d' % (int(data['season']), int(
                data['episode'])) if 'tvshowtitle' in data else data['year']

            query = '%s S%02dE%02d' % (
                data['tvshowtitle'], int(data['season']),
                int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % (
                    data['title'], data['year'])
            query = re.sub('(\\\|/| -|:|;|\*|\?|"|<|>|\|)', ' ', query)
            url = self.search_link % urllib.quote_plus(query)
            url = urlparse.urljoin(self.base_link, url)

            timer = control.Time(start=True)

            html = self.scraper.get(url).content
            if html is None:
                log_utils.log('TPB - Website Timed Out')
                return sources
            html = html.replace('&nbsp;', ' ')
            try:
                results = client.parseDOM(html,
                                          'table',
                                          attrs={'id': 'searchResult'})[0]
            except Exception:
                return sources

            rows = re.findall('<tr(.+?)</tr>', results, re.DOTALL)
            if not rows:
                return sources

            for entry in rows:
                # Stop searching 8 seconds before the provider timeout; otherwise the
                # scraper may keep searching, fail to finish in time, and return no links.
                if timer.elapsed() > sc_timeout:
                    log_utils.log('TPB - Timeout Reached')
                    break

                try:
                    try:
                        name = re.findall(
                            'class="detLink" title=".+?">(.+?)</a>', entry,
                            re.DOTALL)[0]
                        name = client.replaceHTMLCodes(name)
                        # t = re.sub('(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*|3D)(\.|\)|\]|\s|)(.+|)', '', name, flags=re.I)
                        if cleantitle.get(title) not in cleantitle.get(name):
                            continue
                    except Exception:
                        continue
                    try:
                        y = re.findall(
                            '[\.|\(|\[|\s](\d{4}|S\d*E\d*|S\d*)[\.|\)|\]|\s]',
                            name)[-1].upper()
                        if y != hdlr:
                            continue
                    except Exception:
                        continue

                    try:
                        seeders = int(
                            re.findall('<td align="right">(.+?)</td>', entry,
                                       re.DOTALL)[0])
                    except Exception:
                        continue
                    if self.min_seeders > seeders:
                        continue

                    try:
                        link = 'magnet:%s' % (re.findall(
                            'a href="magnet:(.+?)"', entry, re.DOTALL)[0])
                        link = str(
                            client.replaceHTMLCodes(link).split('&tr')[0])
                    except Exception:
                        continue

                    quality, info = source_utils.get_release_quality(
                        name, name)

                    try:
                        size = re.findall(
                            '((?:\d+\.\d+|\d+\,\d+|\d+)\s*(?:GB|GiB|MB|MiB))',
                            entry)[-1]
                        div = 1 if size.endswith(('GB', 'GiB')) else 1024
                        size = float(re.sub('[^0-9.]', '', size.replace(',', '.'))) / div
                        size = '%.2f GB' % size
                        info.append(size)
                    except Exception:
                        pass

                    info = ' | '.join(info)

                    sources.append({
                        'source': 'Torrent',
                        'quality': quality,
                        'language': 'en',
                        'url': link,
                        'info': info,
                        'direct': False,
                        'debridonly': True
                    })
                except Exception:
                    failure = traceback.format_exc()
                    log_utils.log('TPB - Cycle Broken: \n' + str(failure))
                    continue

            check = [i for i in sources if i['quality'] != 'CAM']
            if check:
                sources = check

            return sources
        except Exception:
            failure = traceback.format_exc()
            log_utils.log('TPB - Exception: \n' + str(failure))
            return sources
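
One detail of the TPB listing worth isolating is how it normalises magnet links: it captures everything after "magnet:" in the row's anchor, then cuts the string at the first "&tr" so the tracker list is dropped and only the bare info-hash URI (plus display name) is kept. A minimal rerun of those two steps against a made-up table row (the client.replaceHTMLCodes entity-decoding step is omitted here):

import re

entry = '<a href="magnet:?xt=urn:btih:ABC123&dn=Some.Show.S03E07&tr=udp://tracker.example/announce">'
# Capture the magnet payload, then strip the tracker parameters,
# exactly as in the TPB listing above.
link = 'magnet:%s' % (re.findall('a href="magnet:(.+?)"', entry, re.DOTALL)[0])
link = str(link.split('&tr')[0])
print(link)  # magnet:?xt=urn:btih:ABC123&dn=Some.Show.S03E07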
Example #30
    def sources(self, url, hostDict, hostprDict, sc_timeout):
        try:
            sources = []

            if url is None:
                return sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
            imdb = data['imdb']

            link = []

            timer = control.Time(start=True)

            try:
                # Try each of the three search pages in turn until one
                # yields entries matching this imdb id.
                for search in (self.search_link, self.search_link2, self.search_link3):
                    query = urlparse.urljoin(self.base_link, search)
                    result = client.request(query)
                    m = re.findall('Movie Size:(.+?)<.+?href="(.+?)".+?href="(.+?)"\s*onMouse', result, re.DOTALL)
                    m = [(i[0], i[1], i[2]) for i in m if imdb in i[1]]
                    if m:
                        link = m
                        break
            except Exception:
                return sources

            if not link:
                return sources

            for item in link:
                # Stop searching 8 seconds before the provider timeout; otherwise the
                # scraper may keep searching, fail to finish in time, and return no links.
                if timer.elapsed() > sc_timeout:
                    log_utils.log('DivxCrawler - Timeout Reached')
                    break

                try:
                    quality, info = source_utils.get_release_quality(item[2], None)
                    try:
                        size = re.findall('((?:\d+\.\d+|\d+\,\d+|\d+) (?:GB|GiB|MB|MiB))', item[0])[-1]
                        div = 1 if size.endswith(('GB', 'GiB')) else 1024
                        size = float(re.sub('[^0-9.]', '', size.replace(',', '.'))) / div
                        size = '%.2f GB' % size
                        info.append(size)
                    except Exception:
                        pass
                    info = ' | '.join(info)
                    url = item[2]
                    if any(x in url for x in ['.rar', '.zip', '.iso']):
                        raise Exception()
                    url = client.replaceHTMLCodes(url)
                    url = url.encode('utf-8')

                    sources.append({'source': 'DL', 'quality': quality, 'language': 'en', 'url': url, 'info': info,
                                    'direct': True, 'debridonly': False})
                except Exception:
                    pass
            return sources
        except Exception:
            failure = traceback.format_exc()
            log_utils.log('DivxCrawler - Exception: \n' + str(failure))
            return sources
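
Both the TPB and DivxCrawler listings end their per-item parsing with the same size normalisation: take the last "<number> GB/MB" token from the row, divide megabyte values by 1024, and reformat as '%.2f GB'. Pulled out as a standalone helper for clarity (size_to_gb is a hypothetical name; the listings inline this logic), with comma decimals converted before the float() call:

import re


def size_to_gb(text):
    # Mirror the size normalisation used in the TPB and DivxCrawler
    # listings: last "<number> GB/MB" token, megabytes divided by
    # 1024, formatted as '%.2f GB'. Returns None if no size is found.
    found = re.findall(r'((?:\d+\.\d+|\d+,\d+|\d+)\s*(?:GB|GiB|MB|MiB))', text)
    if not found:
        return None
    size = found[-1]
    div = 1 if size.endswith(('GB', 'GiB')) else 1024
    value = float(re.sub(r'[^0-9.]', '', size.replace(',', '.'))) / div
    return '%.2f GB' % value


print(size_to_gb('Uploaded 03-14 2019, Size 1.37 GiB'))  # 1.37 GB
print(size_to_gb('Movie Size: 700 MB'))                  # 0.68 GB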