Example #1
    def __init__(self):
        self.priority = 1
        self.language = ['en']
        self.domains = ['max-rls.com']
        self.base_link = 'https://max-rls.com'
        self.search_link = '/?s=%s&submit=Find'
        self.headers = {'User-Agent': client.agent()}
    def _get_items(self, url):
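        # Collect (name, magnet url, readable size, size in GB) tuples from one search-results page.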
        items = []
        try:
            headers = {'User-Agent': client.agent()}
            r = client.request(url, headers=headers)
            posts = client.parseDOM(r, 'tr', attrs={'class': 't-row'})
            posts = [i for i in posts if not 'racker:' in i]
            for post in posts:
                data = client.parseDOM(post, 'a', ret='href')
                url = [i for i in data if 'magnet:' in i][0]
                name = client.parseDOM(post, 'a', ret='title')[0]
                t = name.split(self.hdlr)[0]

                if not cleantitle.get(re.sub('(|)', '', t)) == cleantitle.get(self.title): continue

                try:
                    y = re.findall('[\.|\(|\[|\s|\_|\-](S\d+E\d+|S\d+)[\.|\)|\]|\s|\_|\-]', name, re.I)[-1].upper()
                except BaseException:
                    y = re.findall('[\.|\(|\[|\s\_|\-](\d{4})[\.|\)|\]|\s\_|\-]', name, re.I)[-1].upper()
                if not y == self.hdlr: continue

                try:
                    size = re.findall('((?:\d+\,\d+\.\d+|\d+\.\d+|\d+\,\d+|\d+)\s*(?:GiB|MiB|GB|MB))', post)[0]
                    dsize, isize = source_utils._size(size)
                except BaseException:
                    dsize, isize = 0.0, ''

                items.append((name, url, isize, dsize))
            return items
        except:
            log_utils.log('glodls2_exc', 1)
            return items
Example #3
    def sources(self, url, hostDict, hostprDict):
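        # Build an RSS search URL for the episode or movie, then scrape each <item> in parallel.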
        self._sources = []
        try:
            if url is None:
                return self._sources

            if debrid.status() is False:
                return self._sources

            data = parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

            self.title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
            self.title = cleantitle.get_query(self.title)
            self.hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else data['year']

            query = '%s S%02dE%02d' % (self.title, int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % (self.title, data['year'])
            query = re.sub(r'(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)
            if 'tvshowtitle' in data:
                url = self.search.format('8', quote(query))
            else:
                url = self.search.format('4', quote(query))

            self.hostDict = hostDict + hostprDict
            headers = {'User-Agent': client.agent()}
            _html = client.request(url, headers=headers)
            threads = []
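            # One worker per RSS <item>; _get_items appends its results to self._sources.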
            for i in re.findall(r'<item>(.+?)</item>', _html, re.DOTALL):
                threads.append(workers.Thread(self._get_items, i))
            [i.start() for i in threads]
            [i.join() for i in threads]

            return self._sources
        except BaseException:
            return self._sources
Example #4
 def _get_items(self, url):
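     # Parse the 'table2' listing: each row yields (name, details-page link, readable size, size in GB).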
     try:
         headers = {'User-Agent': client.agent()}
         r = client.request(url, headers=headers)
         posts = client.parseDOM(r, 'table', attrs={'class': 'table2'})[0]
         posts = client.parseDOM(posts, 'tr')
         for post in posts:
             data = dom.parse_dom(post, 'a', req='href')[1]
             link = urlparse.urljoin(self.base_link, data.attrs['href'])
             name = data.content
             t = name.split(self.hdlr)[0]
             if not cleantitle.get(re.sub('(|)', '', t)) == cleantitle.get(
                     self.title):
                 continue
             try:
                 y = re.findall(
                     '[\.|\(|\[|\s|\_|\-](S\d+E\d+|S\d+)[\.|\)|\]|\s|\_|\-]',
                     name, re.I)[-1].upper()
             except BaseException:
                 y = re.findall(
                     '[\.|\(|\[|\s\_|\-](\d{4})[\.|\)|\]|\s\_|\-]', name,
                     re.I)[-1].upper()
             if not y == self.hdlr: continue
             try:
                 size = re.findall(
                     '((?:\d+\,\d+\.\d+|\d+\.\d+|\d+\,\d+|\d+)\s*(?:GiB|MiB|GB|MB))',
                     post)[0]
                 dsize, isize = utils._size(size)
             except BaseException:
                 dsize, isize = 0, ''
             self.items.append((name, link, isize, dsize))
         return self.items
     except BaseException:
         return self.items
Example #5
    def _get_sources(self, name, url):
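        # Resolve one release page: pull every hoster URL out of the <pre class="links"> blocks.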
        try:
            headers = {'User-Agent': client.agent()}
            r = cfScraper.get(url, headers=headers).content
            r = ensure_text(r, errors='replace')
            name = client.replaceHTMLCodes(name)
            try:
                _name = name.lower().replace('rr',
                                             '').replace('nf', '').replace(
                                                 'ul', '').replace('cu', '')
            except:
                _name = name
            l = dom_parser2.parse_dom(r, 'pre', {'class': 'links'})
            s = ''
            for i in l:
                s += i.content
            urls = re.findall(
                r'''((?:http|ftp|https)://[\w_-]+(?:(?:\.[\w_-]+)+)[\w.,@?^=%&:/~+#-]*[\w@?^=%&/~+#-])''',
                s,
                flags=re.MULTILINE | re.DOTALL)
            urls = [
                i for i in urls if not i.endswith(('.rar', '.zip', '.iso',
                                                   '.idx', '.sub', '.srt'))
            ]
            for url in urls:
                if url in str(self.sources):
                    continue

                valid, host = source_utils.is_host_valid(url, self.hostDict)
                if not valid:
                    continue
                host = client.replaceHTMLCodes(host)
                #host = host.encode('utf-8')
                quality, info = source_utils.get_release_quality(name, url)
                try:
                    size = re.findall(
                        '((?:\d+\.\d+|\d+\,\d+|\d+)\s*(?:GiB|MiB|GB|MB))',
                        name)[0]
                    dsize, isize = source_utils._size(size)
                except BaseException:
                    dsize, isize = 0.0, ''
                info.insert(0, isize)
                info = ' | '.join(info)
                self.sources.append({
                    'source': host,
                    'quality': quality,
                    'language': 'en',
                    'url': url,
                    'info': info,
                    'direct': False,
                    'debridonly': True,
                    'size': dsize,
                    'name': _name
                })
        except:
            log_utils.log('RMZ - Exception', 1)
            pass
Example #6
    def sources(self, url, hostDict, hostprDict):
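        # Locate the title page via search(), then hand each release link to _get_sources on a worker thread.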

        self.sources = []

        try:
            if url is None:
                return self.sources

            if debrid.status() is False:
                return self.sources

            data = parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            title = data['tvshowtitle'] if 'tvshowtitle' in data else data[
                'title']
            title = cleantitle.get_query(title)

            hdlr = data['year']
            hdlr2 = 'S%02dE%02d' % (int(data['season']), int(
                data['episode'])) if 'tvshowtitle' in data else ''
            imdb = data['imdb']

            url = self.search(title, hdlr)
            headers = {'User-Agent': client.agent()}
            r = cfScraper.get(url, headers=headers).content
            r = ensure_text(r, errors='replace')
            if hdlr2 == '':
                r = dom_parser2.parse_dom(r, 'ul', {'id': 'releases'})[0]
            else:
                r = dom_parser2.parse_dom(r, 'ul', {'id': 'episodes'})[0]
            r = dom_parser2.parse_dom(r.content, 'a', req=['href'])
            r = [(i.content, urljoin(self.base_link, i.attrs['href']))
                 for i in r if i and i.content != 'Watch']
            if hdlr2 != '':
                r = [(i[0], i[1]) for i in r if hdlr2.lower() in i[0].lower()]

            self.hostDict = hostDict + hostprDict
            threads = []

            for i in r:
                threads.append(workers.Thread(self._get_sources, i[0], i[1]))
            [i.start() for i in threads]

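            # Poll until every worker finishes, sleeping 100 ms between checks.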
            alive = [x for x in threads if x.is_alive() is True]
            while alive:
                alive = [x for x in threads if x.is_alive() is True]
                time.sleep(0.1)
            return self.sources
        except:
            log_utils.log('RMZ - Exception', 1)
            return self.sources
Example #7
 def search(self, title, year):
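     # Search the site's index and return the first result whose title and year both match.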
     try:
         url = urljoin(self.base_link,
                       self.search_link % (quote_plus(title)))
         headers = {'User-Agent': client.agent()}
         r = cfScraper.get(url, headers=headers).content
         r = ensure_text(r, errors='replace')
         r = dom_parser2.parse_dom(r, 'div', {'class': 'list_items'})[0]
         r = dom_parser2.parse_dom(r.content, 'li')
         r = [(dom_parser2.parse_dom(i, 'a', {'class': 'title'}))
              for i in r]
         r = [(i[0].attrs['href'], i[0].content) for i in r]
         r = [(urljoin(self.base_link, i[0])) for i in r
              if cleantitle.get(title) in cleantitle.get(i[1])
              and year in i[1]]
         if r: return r[0]
         else: return
     except:
         log_utils.log('RMZ - Exception', 1)
         return
Example #8
    def sources(self, url, hostDict, hostprDict):
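        # cartoonhd scraper: resolves the title page, then calls the site's AJAX endpoint for embeds.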
        try:
            sources = []

            if url is None:
                return sources

            data = parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
            title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
            imdb = data['imdb']
            aliases = eval(data['aliases'])
            headers = {}

            if 'tvshowtitle' in data:
                url = self.searchShow(title, int(data['season']), int(data['episode']), aliases, headers)
            else:
                url = self.searchMovie(title, data['year'], aliases, headers)

            r = client.request(url, headers=headers, output='extended', timeout='10')

            #if imdb not in r[0]:
                #raise Exception()

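            # The 'extended' response tuple layout varies between client versions; try both positions.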
            try:
                cookie = r[4]
                headers = r[3]
            except:
                cookie = r[3]
                headers = r[2]
            result = r[0]

            try:
                r = re.findall('(https:.*?redirector.*?)[\'\"]', result)
                for i in r:
                    try:
                        sources.append(
                            {'source': 'gvideo', 'quality': directstream.googletag(i)[0]['quality'], 'language': 'en',
                             'url': i, 'direct': True, 'debridonly': False})
                    except:
                        pass
            except:
                pass

            try: auth = re.findall('__utmx=(.+)', cookie)[0].split(';')[0]
            except: auth = 'false'
            auth = 'Bearer %s' % unquote_plus(auth)
            headers['Authorization'] = auth
            headers['Referer'] = url

            u = '/ajax/vsozrflxcw.php'
            self.base_link = client.request(self.base_link, headers={'User-Agent': client.agent()}, output='geturl')
            u = urljoin(self.base_link, u)

            action = 'getEpisodeEmb' if '/episode/' in url else 'getMovieEmb'

            tim = str(int(time.time())) if six.PY2 else six.ensure_binary(str(int(time.time())))
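            # Note: base64.encodestring is a deprecated alias removed in Python 3.9; encodebytes is the modern equivalent.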
            elid = quote(base64.encodestring(tim)).strip()

            token = re.findall("var\s+tok\s*=\s*'([^']+)", result)[0]

            idEl = re.findall('elid\s*=\s*"([^"]+)', result)[0]

            post = {'action': action, 'idEl': idEl, 'token': token, 'nopop': '', 'elid': elid}
            post = urlencode(post)
            cookie += ';%s=%s' % (idEl, elid)
            headers['Cookie'] = cookie

            r = client.request(u, post=post, headers=headers, cookie=cookie, XHR=True)
            r = str(json.loads(r))

            r = re.findall('\'(http.+?)\'', r) + re.findall('\"(http.+?)\"', r)

            for i in r:
                try:
                    if 'google' in i:
                        quality = 'SD'

                        if 'googleapis' in i:
                            try:
                                quality = source_utils.check_sd_url(i)
                            except Exception:
                                pass

                        if 'googleusercontent' in i:
                            i = directstream.googleproxy(i)
                            try:
                                quality = directstream.googletag(i)[0]['quality']
                            except Exception:
                                pass

                        sources.append({'source': 'gvideo', 'quality': quality, 'language': 'en', 'url': i,
                                        'direct': True, 'debridonly': False})

                    elif 'llnwi.net' in i or 'vidcdn.pro' in i:
                        try:
                            quality = source_utils.check_sd_url(i)
                            sources.append({'source': 'CDN', 'quality': quality, 'language': 'en', 'url': i,
                                            'direct': True, 'debridonly': False})

                        except Exception:
                            pass
                    else:
                        valid, hoster = source_utils.is_host_valid(i, hostDict)
                        if valid:
                            if 'vidnode.net' in i:
                                i = i.replace('vidnode.net', 'vidcloud9.com')
                                hoster = 'vidcloud9'
                            sources.append({'source': hoster, 'quality': '720p', 'language': 'en', 'url': i,
                                            'direct': False, 'debridonly': False})
                except Exception:
                    pass
            return sources
        except:
            log_utils.log('cartoonhd - Exception', 1)
            return sources
Example #9
    def sources(self, url, hostDict, hostprDict):
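        # plockers scraper (six-based): decodes the Base64 iframe embed, then walks the server_line list.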
        try:
            sources = []
            if url is None: return sources

            hostDict = hostprDict + hostDict

            data = parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            title = data['tvshowtitle'] if 'tvshowtitle' in data else data[
                'title']
            title = cleantitle.get_query(title)
            hdlr = 'S%02dE%02d' % (int(data['season']), int(
                data['episode'])) if 'tvshowtitle' in data else data['year']
            query = '%s season %d' % (title, int(
                data['season'])) if 'tvshowtitle' in data else title
            query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)
            query = quote_plus(query)

            url = urljoin(self.base_link, self.search_link % query)

            ua = {'User-Agent': client.agent()}
            r = cfScraper.get(url, headers=ua).content
            r = six.ensure_text(r, errors='replace')
            posts = client.parseDOM(r, 'div', attrs={'class': 'item'})
            posts = [(client.parseDOM(i, 'a',
                                      ret='href')[1], client.parseDOM(i,
                                                                      'a')[1],
                      re.findall('Release:\s*(\d{4})</', i,
                                 re.I | re.DOTALL)[1]) for i in posts if i]
            posts = [(i[0], client.parseDOM(i[1], 'i')[0], i[2]) for i in posts
                     if i]

            if 'tvshowtitle' in data:
                sep = 'season %d' % int(data['season'])
                sepi = 'season-%1d/episode-%1d.html' % (int(
                    data['season']), int(data['episode']))
                post = [i[0] for i in posts if sep in i[1].lower()][0]
                data = cfScraper.get(post, headers=ua).content
                data = six.ensure_text(data, errors='replace')
                link = client.parseDOM(data, 'a', ret='href')
                link = [i for i in link if sepi in i][0]
            else:
                link = [
                    i[0] for i in posts
                    if cleantitle.get(i[1]) == cleantitle.get(title)
                    and hdlr == i[2]
                ][0]

            r = cfScraper.get(link, headers=ua).content
            r = six.ensure_text(r, errors='replace')
            try:
                v = re.findall('document.write\(Base64.decode\("(.+?)"\)',
                               r)[0]
                v = v.encode('utf-8')
                b64 = base64.b64decode(v)
                b64 = six.ensure_text(b64, errors='ignore')
                url = client.parseDOM(b64, 'iframe', ret='src')[0]
                try:
                    host = re.findall('([\w]+[.][\w]+)$',
                                      urlparse(url.strip().lower()).netloc)[0]
                    host = client.replaceHTMLCodes(host)
                    host = six.ensure_str(host)
                    valid, hoster = source_utils.is_host_valid(host, hostDict)
                    if valid:
                        sources.append({
                            'source': hoster,
                            'quality': 'SD',
                            'language': 'en',
                            'url': url.replace('\/', '/'),
                            'direct': False,
                            'debridonly': False
                        })
                except:
                    log_utils.log('plockers4 Exception', 1)
                    pass
            except:
                log_utils.log('plockers3 Exception', 1)
                pass
            r = client.parseDOM(r, 'div', {'class': 'server_line'})
            r = [(client.parseDOM(i, 'a', ret='href')[0],
                  client.parseDOM(i, 'p', attrs={'class':
                                                 'server_servername'})[0])
                 for i in r]
            if r:
                for i in r:
                    try:
                        host = re.sub('Server|Link\s*\d+', '', i[1]).lower()
                        url = i[0].replace('\/', '/')
                        host = client.replaceHTMLCodes(host)
                        host = six.ensure_str(host)
                        if 'other' in host: continue
                        valid, hoster = source_utils.is_host_valid(
                            host, hostDict)
                        if valid:
                            sources.append({
                                'source': hoster,
                                'quality': 'SD',
                                'language': 'en',
                                'url': url,
                                'direct': False,
                                'debridonly': False
                            })
                    except:
                        log_utils.log('plockers5 Exception', 1)
                        pass
            return sources
        except:
            log_utils.log('plockers Exception', 1)
            return sources
Example #10
    def sources(self, url, hostDict, hostprDict):
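        # Python 2 variant of the plockers scraper above (urlparse/urllib idioms, cfscrape session).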
        try:
            sources = []
            if url is None: return sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            title = data['tvshowtitle'] if 'tvshowtitle' in data else data[
                'title']
            hdlr = 'S%02dE%02d' % (int(data['season']), int(
                data['episode'])) if 'tvshowtitle' in data else data['year']
            query = '%s season %d' % (title, int(
                data['season'])) if 'tvshowtitle' in data else data['title']
            query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)
            query = urllib.quote_plus(query)

            url = urlparse.urljoin(self.base_link, self.search_link % query)

            self.s = cfscrape.create_scraper()

            self.ua = {'User-Agent': client.agent(), 'Referer': self.base_link}
            r = self.s.get(url, headers=self.ua).text
            posts = client.parseDOM(r, 'div', attrs={'class': 'item'})
            posts = [(client.parseDOM(i, 'a',
                                      ret='href')[1], client.parseDOM(i,
                                                                      'a')[1])
                     for i in posts if i]

            posts = [(i[0], client.parseDOM(i[1], 'i')[0]) for i in posts if i]

            if 'tvshowtitle' in data:
                sep = 'season %d' % int(data['season'])
                sepi = 'season-%1d/episode-%1d.html' % (int(
                    data['season']), int(data['episode']))
                post = [i[0] for i in posts if sep in i[1].lower()][0]
                data = self.s.get(post, headers=self.ua).content
                link = client.parseDOM(data, 'a', ret='href')
                link = [i for i in link if sepi in i][0]
            else:
                link = [
                    i[0] for i in posts
                    if cleantitle.get(i[1]) == cleantitle.get(title)
                    and hdlr in i[1]
                ][0]

            r = self.s.get(link, headers=self.ua).content
            try:
                v = re.findall('document.write\(Base64.decode\("(.+?)"\)',
                               r)[0]
                b64 = base64.b64decode(v)
                url = client.parseDOM(b64, 'iframe', ret='src')[0]
                try:
                    host = re.findall(
                        '([\w]+[.][\w]+)$',
                        urlparse.urlparse(url.strip().lower()).netloc)[0]
                    host = client.replaceHTMLCodes(host)
                    host = host.encode('utf-8')
                    sources.append({
                        'source': host,
                        'quality': 'SD',
                        'language': 'en',
                        'url': url.replace('\/', '/'),
                        'direct': False,
                        'debridonly': False
                    })
                except:
                    pass
            except:
                pass
            r = client.parseDOM(r, 'div', {'class': 'server_line'})
            r = [(client.parseDOM(i, 'a', ret='href')[0],
                  client.parseDOM(i, 'p', attrs={'class':
                                                 'server_servername'})[0])
                 for i in r]
            if r:
                for i in r:
                    try:
                        host = re.sub('Server|Link\s*\d+', '', i[1]).lower()
                        url = i[0]
                        host = client.replaceHTMLCodes(host)
                        host = host.encode('utf-8')
                        if 'other' in host: continue
                        sources.append({
                            'source': host,
                            'quality': 'SD',
                            'language': 'en',
                            'url': url.replace('\/', '/'),
                            'direct': False,
                            'debridonly': False
                        })
                    except:
                        pass
            return sources
        except:
            return sources
Example #11
def google(url):
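    # Resolve a Google Docs/Drive/Photos/Picasaweb/Plus URL into direct stream links, best quality first.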
    try:
        if any(x in url for x in ['youtube.', 'docid=']):
            url = 'https://drive.google.com/file/d/%s/view' % re.compile(
                'docid=([\w-]+)').findall(url)[0]

        netloc = urllib_parse.urlparse(url.strip().lower()).netloc
        netloc = netloc.split('.google')[0]

        if netloc == 'docs' or netloc == 'drive':
            url = url.split('/preview', 1)[0]
            url = url.replace('drive.google.com', 'docs.google.com')

        headers = {'User-Agent': client.agent()}

        result = client.request(url, output='extended', headers=headers)

        try:
            headers['Cookie'] = result[2]['Set-Cookie']
        except:
            pass

        result = result[0]

        if netloc == 'docs' or netloc == 'drive':
            result = re.compile('"fmt_stream_map",(".+?")').findall(result)[0]
            result = json.loads(result)
            result = [i.split('|')[-1] for i in result.split(',')]
            result = sum([googletag(i, append_height=True) for i in result],
                         [])

        elif netloc == 'photos':
            result = result.replace('\r', '').replace('\n',
                                                      '').replace('\t', '')
            result = re.compile('"\d*/\d*x\d*.+?","(.+?)"').findall(result)[0]

            result = result.replace('\\u003d', '=').replace('\\u0026', '&')
            result = re.compile('url=(.+?)&').findall(result)
            result = [urllib_parse.unquote(i) for i in result]

            result = sum([googletag(i, append_height=True) for i in result],
                         [])

        elif netloc == 'picasaweb':
            id = re.compile('#(\d*)').findall(url)[0]

            result = re.search('feedPreload:\s*(.*}]}})},', result,
                               re.DOTALL).group(1)
            result = json.loads(result)['feed']['entry']

            if len(result) > 1:
                result = [
                    i for i in result if str(id) in i['link'][0]['href']
                ][0]
            elif len(result) == 1:
                result = result[0]

            result = result['media']['content']
            result = [i['url'] for i in result if 'video' in i['type']]
            result = sum([googletag(i, append_height=True) for i in result],
                         [])

        elif netloc == 'plus':
            id = (urllib_parse.urlparse(url).path).split('/')[-1]

            result = result.replace('\r', '').replace('\n',
                                                      '').replace('\t', '')
            result = result.split('"%s"' % id)[-1].split(']]')[0]

            result = result.replace('\\u003d', '=').replace('\\u0026', '&')
            result = re.compile('url=(.+?)&').findall(result)
            result = [urllib_parse.unquote(i) for i in result]

            result = sum([googletag(i, append_height=True) for i in result],
                         [])

        result = sorted(result, key=lambda i: i.get('height', 0), reverse=True)

        url = []
        for q in ['4K', '1440p', '1080p', 'HD', 'SD']:
            try:
                url += [[i for i in result if i.get('quality') == q][0]]
            except:
                pass

        for i in url:
            i.pop('height', None)
            i.update(
                {'url': i['url'] + '|%s' % urllib_parse.urlencode(headers)})

        if not url: return
        return url
    except:
        return
Example #12
    def sources(self, url, hostDict, hostprDict):
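        # onlinemovie.gr scraper: resolves each player option through WordPress admin-ajax, plus the extra links table.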
        sources = []

        try:
            if not url:
                return sources

            query = urlparse.urljoin(self.base_link, url)
            r = client.request(query)

            r1 = client.parseDOM(r, 'div', attrs={'id': 'playeroptions'})[0]
            links = dom.parse_dom(r1, 'li', req=['data-post', 'data-nume'])
            links = [(i.attrs['data-post'], i.attrs['data-nume'],
                      client.parseDOM(i.content,
                                      'span',
                                      attrs={'class': 'title'})[0])
                     for i in links]
            links = [(i[0], i[1], i[2]) for i in links
                     if not 'trailer' in i[1]]
            extra = []
            try:
                extra = client.parseDOM(r,
                                        'div',
                                        attrs={'class': 'links_table'})[0]
                extra = dom.parse_dom(extra, 'td')
                extra = [
                    dom.parse_dom(i.content, 'img', req='src') for i in extra
                    if i
                ]
                extra = [(i[0].attrs['src'],
                          dom.parse_dom(i[0].content, 'a', req='href'))
                         for i in extra if i]
                extra = [(re.findall('domain=(.+?)$',
                                     i[0])[0], i[1][0].attrs['href'])
                         for i in extra if i]
            except BaseException:
                pass
            info = []
            sub = ''
            ptype = 'tv' if '/tvshows/' in query else 'movie'
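            # Each option posts action=doo_player_ajax to admin-ajax.php and yields an iframe embed.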
            for item in links:

                plink = 'https://onlinemovie.gr/wp-admin/admin-ajax.php'
                pdata = {
                    'action': 'doo_player_ajax',
                    'post': item[0],
                    'nume': item[1],
                    'type': ptype
                }
                pdata = urllib.urlencode(pdata)
                link = client.request(plink, post=pdata)
                link = client.parseDOM(link, 'iframe', ret='src')[0]
                lang = 'gr'
                quality, info = source_utils.get_release_quality(
                    item[2], item[2])
                info.append('SUB')
                info = ' | '.join(info)
                if 'jwplayer' in link:
                    sub = re.findall('&sub=(.+?)&id', link)[0]
                    sub = urllib.unquote(sub)
                    sub = urlparse.urljoin(
                        self.base_link,
                        sub) if sub.startswith('/sub/') else sub
                    url = re.findall('source=(.+?)&sub', link)[0]
                    url = urllib.unquote(url)
                    url = urlparse.urljoin(self.base_link,
                                           url) if url.startswith('/') else url

                    if 'cdn' in url or 'nd' in url or url.endswith(
                            '.mp4') or url.endswith('.m3u8'):
                        sources.append({
                            'source': 'CDN',
                            'quality': quality,
                            'language': lang,
                            'url': url,
                            'info': info,
                            'direct': True,
                            'debridonly': False,
                            'sub': sub
                        })

                elif 'api.myhls' in link:
                    quality2, info = source_utils.get_release_quality(
                        item[2], None)
                    info.append('SUB')
                    info = ' | '.join(info)
                    data = client.request(link, referer=self.base_link)
                    if not unjuice.test(data): raise Exception()
                    r = unjuice.run(data)
                    urls = re.findall(
                        '''file['"]:['"]([^'"]+).+?label":['"]([^'"]+)''', r,
                        re.DOTALL)
                    sub = [i[0] for i in urls if 'srt' in i[0]][0]
                    sub = urlparse.urljoin(
                        self.base_link,
                        sub) if sub.startswith('/sub/') else sub

                    urls = [(i[0], i[1]) for i in urls if not '.srt' in i[0]]
                    for i in urls:
                        host = 'GVIDEO'
                        quality, url = i[1].lower(), i[0]

                        url = '%s|User-Agent=%s&Referer=%s' % (
                            url, urllib.quote(client.agent()), link)
                        sources.append({
                            'source': host,
                            'quality': quality,
                            'language': lang,
                            'url': url,
                            'info': info,
                            'direct': True,
                            'debridonly': False,
                            'sub': sub
                        })

                elif 'myhls.stream' in link:
                    vid = link.split('/')[-1]
                    plink = 'https://myhls.stream/api/source/%s' % vid
                    data = client.request(plink,
                                          post='r=',
                                          referer=link,
                                          XHR=True)
                    data = json.loads(data)

                    urls = data['data']

                    sub = data['captions'][0]['path']
                    sub = 'https://myhls.stream/asset' + sub if sub.startswith(
                        '/') else sub

                    for i in urls:
                        url = i['file'] if not i['file'].startswith(
                            '/') else 'https://myhls.stream/%s' % i['file']
                        quality = i['label']
                        host = 'CDN-HLS'

                        sources.append({
                            'source': host,
                            'quality': quality,
                            'language': lang,
                            'url': url,
                            'info': info,
                            'direct': True,
                            'debridonly': False,
                            'sub': sub
                        })

                elif 'drive' in link:
                    quality, info = source_utils.get_release_quality(
                        item[1], None)
                    info.append('SUB')
                    info = ' | '.join(info)
                    try:
                        links = directstream.google(item[0])
                        for x in links:
                            sources.append({
                                'source': 'GVIDEO',
                                'quality': x['quality'],
                                'language': lang,
                                'url': x['url'],
                                'info': info,
                                'direct': True,
                                'debridonly': False,
                                'sub': sub
                            })
                    except BaseException:
                        pass

                    try:
                        r = client.request(item[0])
                        links = re.findall('''\{file:\s*['"]([^'"]+)''', r,
                                           re.DOTALL)
                        for x in links:
                            sources.append({
                                'source': 'GVIDEO',
                                'quality': quality,
                                'language': lang,
                                'url': x,
                                'info': info,
                                'direct': True,
                                'debridonly': False,
                                'sub': sub
                            })

                    except BaseException:
                        pass

                else:
                    continue

            for item in extra:
                url = item[1]
                if 'movsnely' in url:
                    url = client.request(url, output='geturl', redirect=True)
                quality = 'SD'
                lang, info = 'gr', 'SUB'
                valid, host = source_utils.is_host_valid(item[0], hostDict)
                if not valid: continue

                sources.append({
                    'source': host,
                    'quality': quality,
                    'language': lang,
                    'url': url,
                    'info': info,
                    'direct': False,
                    'debridonly': False,
                    'sub': sub
                })

            return sources
        except BaseException:
            return sources
Example #13
    def sources(self, url, hostDict, hostprDict):
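        # iwaatch.com scraper: matches the movie card by title, checks the year, then collects direct quality links.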
        sources = []
        try:

            if url is None:
                return sources

            data = parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            title = data['title']
            year = data['year']

            search_id = title.lower()
            url = urljoin(self.base_link,
                          self.search_link % (search_id.replace(' ', '+')))
            headers = {
                'User-Agent': client.agent(),
                'Accept': '*/*',
                'Accept-Encoding': 'identity;q=1, *;q=0',
                'Accept-Language': 'en-US,en;q=0.5',
                'Connection': 'keep-alive',
                'Pragma': 'no-cache',
                'Cache-Control': 'no-cache',
                'DNT': '1'
            }

            session = requests.Session()
            r = session.get(url, headers=headers, timeout=5).text
            r = client.parseDOM(r, 'div', attrs={'class': 'container'})[1]
            items = client.parseDOM(
                r, 'div', attrs={'class': r'col-xs-12 col-sm-6 col-md-3 '})
            for item in items:
                movie_url = client.parseDOM(item, 'a', ret='href')[0]
                movie_title = re.compile('div class="post-title">(.+?)<',
                                         re.DOTALL).findall(item)[0]
                if cleantitle.get(title).lower() == cleantitle.get(
                        movie_title).lower():

                    r = session.get(movie_url, headers=headers,
                                    timeout=5).text
                    year_data = re.findall(
                        '<h2 style="margin-bottom: 0">(.+?)</h2>', r,
                        re.IGNORECASE)[0]
                    if year == year_data:
                        links = re.findall(r"<a href='(.+?)'>(\d+)p<\/a>", r)

                        for link, quality in links:

                            if not link.startswith('https:'):
                                link = 'https:' + link.replace('http:', '')
                            link = link + '|Referer=https://iwaatch.com/movie/' + title

                            quality, info = source_utils.get_release_quality(
                                quality, link)

                            sources.append({
                                'source': 'Direct',
                                'quality': quality,
                                'language': 'en',
                                'url': link,
                                'direct': True,
                                'debridonly': False
                            })
            return sources
        except:
            log_utils.log('iWAATCH - Exception', 1)
            return sources