Example #1
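    # Collects hoster links from a page's 'servers-list' block; quality is
    # taken from the first <p> tag ("HD" -> 720p, anything else -> SD).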
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []
            hostDict = hostprDict + hostDict

            if url is None: return sources

            r = cfScraper.get(url).content
            quality = re.findall(r'>(\w+)</p', r)
            # guard against an empty match so a missing tag falls back to SD
            quality = '720p' if quality and quality[0] == 'HD' else 'SD'
            r = dom_parser2.parse_dom(r, 'div', {'id': 'servers-list'})
            r = [dom_parser2.parse_dom(i, 'a', req=['href']) for i in r if i]

            for i in r[0]:
                url = {'url': i.attrs['href'], 'data-film': i.attrs['data-film'], 'data-server': i.attrs['data-server'],
                       'data-name': i.attrs['data-name']}
                url = urllib.urlencode(url)
                valid, host = source_utils.is_host_valid(i.content, hostDict)
                if valid:
                    sources.append({'source': host, 'quality': quality, 'language': 'en', 'url': url, 'direct': False, 'debridonly': False})

            return sources
        except Exception:
            return sources
Example #2
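    # Debrid-only scraper: locates the release page via self.search(), pulls
    # the release anchors, and fans each one out to _get_sources() on a
    # worker thread (Example #5 appears to be that worker).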
    def sources(self, url, hostDict, hostprDict):

        # use a private attribute so the result list does not shadow this
        # bound sources() method on the instance
        self._sources = []

        try:
            if url is None:
                return self._sources

            if debrid.status() is False:
                return self._sources

            data = parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
            title = cleantitle.get_query(title)

            hdlr = data['year']
            hdlr2 = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else ''
            imdb = data['imdb']

            url = self.search(title, hdlr)
            headers = {'User-Agent': client.agent()}
            r = cfScraper.get(url, headers=headers).content
            r = ensure_text(r, errors='replace')
            if hdlr2 == '':
                r = dom_parser2.parse_dom(r, 'ul', {'id': 'releases'})[0]
            else:
                r = dom_parser2.parse_dom(r, 'ul', {'id': 'episodes'})[0]
            r = dom_parser2.parse_dom(r.content, 'a', req=['href'])
            r = [(i.content, urljoin(self.base_link, i.attrs['href']))
                 for i in r if i and i.content != 'Watch']
            if hdlr2 != '':
                r = [(i[0], i[1]) for i in r if hdlr2.lower() in i[0].lower()]

            self.hostDict = hostDict + hostprDict
            threads = []

            for i in r:
                threads.append(workers.Thread(self._get_sources, i[0], i[1]))
            for i in threads:
                i.start()

            # poll until every worker has finished appending its results
            while any(x.is_alive() for x in threads):
                time.sleep(0.1)
            return self._sources
        except Exception:
            log_utils.log('RMZ - Exception', 1)
            return self._sources
Example #3
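    # Threaded scraper: builds a "title year" or "title sXXeYY" query, keeps
    # only posts whose cleaned title and episode/year tag match, and hands
    # each hit to a _get_sources() worker (apparently Example #7).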
    def sources(self, url, hostDict, hostprDict):
        try:
            self._sources = []
            if url is None: return self._sources

            data = parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
            hdlr = 's%02de%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else data['year']

            if 'tvshowtitle' in data:
                query = '%s s%02de%02d' % (data['tvshowtitle'], int(data['season']), int(data['episode']))
            else:
                query = '%s %s' % (data['title'], data['year'])
            query = re.sub(r'(\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)

            query = self.search_link % cleantitle.geturl(query)
            url = urljoin(self.base_link, query)
            r = client.request(url)
            posts = dom_parser2.parse_dom(r, 'div', {'class': 'eTitle'})
            posts = [
                dom_parser2.parse_dom(i.content, 'a', req='href')
                for i in posts if i
            ]
            posts = [(i[0].attrs['href'], re.sub('<.+?>', '', i[0].content))
                     for i in posts if i]
            posts = [
                (i[0], i[1]) for i in posts
                if (cleantitle.get_simple(i[1].split(hdlr)[0]) ==
                    cleantitle.get(title) and hdlr.lower() in i[1].lower())
            ]
            self.hostDict = hostDict + hostprDict
            threads = []

            for i in posts:
                threads.append(workers.Thread(self._get_sources, i))
            for i in threads:
                i.start()
            # join() blocks until each worker finishes; no extra polling
            # loop is needed afterwards
            for i in threads:
                i.join()
            return self._sources
        except Exception:
            return self._sources
Example #4
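    # Resolves an episode: loads the show's season page and returns the href
    # of the anchor labelled "Episode N".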
    def episode(self, url, imdb, tvdb, title, premiered, season, episode):
        try:
            if url is None: return
            url = urlparse.parse_qs(url)
            url = dict([(i, url[i][0]) if url[i] else (i, '') for i in url])
            clean_title = cleantitle.geturl(url['tvshowtitle']) + '-s%02d' % int(season)
            url = urlparse.urljoin(self.base_link, (self.search_link % (clean_title, url['year'])))
            r = cfScraper.get(url).content
            r = dom_parser2.parse_dom(r, 'div', {'id': 'ip_episode'})
            r = [dom_parser2.parse_dom(i, 'a', req=['href']) for i in r if i]
            for i in r[0]:
                if i.content == 'Episode %s' % episode:
                    url = i.attrs['href']
            return url
        except Exception:
            return
Example #5
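    # Worker thread body (apparently the _get_sources() used by Example #2):
    # harvests every URL inside the page's <pre class="links"> blocks, drops
    # archives, subtitles and invalid hosts, and appends debrid sources.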
    def _get_sources(self, name, url):
        try:
            headers = {'User-Agent': client.agent()}
            r = cfScraper.get(url, headers=headers).content
            r = ensure_text(r, errors='replace')
            name = client.replaceHTMLCodes(name)
            try:
                _name = name.lower().replace('rr', '').replace('nf', '').replace('ul', '').replace('cu', '')
            except Exception:
                _name = name
            l = dom_parser2.parse_dom(r, 'pre', {'class': 'links'})
            s = ''
            for i in l:
                s += i.content
            # search the concatenated <pre> blocks, not just the last one
            urls = re.findall(
                r'''((?:http|ftp|https)://[\w_-]+(?:(?:\.[\w_-]+)+)[\w.,@?^=%&:/~+#-]*[\w@?^=%&/~+#-])''',
                s,
                flags=re.MULTILINE | re.DOTALL)
            urls = [
                i for i in urls if not i.endswith(('.rar', '.zip', '.iso',
                                                   '.idx', '.sub', '.srt'))
            ]
            for url in urls:
                if url in str(self._sources):
                    continue

                valid, host = source_utils.is_host_valid(url, self.hostDict)
                if not valid:
                    continue
                host = client.replaceHTMLCodes(host)
                quality, info = source_utils.get_release_quality(name, url)
                try:
                    size = re.findall(r'((?:\d+\.\d+|\d+,\d+|\d+)\s*(?:GiB|MiB|GB|MB))', name)[0]
                    dsize, isize = source_utils._size(size)
                except Exception:
                    dsize, isize = 0.0, ''
                info.insert(0, isize)
                info = ' | '.join(info)
                self._sources.append({
                    'source': host,
                    'quality': quality,
                    'language': 'en',
                    'url': url,
                    'info': info,
                    'direct': False,
                    'debridonly': True,
                    'size': dsize,
                    'name': _name
                })
        except Exception:
            log_utils.log('RMZ - Exception', 1)
Example #6
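    # Search helper (seemingly the self.search() called from Example #2):
    # returns the first result whose cleaned title contains the query title
    # and whose label contains the year.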
    def search(self, title, year):
        try:
            url = urljoin(self.base_link, self.search_link % quote_plus(title))
            headers = {'User-Agent': client.agent()}
            r = cfScraper.get(url, headers=headers).content
            r = ensure_text(r, errors='replace')
            r = dom_parser2.parse_dom(r, 'div', {'class': 'list_items'})[0]
            r = dom_parser2.parse_dom(r.content, 'li')
            r = [dom_parser2.parse_dom(i, 'a', {'class': 'title'}) for i in r]
            r = [(i[0].attrs['href'], i[0].content) for i in r]
            r = [urljoin(self.base_link, i[0]) for i in r
                 if cleantitle.get(title) in cleantitle.get(i[1]) and year in i[1]]
            if r: return r[0]
            else: return
        except Exception:
            log_utils.log('RMZ - Exception', 1)
            return
Example #7
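    # Worker thread body, apparently for Example #3: collects external
    # hoster links from a release page, skipping YouTube links, samples,
    # trailers and archive files.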
    def _get_sources(self, url):
        try:
            item = client.request(url[0])
            title = url[1]
            links = dom_parser2.parse_dom(item, 'a', req='href')
            links = [i.attrs['href'] for i in links]
            info = []
            try:
                size = re.findall(r'((?:\d+\.\d+|\d+,\d+|\d+)\s*(?:GiB|MiB|GB|MB))', item)[0]
                div = 1 if size.endswith(('GB', 'GiB')) else 1024
                size = float(re.sub(r'[^0-9.,]', '', size)) / div
                size = '%.2f GB' % size
                info.insert(0, size)
            except Exception:
                pass
            info = ' | '.join(info)
            for url in links:
                if 'youtube' in url: continue
                # skip archives, samples and trailers instead of aborting the
                # whole batch with an exception
                if any(x in url.lower() for x in ['.rar.', '.zip.', '.iso.']) or \
                        url.lower().endswith(('.rar', '.zip', '.iso')):
                    continue
                if any(x in url.lower() for x in ['youtube', 'sample', 'trailer']):
                    continue
                valid, host = source_utils.is_host_valid(url, self.hostDict)
                if not valid: continue

                host = client.replaceHTMLCodes(host)
                quality, info2 = source_utils.get_release_quality(title, url)

                self._sources.append({
                    'source': host,
                    'quality': quality,
                    'language': 'en',
                    'url': url,
                    'info': info,
                    'direct': False,
                    'debridonly': False
                })
        except Exception:
            pass
Example #8
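    # Builds self.items from a torrent-style listing table: keeps only rows
    # whose cleaned title matches self.title and whose SxxExx/year tag
    # matches self.hdlr, recording (name, link, readable size, size in GB).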
    def _get_items(self, url):
        try:
            r = cfScraper.get(url).content
            r = ensure_text(r, errors='replace')
            posts = client.parseDOM(r, 'tbody')[0]
            posts = client.parseDOM(posts, 'tr')
            for post in posts:
                data = dom.parse_dom(post, 'a', req='href')[1]
                link = urljoin(self.base_link, data.attrs['href'])
                name = data.content
                t = name.split(self.hdlr)[0]

                # strip parentheses before comparing normalized titles
                if cleantitle.get(re.sub(r'[()]', '', t)) != cleantitle.get(self.title):
                    continue

                try:
                    y = re.findall(r'[\.|\(|\[|\s|\_|\-](S\d+E\d+|S\d+)[\.|\)|\]|\s|\_|\-]',
                                   name, re.I)[-1].upper()
                except Exception:
                    y = re.findall(r'[\.|\(|\[|\s\_|\-](\d{4})[\.|\)|\]|\s\_|\-]',
                                   name, re.I)[-1].upper()
                if y != self.hdlr:
                    continue

                try:
                    size = re.findall(r'((?:\d+,\d+\.\d+|\d+\.\d+|\d+,\d+|\d+)\s*(?:GiB|MiB|GB|MB))',
                                      post)[0]
                    dsize, isize = source_utils._size(size)
                except Exception:
                    dsize, isize = 0.0, ''

                self.items.append((name, link, isize, dsize))
            return self.items
        except Exception:
            log_utils.log('1337x_exc0', 1)
            return self.items
Example #9
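    # Movie scraper for vidlink.org embeds: unpacks the packed player JS,
    # emits an openload embed when one is advertised, then enumerates the
    # direct m3u8 variants and maps their pixel height to a quality label.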
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url is None:
                return sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            title = data['title']

            hdlr = data['year']

            query = re.sub(r'(\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', title)

            url = self.search_link % urllib.quote_plus(query)
            url = urlparse.urljoin(self.base_link, url)
            r = client.request(url)

            posts = client.parseDOM(r, 'div', attrs={'class': 'video_title'})

            items = []

            for post in posts:
                try:
                    data = dom_parser2.parse_dom(post,
                                                 'a',
                                                 req=['href', 'title'])[0]
                    t = data.content
                    y = re.findall(r'\((\d{4})\)', data.attrs['title'])[0]
                    qual = data.attrs['title'].split('-')[1]
                    link = data.attrs['href']

                    if cleantitle.get(t) != cleantitle.get(title):
                        raise Exception()
                    if y != hdlr:
                        raise Exception()

                    items += [(link, qual)]

                except Exception:
                    pass
            for item in items:
                try:
                    r = client.request(item[0]) if item[0].startswith(
                        'http') else client.request(
                            urlparse.urljoin(self.base_link, item[0]))

                    qual = client.parseDOM(r, 'h1')[0]
                    # quality = source_utils.get_release_quality(item[1], qual)[0]

                    url = re.findall(r'''frame_url\s*=\s*["']([^']+)['"];''', r, re.DOTALL)[0]
                    url = url if url.startswith('http') else urlparse.urljoin(
                        'https://', url)

                    ua = {
                        'User-Agent':
                        'Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:65.0) Gecko/20100101 Firefox/65.0'
                    }

                    postID = url.split('/embed/')[1]
                    post_link = 'https://vidlink.org/embed/update_views'
                    payload = {'postID': postID}
                    headers = ua
                    headers['X-Requested-With'] = 'XMLHttpRequest'
                    headers['Referer'] = url

                    ihtml = client.request(post_link,
                                           post=payload,
                                           headers=headers)
                    linkcode = jsunpack.unpack(ihtml).replace('\\', '')
                    try:
                        extra_link = re.findall(r'var oploadID="(.+?)"',
                                                linkcode)[0]
                        oload = 'https://openload.co/embed/' + extra_link
                        sources.append({
                            'source': 'openload.co',
                            'quality': '1080p',
                            'language': 'en',
                            'url': oload,
                            'direct': False,
                            'debridonly': False
                        })

                    except Exception:
                        pass

                    give_me = re.findall(r'var file1="(.+?)"', linkcode)[0]
                    stream_link = give_me.split('/pl/')[0]
                    headers = {
                        'Referer':
                        'https://vidlink.org/',
                        'User-Agent':
                        'Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:65.0) Gecko/20100101 Firefox/65.0'
                    }
                    r = client.request(give_me, headers=headers)
                    my_links = re.findall(
                        r'[A-Z]{10}=\d+x(\d+)\W[A-Z]+=\"\w+\"\s+\/(.+?)\.', r)
                    for height, link in my_links:
                        # map the advertised pixel height to a quality label
                        if '1080' in height:
                            quality = '1080p'
                        elif '720' in height:
                            quality = '720p'
                        else:
                            quality = 'SD'

                        final = stream_link + '/' + link + '.m3u8'
                        sources.append({
                            'source': 'GVIDEO',
                            'quality': quality,
                            'language': 'en',
                            'url': final,
                            'direct': True,
                            'debridonly': False
                        })

                except Exception:
                    pass

            return sources
        except Exception:
            return sources
Example #10
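    # Greek-language provider: walks RSS <item> entries, picks the matching
    # season block for episodes, and emits SD hoster links while skipping
    # known bad domains.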
    def sources(self, url, hostDict, hostprDict):
        sources = []
        try:

            if url is None: return sources

            data = parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
            year = data['year']
            hdlr = 's%02de%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else year
            query = '%s %s' % (title, year)
            query = re.sub(r'(\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)
            query = quote_plus(query)

            url = urljoin(self.base_link, self.search_link % query)

            r = client.request(url)
            posts = client.parseDOM(r, 'item')

            for post in posts:
                try:
                    name = client.parseDOM(post, 'title')[0]
                    name = client.replaceHTMLCodes(name)
                    name = ensure_str(name, errors='ignore')

                    y = re.findall(r'(\d{4}|S\d+E\d+|S\d+)', name, re.I)[0]

                    # re.sub's fourth positional argument is count, not
                    # flags, so pass flags= explicitly
                    t = re.sub(r'(\.|\(|\[|\s)(\d{4}|S\d+E\d+|S\d+|3D)(\.|\)|\]|\s|)(.+|)',
                               '', name, flags=re.I)

                    if not (re.findall(r'\w+', cleantitle.get(t))[0] == cleantitle.get(title) and year == y):
                        raise Exception()

                    if 'tvshowtitle' not in data:
                        links = client.parseDOM(post, 'a', ret='href')
                    else:
                        ep = '%02d' % int(data['episode'])
                        pattern = r'>Season[\s|:]%d<(.+?)(?:<b>Season|</content)' % int(data['season'])
                        data = re.findall(pattern, post, re.S | re.I)
                        data = dom_parser2.parse_dom(data, 'a', req='href')
                        links = [(i.attrs['href'], i.content.lower())
                                 for i in data]
                        links = [
                            i[0] for i in links
                            if (hdlr in i[0] or hdlr in i[1] or ep == i[1])
                        ]

                    for url in links:
                        try:
                            if any(x in url for x in [
                                    '.online', 'xrysoi.', 'filmer', '.bp',
                                    '.blogger'
                            ]):
                                continue

                            url = client.replaceHTMLCodes(url)
                            valid, host = source_utils.is_host_valid(
                                url, hostDict)
                            #if 'hdvid' in host: valid = True
                            if not valid: continue
                            # try: dub = re.findall('ΜΕΤΑΓΛΩΤΙΣΜΕΝΟ', post, re.S|re.I)[0]
                            # except: dub = None
                            # info = ' / '.join((name, 'DUB')) if dub else name

                            sources.append({
                                'source': host,
                                'quality': 'sd',
                                'language': 'gr',
                                'url': url,
                                'direct': False,
                                'debridonly': False
                            })
                        except Exception:
                            pass

                except Exception:
                    log_utils.log('xrysoi_exc', 1)

            return sources
        except Exception:
            return sources