Code example #1
 def sources(self, url, hostDict, hostprDict):
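     # Scrape <p class="server_play"> blocks for hoster links and return them as source dicts.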
     try:
         sources = []
         r = self.scraper.get(url).content
         r = dom_parser2.parse_dom(r, 'p', {'class': 'server_play'})
         r = [dom_parser2.parse_dom(i, 'a', req=['href']) for i in r if i]
         r = [(i[0].attrs['href'],
               re.search(r'/(\w+)\.html', i[0].attrs['href'])) for i in r
              if i]
         r = [(i[0], i[1].groups()[0]) for i in r if i[0] and i[1]]
         for i in r:
             try:
                 host = i[1]
                 if str(host) in str(hostDict):
                     host = client.replaceHTMLCodes(host)
                     host = host.encode('utf-8')
                     sources.append({
                         'source': host,
                         'quality': 'SD',
                         'language': 'en',
                         'url': i[0].replace('\/', '/'),
                         'direct': False,
                         'debridonly': False
                     })
             except:
                 pass
         return sources
     except Exception:
         return sources
Code example #2
 def episode(self, url, imdb, tvdb, title, premiered, season, episode):
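     # Resolve the show's season page via search, then return the link for the requested episode.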
     try:
         if url is None: return
         url = urlparse.parse_qs(url)
         url = dict([(i, url[i][0]) if url[i] else (i, '') for i in url])
         url['premiered'], url['season'], url['episode'] = premiered, season, episode
         try:
             clean_title = cleantitle.geturl(url['tvshowtitle']) + '-season-%d' % int(season)
             search_url = urlparse.urljoin(self.base_link, self.search_link % clean_title.replace('-', '+'))
             r = self.scraper.get(search_url).content
             r = dom_parser2.parse_dom(r, 'li', {'class': 'item'})
             r = [(dom_parser2.parse_dom(i, 'a', attrs={'class': 'title'}),
                   dom_parser2.parse_dom(i, 'div', attrs={'class': 'status'})[0]) for i in r if i]
             r = [(i[0][0].attrs['href'], re.findall('(.+?)</b><br', i[0][0].content, re.DOTALL)[0],
                   re.findall('(\d+)', i[1].content)[0]) for i in r if i]
             r = [(i[0], i[1].split(':')[0], i[2]) for i in r
                  if cleantitle.get(i[1].split(':')[0]) == cleantitle.get(url['tvshowtitle'])
                  and i[2] == str(int(season))]
             url = r[0][0]
         except:
             pass
         data = self.scraper.get(url).content
         data = client.parseDOM(data, 'div', attrs={'id': 'details'})
         data = zip(client.parseDOM(data, 'a'), client.parseDOM(data, 'a', ret='href'))
         url = [(i[0], i[1]) for i in data if i[0] == str(int(episode))]
         return url[0][1]
     except:
         return
Code example #3
    def sources(self, url, hostDict, hostprDict):
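        # Read the page's quality tag, then emit one source per link in the #servers-list block.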
        try:
            sources = []
            if url is None: return sources

            r = self.scraper.get(url).content
            quality = re.findall(r'>(\w+)</p', r)
            if quality[0] == "HD":
                quality = "720p"
            else:
                quality = "SD"
            r = dom_parser2.parse_dom(r, 'div', {'id': 'servers-list'})
            r = [dom_parser2.parse_dom(i, 'a', req=['href']) for i in r if i]

            for i in r[0]:
                url = {
                    'url': i.attrs['href'],
                    'data-film': i.attrs['data-film'],
                    'data-server': i.attrs['data-server'],
                    'data-name': i.attrs['data-name']
                }
                url = urllib.urlencode(url)
                sources.append({
                    'source': i.content,
                    'quality': quality,
                    'language': 'en',
                    'url': url,
                    'direct': False,
                    'debridonly': False
                })
            return sources
        except:
            return sources
Code example #4
    def sources(self, url, hostDict, hostprDict):
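        # Requires debrid: match search posts against the title and SxxExx/year handle, then thread out _get_sources.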
        try:
            self._sources = []
            if url is None: return self._sources
            if debrid.status() is False: raise Exception()

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            title = data['tvshowtitle'] if 'tvshowtitle' in data else data[
                'title']
            hdlr = 'S%02dE%02d' % (int(data['season']), int(
                data['episode'])) if 'tvshowtitle' in data else data['year']

            query = '%s S%02dE%02d' % (
                data['tvshowtitle'], int(data['season']),
                int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % (
                    data['title'], data['year'])
            query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)

            query = self.search_link % cleantitle.geturl(query)
            url = urlparse.urljoin(self.base_link, query)
            r = client.request(url)
            posts = dom_parser2.parse_dom(r, 'div', {'class': 'eTitle'})
            posts = [
                dom_parser2.parse_dom(i.content, 'a', req='href')
                for i in posts if i
            ]
            posts = [(i[0].attrs['href'], re.sub('<.+?>', '', i[0].content))
                     for i in posts if i]
            posts = [
                (i[0], i[1]) for i in posts
                if (cleantitle.get_simple(i[1].split(hdlr)[0]) ==
                    cleantitle.get(title) and hdlr.lower() in i[1].lower())
            ]
            self.hostDict = hostDict + hostprDict
            threads = []

            for i in posts:
                threads.append(workers.Thread(self._get_sources, i))
            [i.start() for i in threads]
            [i.join() for i in threads]

            return self._sources
        except Exception:
            return self._sources
Code example #5
File: rapidmoviez.py  Project: csu-xiao-an/LilacTV
    def sources(self, url, hostDict, hostprDict):
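        # Requires debrid: search by title/year, keep matching release/episode links, resolve them on worker threads.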

        self.sources = []

        try:
            if url is None:
                return self.sources

            if debrid.status() is False:
                raise Exception()

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            title = data['tvshowtitle'] if 'tvshowtitle' in data else data[
                'title']

            hdlr = data['year']
            hdlr2 = 'S%02dE%02d' % (int(data['season']), int(
                data['episode'])) if 'tvshowtitle' in data else ''
            imdb = data['imdb']

            url = self.search(title, hdlr)
            headers = {'User-Agent': client.agent()}
            r = self.scraper.get(url, headers=headers).content
            if hdlr2 == '':
                r = dom_parser2.parse_dom(r, 'ul', {'id': 'releases'})[0]
            else:
                r = dom_parser2.parse_dom(r, 'ul', {'id': 'episodes'})[0]
            r = dom_parser2.parse_dom(r.content, 'a', req=['href'])
            r = [(i.content, urlparse.urljoin(self.base_link, i.attrs['href']))
                 for i in r if i and i.content != 'Watch']
            if hdlr2 != '':
                r = [(i[0], i[1]) for i in r if hdlr2.lower() in i[0].lower()]

            self.hostDict = hostDict + hostprDict
            threads = []

            for i in r:
                threads.append(workers.Thread(self._get_sources, i[0], i[1]))
            [i.start() for i in threads]

            alive = [x for x in threads if x.is_alive()]
            while alive:
                alive = [x for x in threads if x.is_alive()]
                time.sleep(0.1)
            return self.sources
        except:
            return self.sources
Code example #6
    def _get_sources(self, item, hostDict):
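        # Parse one result page: extract release size plus hoster/quality per '.elemento' row, append to self._sources.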
        try:
            quality, info = source_utils.get_release_quality(item[0], item[1])
            size = item[2] if item[2] != '0' else item[0]

            try:
                size = re.findall('((?:\d+\,\d+\.\d+|\d+\.\d+|\d+\,\d+|\d+)\s*(?:GB|GiB|MB|MiB))', size)[-1]
                div = 1 if size.endswith(('GB', 'GiB')) else 1024
                size = float(re.sub('[^0-9|/.|/,]', '', size.replace(',', '.'))) / div
                size = '%.2f GB' % size
                info.append(size)

            except Exception:
                pass

            data = self.scraper.get(item[1]).content

            try:
                r = client.parseDOM(data, 'li', attrs={'class': 'elemento'})
                r = [(dom_parser2.parse_dom(i, 'a', req='href')[0],
                      dom_parser2.parse_dom(i, 'img', req='alt')[0],
                      dom_parser2.parse_dom(i, 'span', {'class': 'd'})[0]) for i in r]
                urls = [('http:' + i[0].attrs['href'] if not i[0].attrs['href'].startswith('http') else
                         i[0].attrs['href'], i[1].attrs['alt'], i[2].content) for i in r if i[0] and i[1]]

                for url, host, qual in urls:

                    try:
                        if any(x in url for x in ['.rar', '.zip', '.iso', ':Upcoming']): raise Exception()
                        url = client.replaceHTMLCodes(url)
                        url = url.encode('utf-8')

                        valid, host = source_utils.is_host_valid(host, hostDict)
                        if not valid: continue
                        host = client.replaceHTMLCodes(host)
                        host = host.encode('utf-8')
                        quality, info = source_utils.get_release_quality(qual, quality)
                        info.append('HEVC')
                        info = ' | '.join(info)
                        self._sources.append(
                            {'source': host, 'quality': quality, 'language': 'en', 'url': url, 'info': info,
                             'direct': False, 'debridonly': True})
                    except Exception:
                        pass
            except Exception:
                pass

        except BaseException:
            return
Code example #7
 def movie(self, imdb, title, localtitle, aliases, year):
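     # Search by cleaned title and return the first result whose title and year both match.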
     try:
         clean_title = cleantitle.geturl(title)
         search_url = urlparse.urljoin(self.base_link, self.search_link % clean_title.replace('-', '+'))
         r = self.scraper.get(search_url).content
         r = dom_parser2.parse_dom(r, 'li', {'class': 'item'})
         r = [(dom_parser2.parse_dom(i, 'a', attrs={'class': 'title'}),
               re.findall('status-year">(\d{4})</div', i.content, re.DOTALL)[0]) for i in r if i]
         r = [(i[0][0].attrs['href'], re.findall('(.+?)</b><br', i[0][0].content, re.DOTALL)[0], i[1])
              for i in r if i]
         r = [(i[0], i[1], i[2]) for i in r if (cleantitle.get(i[1]) == cleantitle.get(title) and i[2] == year)]
         url = r[0][0]
         return url
     except Exception:
         return
Code example #8
    def _get_items(self, url):
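        # Walk the 'table2' rows, keep names matching self.title and self.hdlr, and collect (name, link, size).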
        try:
            headers = {'User-Agent': client.agent()}
            r = client.request(url, headers=headers)
            posts = client.parseDOM(r, 'table', attrs={'class': 'table2'})[0]
            posts = client.parseDOM(posts, 'tr')
            for post in posts:
                data = dom.parse_dom(post, 'a', req='href')[1]
                link = urlparse.urljoin(self.base_link, data.attrs['href'])
                name = data.content
                t = name.split(self.hdlr)[0]

                if not cleantitle.get(re.sub(r'[()]', '', t)) == cleantitle.get(self.title): continue

                try:
                    y = re.findall('[\.|\(|\[|\s|\_|\-](S\d+E\d+|S\d+)[\.|\)|\]|\s|\_|\-]', name, re.I)[-1].upper()
                except BaseException:
                    y = re.findall('[\.|\(|\[|\s\_|\-](\d{4})[\.|\)|\]|\s\_|\-]', name, re.I)[-1].upper()
                if not y == self.hdlr: continue

                try:
                    size = re.findall('((?:\d+\,\d+\.\d+|\d+\.\d+|\d+\,\d+|\d+)\s*(?:GiB|MiB|GB|MB))', post)[0]
                    div = 1 if size.endswith('GB') else 1024
                    size = float(re.sub('[^0-9|/.|/,]', '', size.replace(',', '.'))) / div
                    size = '%.2f GB' % size
                except BaseException:
                    size = '0'

                self.items.append((name, link, size))
            return self.items
        except BaseException:
            return self.items
Code example #9
    def _get_sources(self, url):
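        # url is a (link, title) pair: scan the page's outbound links, skip archives and samples, keep valid hosters.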
        try:
            item = client.request(url[0])
            title = url[1]
            links = dom_parser2.parse_dom(item, 'a', req='href')
            links = [i.attrs['href'] for i in links]
            info = []
            try:
                size = re.findall('((?:\d+\.\d+|\d+\,\d+|\d+)\s*(?:GiB|MiB|GB|MB))', item)[0]
                div = 1 if size.endswith(('GB', 'GiB')) else 1024
                size = float(re.sub('[^0-9|/.|/,]', '', size)) / div
                size = '%.2f GB' % size
                info.append(size)
            except Exception:
                pass
            info = ' | '.join(info)
            for url in links:
                if 'youtube' in url: continue
                if any(x in url.lower() for x in ['.rar.', '.zip.', '.iso.']) or any(
                        url.lower().endswith(x) for x in ['.rar', '.zip', '.iso']): continue

                if any(x in url.lower() for x in ['youtube', 'sample', 'trailer']): continue
                valid, host = source_utils.is_host_valid(url, self.hostDict)
                if not valid: continue

                host = client.replaceHTMLCodes(host)
                host = host.encode('utf-8')
                quality, info2 = source_utils.get_release_quality(title, url)
                if url in str(self._sources): continue

                self._sources.append(
                    {'source': host, 'quality': quality, 'language': 'en', 'url': url, 'info': info, 'direct': False,
                     'debridonly': True})
        except Exception:
            pass
Code example #10
    def sources(self, url, hostDict, hostprDict):
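        # Fetch the player page for the IMDB id and emit each jwplayer:source file/label pair as a direct source.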
        sources = []
        try:
            if url is None:
                return sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
            imdb = data['imdb']

            url = urlparse.urljoin(self.base_link, 'player/play.php?imdb=%s' % imdb)
            data = client.request(url, referer=self.base_link)
            links = dom.parse_dom(data, 'jwplayer:source', req=['file', 'label'])
            for link in links:
                url = link.attrs['file']
                url = url.replace(' ', '%20') + '|User-Agent={0}&Referer={1}'.format(
                    urllib.quote(client.agent()), url)
                quality, info = source_utils.get_release_quality(link.attrs['label'])
                sources.append(
                    {'source': 'GVIDEO', 'quality': quality, 'language': 'en', 'url': url,
                     'direct': True, 'debridonly': False})

            return sources
        except BaseException:
            return sources
Code example #11
    def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
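        # Search the site and return the href whose cleaned title (year/episode tags stripped) matches exactly.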
        try:
            query = self.tvsearch_link % urllib.quote_plus(
                cleantitle.query(tvshowtitle))
            query = urlparse.urljoin(self.base_link, query.lower())
            result = client.request(query, referer=self.base_link)
            result = client.parseDOM(result,
                                     'div',
                                     attrs={'class': 'index_item.+?'})

            result = [(dom.parse_dom(i, 'a', req=['href', 'title'])[0])
                      for i in result if i]
            result = [(
                i.attrs['href']
            ) for i in result if cleantitle.get(tvshowtitle) == cleantitle.get(
                re.sub(
                    '(\.|\(|\[|\s)(\d{4}|S\d+E\d+|S\d+|3D)(\.|\)|\]|\s|)(.+|)',
                    '',
                    i.attrs['title'],
                    flags=re.I))][0]

            url = client.replaceHTMLCodes(result)
            url = url.encode('utf-8')
            return url
        except Exception:
            return
Code example #12
 def episode(self, url, imdb, tvdb, title, premiered, season, episode):
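     # Search for the show's season page, then return the href labelled with the requested episode.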
     try:
         if url is None: return
         url = urlparse.parse_qs(url)
         url = dict([(i, url[i][0]) if url[i] else (i, '') for i in url])
         clean_title = cleantitle.geturl(url['tvshowtitle']) + '-s%02d' % int(season)
         url = urlparse.urljoin(self.base_link, (self.search_link % (clean_title, url['year'])))
         r = client.request(url)
         r = dom_parser2.parse_dom(r, 'div', {'id': 'ip_episode'})
         r = [dom_parser2.parse_dom(i, 'a', req=['href']) for i in r if i]
         for i in r[0]:
             if i.content == 'Episode %s' % episode:
                 url = i.attrs['href']
         return url
     except:
         return
Code example #13
    def movie(self, imdb, title, localtitle, aliases, year):
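        # Search movies by cleaned title and return the first hit whose release year matches.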
        try:
            clean_title = cleantitle.geturl(title).replace('-', '+')
            url = urlparse.urljoin(self.base_link, (self.movies_search_path % clean_title))
            r = self.scraper.get(url).content

            r = dom_parser2.parse_dom(r, 'div', {'id': 'movie-featured'})
            r = [dom_parser2.parse_dom(i, 'a', req=['href']) for i in r if i]
            r = [(i[0].attrs['href'], re.search('Release:\s*(\d+)', i[0].content)) for i in r if i]
            r = [(i[0], i[1].groups()[0]) for i in r if i[0] and i[1]]
            r = [(i[0], i[1]) for i in r if i[1] == year]
            if r:
                url = r[0][0]
                return url
            return
        except Exception:
            return
Code example #14
 def episode(self, url, imdb, tvdb, title, premiered, season, episode):
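     # Scan '.el-item' rows for matching 'Season N' / 'Episode N' labels and return that link.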
     try:
         if url is None: return
         r = client.request(url)
         r = dom_parser2.parse_dom(r, 'div', {'class': 'el-item'})
         r = [(dom_parser2.parse_dom(i, 'div', {'class': 'season'}),
               dom_parser2.parse_dom(i, 'div', {'class': 'episode'}),
               dom_parser2.parse_dom(i, 'a', req='href'))
              for i in r if i]
         r = [(i[2][0].attrs['href']) for i in r
              if i[0][0].content == 'Season %01d' % int(season)
              and i[1][0].content == 'Episode %01d' % int(episode)]
         if r:
             return r[0]
         else:
             return
     except:
         return
Code example #15
 def resolve(self, url):
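     # Unwrap 'hideurl' pages by following the 'direct me' anchor; pass other URLs through unchanged.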
     if 'hideurl' in url:
         data = self.scraper.get(url).content
         data = client.parseDOM(data, 'div', attrs={'class': 'row'})
         url = [dom_parser2.parse_dom(i, 'a', req='href')[0] for i in data]
         url = [i.attrs['href'] for i in url if 'direct me' in i.content][0]
         return url
     else:
         return url
Code example #16
    def sources(self, url, hostDict, hostprDict):
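        # Search by title, keep hits matching title and year, then pull the embedded frame_url from each item page.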
        try:
            sources = []

            if url is None: return sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

            title = data['title']

            hdlr = data['year']

            query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', title)

            url = self.search_link % urllib.quote_plus(query)
            url = urlparse.urljoin(self.base_link, url)
            r = client.request(url)

            posts = client.parseDOM(r, 'div', attrs={'class': 'video_title'})

            items = []

            for post in posts:
                try:
                    data = dom_parser2.parse_dom(post, 'a', req=['href', 'title'])[0]
                    t = data.content
                    y = re.findall('\((\d{4})\)', data.attrs['title'])[0]
                    qual = data.attrs['title'].split('-')[1]
                    link = data.attrs['href']

                    if not cleantitle.get(t) == cleantitle.get(title): raise Exception()
                    if not y == hdlr: raise Exception()

                    items += [(link, qual)]

                except BaseException:
                    pass
            for item in items:
                try:
                    r = client.request(item[0]) if item[0].startswith('http') else \
                        client.request(urlparse.urljoin(self.base_link, item[0]))

                    qual = client.parseDOM(r, 'h1')[0]
                    quality = source_utils.get_release_quality(item[1], qual)[0]

                    url = re.findall('''frame_url\s*=\s*["']([^']+)['"]\;''', r, re.DOTALL)[0]
                    url = url if url.startswith('http') else urlparse.urljoin('https://', url)

                    sources.append({'source': 'GVIDEO', 'quality': quality, 'language': 'en', 'url': url,
                                    'direct': False, 'debridonly': False})

                except BaseException:
                    pass

            return sources
        except BaseException:
            return sources
Code example #17
File: rapidmoviez.py  Project: csu-xiao-an/LilacTV
 def search(self, title, year):
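     # Return the first search result whose title contains the query title and mentions the year.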
     try:
         url = urlparse.urljoin(
             self.base_link, self.search_link % (urllib.quote_plus(title)))
         headers = {'User-Agent': client.agent()}
         r = self.scraper.get(url, headers=headers).content
         r = dom_parser2.parse_dom(r, 'div', {'class': 'list_items'})[0]
         r = dom_parser2.parse_dom(r.content, 'li')
         r = [(dom_parser2.parse_dom(i, 'a', {'class': 'title'}))
              for i in r]
         r = [(i[0].attrs['href'], i[0].content) for i in r]
         r = [(urlparse.urljoin(self.base_link, i[0])) for i in r
              if cleantitle.get(title) in cleantitle.get(i[1])
              and year in i[1]]
         if r:
             return r[0]
         else:
             return
     except:
         return
Code example #18
 def resolve(self, url):
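     # Return the cleaned href of the first anchor carrying data-episodeid/data-linkid attributes.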
     try:
         r = client.request(url)
         r = dom_parser2.parse_dom(
             r, 'a', req=['href', 'data-episodeid', 'data-linkid'])[0]
         url = r.attrs['href']
         url = client.replaceHTMLCodes(url)
         url = url.encode('utf-8')
         return url
     except:
         return
Code example #19
    def sources(self, url, hostDict, hostprDict):
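        # Find the item page by title+year, then read file/label pairs out of the jwplayer 'sources' array.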
        sources = []

        try:
            if not url: return sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])
            title = data['title']
            year = data['year']
            t = title + year

            query = '%s' % data['title']
            query = re.sub(r'(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)

            url = self.search_link.format(urllib.quote_plus(query))
            url = urlparse.urljoin(self.base_link, url)

            r = client.request(url)
            items = client.parseDOM(r, 'li')
            items = [(dom.parse_dom(i, 'a', req='href')[0]) for i in items
                     if year in i]
            items = [(i.attrs['href'], re.sub('<.+?>|\n', '',
                                              i.content).strip())
                     for i in items]
            item = [
                i[0].replace('movie', 'view') for i in items
                if cleantitle.get(t) == cleantitle.get(i[1])
            ][0]

            html = client.request(item)
            streams = re.findall('sources\:\s*\[(.+?)\]\,', html, re.DOTALL)[0]
            streams = re.findall(
                'file:\s*[\'"](.+?)[\'"].+?label:\s*[\'"](.+?)[\'"]', streams,
                re.DOTALL)

            for link, label in streams:
                quality = source_utils.get_release_quality(label, label)[0]
                link += '|User-Agent=%s&Referer=%s' % (urllib.quote(
                    client.agent()), item)
                sources.append({
                    'source': 'Direct',
                    'quality': quality,
                    'language': 'en',
                    'url': link,
                    'direct': True,
                    'debridonly': False
                })

            return sources
        except BaseException:
            return sources
Code example #20
File: rapidmoviez.py  Project: csu-xiao-an/LilacTV
    def _get_sources(self, name, url):
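        # Collect hoster URLs from the release's '.ppu2h' blocks, filter archives/subs, and append debrid sources.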
        try:
            headers = {'User-Agent': client.agent()}
            r = self.scraper.get(url, headers=headers).content
            name = client.replaceHTMLCodes(name)
            l = dom_parser2.parse_dom(r, 'div', {'class': 'ppu2h'})
            s = ''
            for i in l:
                s += i.content
            urls = re.findall(
                r'''((?:http|ftp|https)://[\w_-]+(?:(?:\.[\w_-]+)+)[\w.,@?^=%&:/~+#-]*[\w@?^=%&/~+#-])''',
                s,
                flags=re.MULTILINE | re.DOTALL)
            urls = [
                i for i in urls
                if not any(x in i for x in ('.rar', '.zip', '.iso', '.idx', '.sub'))
            ]
            for url in urls:
                if url in str(self.sources):
                    continue

                valid, host = source_utils.is_host_valid(url, self.hostDict)
                if not valid:
                    continue
                host = client.replaceHTMLCodes(host)
                host = host.encode('utf-8')
                quality, info = source_utils.get_release_quality(name, url)
                try:
                    size = re.findall(
                        '((?:\d+\.\d+|\d+\,\d+|\d+)\s*(?:GiB|MiB|GB|MB))',
                        name)[0]
                    div = 1 if size.endswith(('GB', 'GiB')) else 1024
                    size = float(re.sub('[^0-9|/.|/,]', '', size)) / div
                    size = '%.2f GB' % size
                    info.append(size)
                except BaseException:
                    pass
                info = ' | '.join(info)
                self.sources.append({
                    'source': host,
                    'quality': quality,
                    'language': 'en',
                    'url': url,
                    'info': info,
                    'direct': False,
                    'debridonly': True
                })
        except:
            pass
Code example #21
    def sources(self, url, hostDict, hostprDict):
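        # Requires debrid: search, then hand each comment-style post to _get_sources on a worker thread.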
        try:
            self._sources = []
            if url is None: return self._sources

            if debrid.status() is False: raise Exception()

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            title = data['tvshowtitle'] if 'tvshowtitle' in data else data[
                'title']
            hdlr = 'S%02dE%02d' % (int(data['season']), int(
                data['episode'])) if 'tvshowtitle' in data else data['year']

            query = '%s S%02dE%02d' % (
                data['tvshowtitle'], int(data['season']),
                int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % (
                    data['title'], data['year'])
            query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)

            query = cleantitle.geturl(query)
            url = urlparse.urljoin(self.base_link, query)

            headers = {'User-Agent': client.agent()}
            r = client.request(url, headers=headers)
            posts = dom_parser2.parse_dom(r, 'li', {
                'class': re.compile('.+?'),
                'id': re.compile('comment-.+?')
            })
            self.hostDict = hostDict + hostprDict
            threads = []

            for i in posts:
                threads.append(workers.Thread(self._get_sources, i.content))
            [i.start() for i in threads]
            [i.join() for i in threads]

            return self._sources
        except Exception:
            return self._sources
Code example #22
    def sources(self, url, hostDict, hostprDict):
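        # POST the site's AJAX search, then replay its download form and capture the redirect Location as a direct link.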
        sources = []
        try:
            if url is None: return sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])
            title, year = data['title'], data['year']
            post = 'action=ajaxsearchlite_search&aslp={0}&asid=1&' \
                   'options=qtranslate_lang%3D0%26set_exactonly' \
                   '%3Dchecked%26set_intitle%3DNone%26set_inpages%3DNone%26customset%255B%255D%3' \
                   'Damy_movie%26customset%255B%255D%3Dvc4_templates%26customset%255B%255D%3Dvc_grid_item%26' \
                   'customset%255B%255D%3Damn_mi-lite'.format(urllib.quote_plus(cleantitle.getsearch(title)))

            post_link = urlparse.urljoin(self.base_link, self.post_link)
            data = client.request(post_link, post=post, referer=self.base_link)
            items = dom.parse_dom(data, 'a', req='href')
            item = [(i.attrs['href']) for i in items
                    if cleantitle.get(title) == cleantitle.get(
                        i.content.split(year)[0].lstrip())][0]
            r = client.request(item)
            quality = re.findall('Quality:(.+?)</p>', r, re.DOTALL)[0]
            FN = client.parseDOM(r,
                                 'input',
                                 ret='value',
                                 attrs={'name': 'filename'})[0]
            FS = client.parseDOM(r,
                                 'input',
                                 ret='value',
                                 attrs={'name': 'fileservername'})[0]
            FSize = client.parseDOM(r,
                                    'input',
                                    ret='value',
                                    attrs={'name': 'filesize'})[0]
            post = 'filename={0}&filesize={1}&fileservername={2}&filepath=downloads'.format(
                urllib.quote_plus(FN), urllib.quote_plus(FSize), FS)
            plink2 = 'https://moviescouch.co/download.php'
            headers = {'Referer': 'https://moviescouch.co/downloading/'}
            pdata = client.request(plink2,
                                   post=post,
                                   redirect=False,
                                   headers=headers,
                                   output='extended')
            link = pdata[2]['Location']
            link = '{0}|Referer={1}'.format(
                urllib.quote(link, '.:/*&^%$#@!_-+='), item)
            quality, info = source_utils.get_release_quality(quality, FN)
            info.append(FSize)
            info = ' | '.join(info)
            sources.append({
                'source': 'GVIDEO',
                'quality': quality,
                'language': 'en',
                'url': link,
                'direct': True,
                'info': info,
                'debridonly': False
            })

            return sources
        except BaseException:
            return sources
Code example #23
    def sources(self, url, hostDict, hostprDict):
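        # Resolve the watch page, enumerate its server list via the token/source API, then classify every stream URL found.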
        sources = []
        try:
            if url is None:
                return sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])
            aliases = eval(data['aliases'])
            headers = {}
            if 'tvshowtitle' in data:
                episode = int(data['episode'])
                url = self.searchShow(data['tvshowtitle'], data['season'],
                                      aliases, headers)
            else:
                episode = 0
                url = self.searchMovie(data['title'], data['year'], aliases,
                                       headers)
            url += '/'
            ref_url = url
            mozhdr = {
                'User-Agent':
                'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.9.0.3) Gecko/2008092417 Firefox/3.0.3'
            }
            headers = mozhdr
            headers['X-Requested-With'] = 'XMLHttpRequest'
            headers['Referer'] = url
            self.s = cfscrape.create_scraper()
            mid = re.findall('-(\d*)/', url)[0]
            try:
                u = urlparse.urljoin(self.base_link, self.server_link % mid)
                r = self.s.get(u, headers=mozhdr).content
                r = json.loads(r)['html']

                r = dom_parser2.parse_dom(r,
                                          'li',
                                          req=['data-id', 'data-server'])
                r = [(i.attrs['data-id'], i.attrs['data-server'],
                      dom_parser2.parse_dom(i.content, 'a', req='title')[0])
                     for i in r]
                r = [(i[0], i[1], i[2].content)
                     for i in r]  #r = zip(ids, servers, labels)

                urls = []
                for eid in r:
                    try:
                        ep = re.findall('episode.*?(\d+).*?',
                                        eid[2].lower())[0]
                        ep = '%01d' % int(ep)
                    except BaseException:
                        ep = 0
                    if (episode == 0) or (int(ep) == int(episode)):
                        t = int(time.time() * 1000)
                        url = urlparse.urljoin(
                            self.base_link, self.token_link % (eid[0], mid, t))
                        script = self.s.get(url, headers=headers).content
                        if '$_$' in script:
                            params = self.uncensored1(script)
                        elif script.startswith('[]') and script.endswith('()'):
                            params = self.uncensored2(script)
                        elif '_x=' in script:
                            x = re.search('''_x=['"]([^"']+)''',
                                          script).group(1)
                            y = re.search('''_y=['"]([^"']+)''',
                                          script).group(1)
                            params = {'x': x, 'y': y}
                        else:
                            raise Exception()
                        u = urlparse.urljoin(
                            self.base_link, self.source_link %
                            (eid[0], params['x'], params['y']))
                        length = 0
                        count = 0
                        while length == 0 and count < 11:
                            r = self.s.get(u, headers=headers).content
                            length = len(r)
                            if length == 0:
                                if count == 9:
                                    u = u.replace('_sources', '_embed')
                                count += 1

                        try:
                            frames = re.findall('''file['"]:['"]([^'"]+)''', r)
                            for i in frames:
                                if '.srt' in i: continue
                                urls.append((i, eid[2]))
                        except BaseException:
                            pass

                        r1 = json.loads(r)

                        try:
                            frame = r1['src']
                            urls.append((frame, eid[2]))
                        except BaseException:
                            pass
                        try:
                            frame = r1['playlist'][0]
                            frame = frame['sources'][0]
                            frame = frame['file']
                            urls.append((frame, eid[2]))
                        except BaseException:
                            pass

                for i in urls:

                    s, eid = i[0], i[1]
                    try:
                        if 'googleapis' in s:
                            urls = directstream.googletag(s)
                            if not urls:
                                quality, info = source_utils.get_release_quality(
                                    url, eid)
                                sources.append({
                                    'source': 'gvideo',
                                    'quality': quality,
                                    'language': 'en',
                                    'url': s,
                                    'direct': True,
                                    'debridonly': False
                                })
                            else:
                                for i in urls:
                                    sources.append({
                                        'source': 'gvideo',
                                        'quality': i['quality'],
                                        'language': 'en',
                                        'url': i['url'],
                                        'direct': True,
                                        'debridonly': False
                                    })
                        elif 'lh3.' in s:
                            urls = directstream.googletag(s)
                            for i in urls:
                                try:
                                    url2 = directstream.google(
                                        i['url'], ref=ref_url
                                    ) if 'lh3.' in i['url'] else i['url']
                                    if not url2: url2 = i['url']
                                    sources.append({
                                        'source': 'gvideo',
                                        'quality': i['quality'],
                                        'language': 'en',
                                        'url': url2,
                                        'direct': True,
                                        'debridonly': False
                                    })
                                except BaseException:
                                    sources.append({
                                        'source': 'gvideo',
                                        'quality': i['quality'],
                                        'language': 'en',
                                        'url': i['url'],
                                        'direct': True,
                                        'debridonly': False
                                    })
                        elif 'lemonstream' in s:
                            quality, info = source_utils.get_release_quality(
                                s, eid)
                            sources.append({
                                'source': 'CDN',
                                'quality': quality,
                                'language': 'en',
                                'url': s,
                                'direct': True,
                                'debridonly': False
                            })
                        elif 'notcool' in s:
                            s = s.replace('\\', '')
                            quality, info = source_utils.get_release_quality(
                                s, eid)
                            sources.append({
                                'source': 'CDN',
                                'quality': quality,
                                'language': 'en',
                                'url': s,
                                'direct': True,
                                'debridonly': False
                            })
                        else:
                            quality, info = source_utils.get_release_quality(
                                s, eid)
                            valid, host = source_utils.is_host_valid(
                                s, hostDict)
                            if valid:
                                sources.append({
                                    'source': host,
                                    'quality': quality,
                                    'language': 'en',
                                    'url': s,
                                    'direct': False,
                                    'debridonly': False
                                })
                    except BaseException:
                        pass

            except BaseException:
                pass

            return sources
        except BaseException:
            return sources
Code example #24
    def sources(self, url, hostDict, hostprDict):
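        # Requires debrid: search by title/year, match posts via the IMDB id, then grade each download link from its name.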
        try:
            sources = []

            if url is None: return sources

            if debrid.status() is False: raise Exception()

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            title = data['title'].replace(':', '').lower()
            year = data['year']

            query = '%s %s' % (data['title'], data['year'])
            query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)

            url = urlparse.urljoin(self.base_link, self.post_link)

            post = 'do=search&subaction=search&search_start=0&full_search=0&result_from=1&story=%s' % urllib.quote_plus(
                query)

            r = client.request(url, post=post)
            r = client.parseDOM(r, 'div', attrs={'class': 'box-out margin'})
            r = [(dom_parser2.parse_dom(i,
                                        'div',
                                        attrs={'class': 'news-title'}))
                 for i in r if data['imdb'] in i]
            r = [(dom_parser2.parse_dom(i[0], 'a', req='href')) for i in r
                 if i]
            r = [(i[0].attrs['href'], i[0].content) for i in r if i]

            hostDict = hostprDict + hostDict

            for item in r:
                try:
                    name = item[1]
                    y = re.findall('\((\d{4})\)', name)[0]
                    if not y == year: raise Exception()

                    s = re.findall(
                        '((?:\d+\.\d+|\d+\,\d+|\d+)\s*(?:GB|GiB|Gb|MB|MiB|Mb))',
                        name)
                    s = s[0] if s else '0'
                    data = client.request(item[0])
                    data = dom_parser2.parse_dom(data,
                                                 'div',
                                                 attrs={'id': 'r-content'})
                    data = re.findall(
                        '\s*<b><a href=.+?>(.+?)</b>.+?<u><b><a href="(.+?)".+?</a></b></u>',
                        data[0].content, re.DOTALL)
                    u = [(i[0], i[1], s) for i in data if i]

                    for name, url, size in u:
                        try:
                            if '4K' in name:
                                quality = '4K'
                            elif '1080p' in name:
                                quality = '1080p'
                            elif '720p' in name:
                                quality = '720p'
                            elif any(i in name for i in ['dvdscr', 'r5', 'r6']):
                                quality = 'SCR'
                            elif any(i in name for i in [
                                    'camrip', 'tsrip', 'hdcam', 'hdts',
                                    'dvdcam', 'dvdts', 'cam', 'telesync', 'ts'
                            ]):
                                quality = 'CAM'
                            else:
                                quality = '720p'

                            info = []
                            if '3D' in name or '.3D.' in url:
                                info.append('3D')
                                quality = '1080p'
                            if any(i in name for i in ['hevc', 'h265', 'x265']):
                                info.append('HEVC')
                            try:
                                size = re.findall(
                                    '((?:\d+\.\d+|\d+\,\d+|\d+)\s*(?:GB|GiB|Gb|MB|MiB|Mb))',
                                    size)[-1]
                                div = 1 if size.endswith(
                                    ('Gb', 'GiB', 'GB')) else 1024
                                size = float(re.sub('[^0-9|/.|/,]', '',
                                                    size)) / div
                                size = '%.2f GB' % size
                                info.append(size)
                            except:
                                pass

                            info = ' | '.join(info)

                            url = client.replaceHTMLCodes(url)
                            url = url.encode('utf-8')
                            if any(x in url
                                   for x in ['.rar', '.zip', '.iso', 'turk']):
                                continue

                            if 'ftp' in url:
                                host = 'COV'
                                direct = True
                            else:
                                direct = False
                                host = 'turbobit.net'
                            #if not host in hostDict: continue

                            host = client.replaceHTMLCodes(host)
                            host = host.encode('utf-8')

                            sources.append({
                                'source': host,
                                'quality': quality,
                                'language': 'en',
                                'url': url,
                                'info': info,
                                'direct': direct,
                                'debridonly': False
                            })

                        except:
                            pass
                except:
                    pass

            return sources
        except:
            failure = traceback.format_exc()
            log_utils.log('UltraHD - Exception: \n' + str(failure))
            return sources
Code example #25
    def sources(self, url, hostDict, hostprDict):
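        # Locate the movie page, collect its player iframes, and classify each as hoster, MP4 or MYSTREAM source.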
        try:
            sources = []
            if url is None: return sources
            if debrid.status() is False: raise Exception()

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            url = self.searchMovie(data['title'], data['year'])
            if url is None: return sources

            r = client.request(url)
            data = client.parseDOM(r, 'div', attrs={'class': 'playex'})[0]
            frames = client.parseDOM(data, 'iframe', ret='src')
            frames += re.compile('''<iframe\s*src=['"](.+?)['"]''',
                                 re.DOTALL).findall(data)
            quality = client.parseDOM(r, 'span', attrs={'class':
                                                        'qualityx'})[0]
            for frame in frames:
                url = frame.split('=')[1] if frame.startswith('<') else frame
                url = client.replaceHTMLCodes(url)
                url = url.encode('utf-8')

                valid, host = source_utils.is_host_valid(url, hostDict)

                if valid:
                    quality, info = source_utils.get_release_quality(
                        quality, url)
                    info = ' | '.join(info)
                    sources.append({
                        'source': host,
                        'quality': quality,
                        'language': 'en',
                        'url': url,
                        'info': info,
                        'direct': False,
                        'debridonly': False
                    })

                elif url.endswith('mp4'):
                    url += '|User-Agent=%s' % urllib.quote_plus(client.agent())
                    sources.append({
                        'source': 'MP4',
                        'quality': quality,
                        'language': 'en',
                        'url': url,
                        'direct': True,
                        'debridonly': False
                    })

                elif 'mystream' in url:
                    data = client.request(url)
                    links = dom_parser2.parse_dom(data,
                                                  'source',
                                                  req=['src', 'label'])
                    for link in links:
                        label = link.attrs['label']
                        url = link.attrs[
                            'src'] + '|User-Agent=%s' % urllib.quote_plus(
                                client.agent())

                        sources.append({
                            'source': 'MYSTREAM',
                            'quality': label,
                            'language': 'en',
                            'url': url,
                            'direct': True,
                            'debridonly': False
                        })

                else:
                    continue
            return sources
        except Exception:
            return sources
Code example #26
 def sources(self, url, hostDict, hostprDict):
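     # Scrape the '.ll-item' link list and emit at most results_limit sources.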
     try:
         sources = []
         results_limit = 30
         # Per-host caps so a single hoster cannot flood the list.
         host_limits = {'vshare': 1, 'openload': 1, 'speedvid': 1,
                        'vidoza': 1, 'vidlox': 1, 'mango': 1,
                        'streamplay': 1, 'vidtodo': 1, 'clipwatch': 1,
                        'vidcloud': 1, 'vev': 1, 'flix555': 1}
         if url is None: return sources
         r = client.request(url)
         r = dom_parser2.parse_dom(r, 'div', {'class': 'll-item'})
         r = [(dom_parser2.parse_dom(i, 'a', req='href'),
               dom_parser2.parse_dom(i, 'div', {'class': 'notes'}))
              for i in r if i]
         r = [(i[0][0].attrs['href'], i[0][0].content,
               i[1][0].content if i[1] else 'None') for i in r]
         for i in r:
             try:
                 url = i[0]
                 url = client.replaceHTMLCodes(url)
                 url = url.encode('utf-8')
                 valid, host = source_utils.is_host_valid(i[1], hostDict)
                 if not valid: continue
                 host = client.replaceHTMLCodes(host)
                 host = host.encode('utf-8')
                 exhausted = False
                 for key in host_limits:
                     if key in host:
                         if host_limits[key] < 1:
                             exhausted = True
                         else:
                             host_limits[key] -= 1
                 if exhausted:
                     continue
                 quality, info = source_utils.get_release_quality(i[2], i[2])
                 info = ' | '.join(info)
                 if results_limit < 1:
                     continue
                 results_limit -= 1
                 sources.append({
                     'source': host,
                     'quality': quality,
                     'language': 'en',
                     'url': url,
                     'info': info,
                     'direct': False,
                     'debridonly': False
                 })
             except:
                 pass
         return sources
     except:
         return sources
Code example #27
    def sources(self, url, hostDict, hostprDict):
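        # Requires debrid: crawl matching posts' links, grade quality and size from the release name, prefer non-CAM results.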
        try:
            sources = []

            if url is None: return sources

            if debrid.status() is False: raise Exception()

            data = urlparse.parse_qs(url)

            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            title = data['tvshowtitle'] if 'tvshowtitle' in data else data[
                'title']

            hdlr = 'S%02dE%02d' % (int(data['season']), int(
                data['episode'])) if 'tvshowtitle' in data else data['year']

            query = '%s S%02dE%02d' % (data['tvshowtitle'], int(data['season']), int(data['episode'])) if\
                'tvshowtitle' in data else '%s %s' % (data['title'], data['year'])

            url = self.search_link % urllib.quote_plus(query).lower()
            url = urlparse.urljoin(self.base_link, url)

            headers = {'Referer': url, 'User-Agent': 'Mozilla/5.0'}
            r = self.scraper.get(url, headers=headers).content

            items = dom_parser2.parse_dom(r, 'h2')
            items = [
                dom_parser2.parse_dom(i.content,
                                      'a',
                                      req=['href', 'rel', 'data-wpel-link'])
                for i in items
            ]
            items = [(i[0].content, i[0].attrs['href']) for i in items]

            hostDict = hostprDict + hostDict

            for item in items:
                try:
                    name = item[0]
                    name = client.replaceHTMLCodes(name)
                    query = query.lower().replace(' ', '-')
                    if query not in item[1]:
                        continue
                    url = item[1]
                    headers = {'Referer': url, 'User-Agent': 'Mozilla/5.0'}
                    r = self.scraper.get(url, headers=headers).content
                    links = dom_parser2.parse_dom(
                        r, 'a', req=['href', 'rel', 'data-wpel-link'])
                    links = [i.attrs['href'] for i in links]
                    for url in links:
                        try:
                            if hdlr in name:
                                fmt = re.sub(
                                    '(.+)(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*)(\.|\)|\]|\s)',
                                    '', name.upper())
                                fmt = re.split('\.|\(|\)|\[|\]|\s|\-', fmt)
                                fmt = [i.lower() for i in fmt]

                                if any(
                                        i.endswith(('subs', 'sub', 'dubbed',
                                                    'dub')) for i in fmt):
                                    raise Exception()
                                if any(i in ['extras'] for i in fmt):
                                    raise Exception()

                                if '2160p' in fmt: quality = '4K'
                                elif '1080p' in fmt: quality = '1080p'
                                elif '720p' in fmt: quality = '720p'
                                else: quality = 'SD'
                                if any(i in ['dvdscr', 'r5', 'r6']
                                       for i in fmt):
                                    quality = 'SCR'
                                elif any(i in [
                                        'camrip', 'tsrip', 'hdcam', 'hdts',
                                        'dvdcam', 'dvdts', 'cam', 'telesync',
                                        'ts'
                                ] for i in fmt):
                                    quality = 'CAM'

                                info = []

                                if '3d' in fmt: info.append('3D')

                                try:
                                    size = re.findall(
                                        '((?:\d+\.\d+|\d+\,\d+|\d+) (?:GB|GiB|MB|MiB))',
                                        name)[-1]
                                    div = 1 if size.endswith(
                                        ('GB', 'GiB')) else 1024
                                    size = float(
                                        re.sub('[^0-9|/.|/,]', '', size)) / div
                                    size = '%.2f GB' % size
                                    info.append(size)
                                except:
                                    pass

                                if any(i in ['hevc', 'h265', 'x265']
                                       for i in fmt):
                                    info.append('HEVC')

                                info = ' | '.join(info)

                                if not any(x in url
                                           for x in ['.rar', '.zip', '.iso']):
                                    url = client.replaceHTMLCodes(url)
                                    url = url.encode('utf-8')

                                    host = re.findall(
                                        '([\w]+[.][\w]+)$',
                                        urlparse.urlparse(
                                            url.strip().lower()).netloc)[0]
                                    if host in hostDict:
                                        host = client.replaceHTMLCodes(host)
                                        host = host.encode('utf-8')

                                        sources.append({
                                            'source': host,
                                            'quality': quality,
                                            'language': 'en',
                                            'url': url,
                                            'info': info,
                                            'direct': False,
                                            'debridonly': True
                                        })
                        except:
                            pass
                except:
                    pass
            check = [i for i in sources if i['quality'] != 'CAM']
            if check: sources = check

            return sources
        except:
            return sources