Code Example #1
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []
            queries = [url['title'], '%s-%s' % (url['title'], url['year'])]

            r = requests.get('%s%s' %
                             (self.base_link, self.search_link % queries[0]))
            if not r.ok:
                r = requests.get(
                    '%s%s' % (self.base_link, self.search_link % queries[1]))
            if not r.ok:
                return sources

            r = r.content
            qual = re.compile('class="quality">(.+?)<').findall(r)

            quality = 'SD'
            for i in qual:
                quality = source_utils.check_direct_url(i)

            u = client.parseDOM(r,
                                "div",
                                attrs={"class": "pa-main anime_muti_link"})

            for t in u:
                links = re.findall('<li class=".+?" data-video="(.+?)"', t)
                for link in links:
                    if 'vidcloud' in link:
                        # Follow the vidcloud embed page and collect the
                        # alternate hosts it lists.
                        page = requests.get('https:' + link).content
                        embeds = re.findall(
                            'li data-status=".+?" data-video="(.+?)"', page)
                        for embed in embeds:
                            if 'vidcloud' in embed:
                                continue
                            valid, host = source_utils.is_host_valid(
                                embed, hostDict)
                            sources.append({
                                'source': host,
                                'quality': quality,
                                'language': 'en',
                                'url': embed,
                                'direct': False,
                                'debridonly': False
                            })
                        continue
                    valid, host = source_utils.is_host_valid(link, hostDict)
                    sources.append({
                        'source': host,
                        'quality': quality,
                        'language': 'en',
                        'url': link,
                        'direct': False,
                        'debridonly': False
                    })
            return sources
        except:
            import traceback
            traceback.print_exc()
            return
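Note: every example on this page relies on the same helper contract from the scraper package: source_utils.is_host_valid(url_or_host, hostDict) returns a (valid, host) tuple, and each hit is appended to sources as a dict with 'source', 'quality', 'language', 'url', 'direct' and 'debridonly' keys. Below is a minimal stub sketch of that contract, handy for exercising a snippet outside Kodi; the stub body is an assumption inferred from the usage above, not the project's real implementation.

# Minimal stand-in for the helper contract the snippets assume.
# Assumption: this only mirrors the (valid, host) return shape seen above.
from urlparse import urlparse  # Python 2, matching the examples

def is_host_valid(url_or_host, hostDict):
    # Accept either a full URL or a bare host name.
    host = urlparse(url_or_host).netloc or url_or_host
    host = host.replace('www.', '').lower()
    # Valid when a known hoster domain occurs in the host string.
    for h in hostDict:
        if h.lower() in host:
            return True, h.lower()
    return False, host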
Code Example #2
File: watchserieshd.py  Project: schubic/install
 def sources(self, url, hostDict, hostprDict):
     try:
         sources = []
         if url == None: return sources
         hostDict += ['akamaized.net', 'google.com', 'picasa.com', 'blogspot.com']
         result = self.scraper.get(url, timeout=10).content
         dom = dom_parser.parse_dom(result, 'a', req='data-video')
         urls = [
             i.attrs['data-video'] if i.attrs['data-video'].startswith('https') else 'https:' + i.attrs['data-video']
             for i in dom]
         for url in urls:
             dom = []
             if 'vidnode.net' in url:
                 result = self.scraper.get(url, timeout=10).content
                 dom = dom_parser.parse_dom(result, 'source', req=['src', 'label'])
                 dom = [(i.attrs['src'] if i.attrs['src'].startswith('https') else 'https:' + i.attrs['src'],
                         i.attrs['label']) for i in dom if i]
             elif 'ocloud.stream' in url:
                 result = self.scraper.get(url, timeout=10).content
                 base = re.findall('<base href="([^"]+)">', result)[0]
                 hostDict += [base]
                 dom = dom_parser.parse_dom(result, 'a', req=['href', 'id'])
                 dom = [(i.attrs['href'].replace('./embed', base + 'embed'), i.attrs['id']) for i in dom if i]
                 dom = [(re.findall("var\s*ifleID\s*=\s*'([^']+)", client.request(i[0]))[0], i[1]) for i in dom if i]
             if dom:
                 try:
                     for r in dom:
                         valid, hoster = source_utils.is_host_valid(r[0], hostDict)
                         if not valid: continue
                         quality = source_utils.label_to_quality(r[1])
                         urls, host, direct = source_utils.check_directstreams(r[0], hoster)
                         for x in urls:
                             size = source_utils.get_size(x['url']) if direct else None
                             if size:
                                 sources.append(
                                     {'source': host, 'quality': quality, 'language': 'en', 'url': x['url'],
                                      'direct': direct, 'debridonly': False, 'info': size})
                             else:
                                 sources.append(
                                     {'source': host, 'quality': quality, 'language': 'en', 'url': x['url'],
                                      'direct': direct, 'debridonly': False})
                 except:
                     pass
             else:
                 valid, hoster = source_utils.is_host_valid(url, hostDict)
                 if not valid: continue
                 try:
                     url.decode('utf-8')
                     sources.append(
                         {'source': hoster, 'quality': 'SD', 'language': 'en', 'url': url, 'direct': False,
                          'debridonly': False})
                 except:
                     pass
         return sources
     except:
         return sources
Code Example #3
 def sources(self, url, hostDict, hostprDict):
     sources = []
     try:
         if url is None:
             return sources
         data = urlparse.parse_qs(url)
         data = dict([(i, data[i][0]) if data[i] else (i, '')
                      for i in data])
         q = '%s' % cleantitle.geturl(data['title'])
         url = self.base_link + self.search_link % q.replace('-', '+')
         r = self.scraper.get(url).content
         v = re.compile(
             '<a href="(.+?)" class="ml-mask jt" title="(.+?)">\n<span class=".+?">(.+?)</span>'
         ).findall(r)
         for url, check, quality in v:
             t = '%s (%s)' % (data['title'], data['year'])
             if t not in check: raise Exception()
             r = self.scraper.get(url + '/watch.html').content
             url = re.compile('<iframe.+?src="(.+?)"').findall(r)[0]
             quality = source_utils.check_url(quality)
             valid, host = source_utils.is_host_valid(url, hostDict)
             if valid:
                 sources.append({
                     'source': host,
                     'quality': quality,
                     'language': 'en',
                     'url': url,
                     'direct': False,
                     'debridonly': False
                 })
         return sources
     except BaseException:
         return sources
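Most of these scrapers begin with the same boilerplate: the url argument is an urlencoded query string, and urlparse.parse_qs plus a dict comprehension flatten it to single values. A sketch of that step in isolation (the sample query string is illustrative):

import urlparse  # Python 2, as in the examples

url = 'title=Some+Movie&year=2019&imdb=tt0000000'  # illustrative input
data = urlparse.parse_qs(url)
# parse_qs yields {'title': ['Some Movie'], ...}; keep the first value,
# or '' for keys that arrived without one.
data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
# data == {'title': 'Some Movie', 'year': '2019', 'imdb': 'tt0000000'}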
Code Example #4
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []
            r = self.scraper.get(url).content
            try:
                qual = re.compile('class="quality">(.+?)<').findall(r)

                quality = 'SD'
                for i in qual:
                    if 'HD' in i:
                        quality = '1080p'
                match = re.compile('<iframe src="(.+?)"').findall(r)
                for url in match:
                    valid, host = source_utils.is_host_valid(url, hostDict)
                    sources.append({
                        'source': host,
                        'quality': quality,
                        'language': 'en',
                        'url': url,
                        'direct': False,
                        'debridonly': False
                    })
            except:
                return
        except Exception:
            return
        return sources
Code Example #5
 def sources(self, url, hostDict, hostprDict):
     try:
         sources = []
         r = client.request(url)
         try:
             match = re.compile(
                 'href="(.+?)" rel="noindex\,nofollow">Watch This Link</a>'
             ).findall(r)
             for url in match:
                 r = client.request(url)
                 match = re.compile(
                     '<a href="(.+?)://(.+?)/(.+?)"><button class="wpb\_button  wpb\_btn\-primary wpb\_regularsize"> Click Here To Play</button> </a>'
                 ).findall(r)
                 for http, host, url in match:
                     url = '%s://%s/%s' % (http, host, url)
                     info = quality = source_utils.check_url(url)
                     valid, host = source_utils.is_host_valid(
                         host, hostDict)
                     if valid:
                         sources.append({
                             'source': host,
                             'quality': quality,
                             'language': 'en',
                             'info': info,
                             'url': url,
                             'direct': False,
                             'debridonly': False
                         })
         except:
             return
     except Exception:
         return
     return sources
Code Example #6
 def sources(self, url, hostDict, hostprDict):
     try:
         sources = []
         hostDict = hostprDict + hostDict
         if url == None: return sources
         r = self.scraper.get(url).content
         quality = re.findall(">(\w+)<\/p", r)
         if quality and quality[0] == "HD":
             quality = "720p"
         else:
             quality = "SD"
         r = dom_parser2.parse_dom(r, 'div', {'id': 'servers-list'})
         r = [dom_parser2.parse_dom(i, 'a', req=['href']) for i in r if i]
         for i in r[0]:
             url = {
                 'url': i.attrs['href'],
                 'data-film': i.attrs['data-film'],
                 'data-server': i.attrs['data-server'],
                 'data-name': i.attrs['data-name']
             }
             url = urllib.urlencode(url)
             valid, host = source_utils.is_host_valid(i.content, hostDict)
             if valid:
                 sources.append({
                     'source': host,
                     'quality': quality,
                     'language': 'en',
                     'url': url,
                     'direct': False,
                     'debridonly': False
                 })
         return sources
     except:
         return sources
Code Example #7
File: watchepisodes.py  Project: schubic/install
    def sources(self, url, hostDict, hostprDict):
        sources = []
        try:
            if url is None:
                return sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])
            title = data['tvshowtitle']
            hdlr = 's%02de%02d' % (int(data['season']), int(data['episode']))

            query = urllib.quote_plus(cleantitle.getsearch(title))
            surl = urlparse.urljoin(self.base_link, self.search_link % query)
            r = client.request(surl, XHR=True)
            r = json.loads(r)
            r = r['series']
            for i in r:
                tit = i['value']
                if not cleantitle.get(title) == cleantitle.get(tit):
                    continue
                slink = i['seo']
                slink = urlparse.urljoin(self.base_link, slink)

                r = client.request(slink)
                if not data['imdb'] in r: raise Exception()
                epi_divs = client.parseDOM(r, 'div', {'class': 'el-item\s*'})
                epis = [
                    client.parseDOM(i, 'a', ret='href')[0] for i in epi_divs if i
                ]
                epis = [i for i in epis if hdlr in i.lower()][0]
                r = client.request(epis)
                links = client.parseDOM(r, 'a', ret='data-actuallink')
                for url in links:
                    try:
                        valid, host = source_utils.is_host_valid(url, hostDict)
                        if not valid: raise Exception()
                        sources.append({
                            'source': host,
                            'quality': 'SD',
                            'language': 'en',
                            'url': url,
                            'direct': False,
                            'debridonly': False
                        })
                    except BaseException:
                        continue

            return sources
        except BaseException:
            return sources
Code Example #8
File: primewire.py  Project: schubic/install
    def sources(self, url, hostDict, hostprDict):
        sources = []
        try:
            if url is None:
                return sources

            url = urlparse.urljoin(self.base_link, url) if not url.startswith('http') else url

            result = self.scraper.get(url).content
            links = client.parseDOM(result, 'tbody')

            for i in links:
                try:
                    data = (client.parseDOM(i, 'a', ret='href')[0],
                            client.parseDOM(i, 'span', attrs={'class': 'version_host'})[0])
                    url = urlparse.urljoin(self.base_link, data[0])
                    url = client.replaceHTMLCodes(url)
                    url = url.encode('utf-8')

                    host = data[1]
                    valid, host = source_utils.is_host_valid(host, hostDict)
                    if not valid:
                        raise Exception()

                    quality = client.parseDOM(i, 'span', ret='class')[0]
                    quality, info = source_utils.get_release_quality(
                        quality, url)

                    sources.append({'source': host,
                                    'quality': quality,
                                    'language': 'en',
                                    'url': url,
                                    'direct': False,
                                    'debridonly': False})
                except Exception:
                    pass

            return sources
        except Exception:
            return sources
Code Example #9
File: watchfree1.py  Project: schubic/install
 def sources(self, url, hostDict, hostprDict):
     try:
         sources = []
         openload_limit = 1
         vshare_limit = 1
         flashx_limit = 1
         thevideobee_limit = 1
         entervideo_limit = 1
         megamp4_limit = 1
         vidtodo_limit = 1
         r = self.scraper.get(url).content
         try:
             v = re.findall('document.write\(Base64.decode\("(.+?)"\)',
                            r)[0]
             b64 = base64.b64decode(v)
             url = client.parseDOM(b64, 'iframe', ret='src')[0]
             try:
                 host = re.findall(
                     '([\w]+[.][\w]+)$',
                     urlparse.urlparse(url.strip().lower()).netloc)[0]
                 host = client.replaceHTMLCodes(host)
                 host = host.encode('utf-8')
                 valid, host = source_utils.is_host_valid(host, hostDict)
                 if valid:
                     sources.append({
                         'source': host,
                         'quality': 'SD',
                         'language': 'en',
                         'url': url.replace('\/', '/'),
                         'direct': False,
                         'debridonly': False
                     })
             except:
                 pass
         except:
             pass
         r = client.parseDOM(r, 'div', {'class': 'server_line'})
         r = [(client.parseDOM(i, 'a', ret='href')[0],
               client.parseDOM(i, 'p', attrs={'class':
                                              'server_servername'})[0])
              for i in r]
         if r:
             for i in r:
                 try:
                     host = re.sub('Server|Link\s*\d+', '', i[1]).lower()
                     url = i[0]
                     host = client.replaceHTMLCodes(host)
                     host = host.encode('utf-8')
                     if 'other' in host: continue
                     valid, host = source_utils.is_host_valid(
                         host, hostDict)
                     if 'openload' in host:
                         if openload_limit < 1:
                             continue
                         else:
                             openload_limit -= 1
                     if 'vshare' in host:
                         if vshare_limit < 1:
                             continue
                         else:
                             vshare_limit -= 1
                     if 'flashx' in host:
                         if flashx_limit < 1:
                             continue
                         else:
                             flashx_limit -= 1
                     if 'thevideobee' in host:
                         if thevideobee_limit < 1:
                             continue
                         else:
                             thevideobee_limit -= 1
                     if 'entervideo' in host:
                         if entervideo_limit < 1:
                             continue
                         else:
                             entervideo_limit -= 1
                     if 'megamp4' in host:
                         if megamp4_limit < 1:
                             continue
                         else:
                             megamp4_limit -= 1
                     if 'vidtodo' in host:
                         if vidtodo_limit < 1:
                             continue
                         else:
                             vidtodo_limit -= 1
                     if valid:
                         sources.append({
                             'source': host,
                             'quality': 'SD',
                             'language': 'en',
                             'url': url.replace('\/', '/'),
                             'direct': False,
                             'debridonly': False
                         })
                 except:
                     pass
         return sources
     except Exception:
         return
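The chain of per-hoster counters above (and the longer one in Code Example #14) collapses into a single dict of quotas. A sketch of that refactor under the same behavior; the hoster substrings are the ones the example checks, and the helper name over_limit is hypothetical:

# One counter per hoster substring instead of seven if/else blocks.
limits = {'openload': 1, 'vshare': 1, 'flashx': 1, 'thevideobee': 1,
          'entervideo': 1, 'megamp4': 1, 'vidtodo': 1}

def over_limit(host, limits):
    # True when this host's quota is exhausted; otherwise decrement
    # the matching counter, mirroring the inline checks above.
    for name in limits:
        if name in host:
            if limits[name] < 1:
                return True
            limits[name] -= 1
    return False

# Inside the loop, the seven blocks become:
#     if over_limit(host, limits): continue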
Code Example #10
 def sources(self, url, hostDict, hostprDict):
     try:
         sources = []
         if url == None: return sources
         if debrid.status() is False: raise Exception()
         data = urlparse.parse_qs(url)
         data = dict([(i, data[i][0]) if data[i] else (i, '')
                      for i in data])
         title = data['tvshowtitle'] if 'tvshowtitle' in data else data[
             'title']
         hdlr = 'S%02dE%02d' % (int(data['season']), int(
             data['episode'])) if 'tvshowtitle' in data else data['year']
         query = '%s S%02dE%02d' % (
             data['tvshowtitle'], int(data['season']),
             int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % (
                 data['title'], data['year'])
         query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)
         url = self.search_link % urllib.quote_plus(query)
         url = urlparse.urljoin(self.base_link, url)
         html = client.request(url)
         posts = client.parseDOM(html, 'item')
         hostDict = hostprDict + hostDict
         items = []
         for post in posts:
             try:
                 t = client.parseDOM(post, 'title')[0]
                 u = client.parseDOM(post, 'a', ret='href')
                 s = re.search(
                     '((?:\d+\.\d+|\d+\,\d+|\d+)\s*(?:GB|GiB|MB|MiB))',
                     post)
                 s = s.groups()[0] if s else '0'
                 items += [(t, i, s) for i in u]
             except:
                 pass
         for item in items:
             try:
                 url = item[1]
                 if any(x in url for x in ['.rar', '.zip', '.iso']):
                     raise Exception()
                 url = client.replaceHTMLCodes(url)
                 url = url.encode('utf-8')
                 valid, host = source_utils.is_host_valid(url, hostDict)
                 if not valid: raise Exception()
                 host = client.replaceHTMLCodes(host)
                 host = host.encode('utf-8')
                 name = item[0]
                 name = client.replaceHTMLCodes(name)
                 t = re.sub(
                     '(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*|3D)(\.|\)|\]|\s|)(.+|)',
                     '',
                     name,
                     flags=re.I)
                 if not cleantitle.get(t) == cleantitle.get(title):
                     raise Exception()
                 y = re.findall(
                     '[\.|\(|\[|\s](\d{4}|S\d*E\d*|S\d*)[\.|\)|\]|\s]',
                     name)[-1].upper()
                 if not y == hdlr: raise Exception()
                 quality, info = source_utils.get_release_quality(name, url)
                 try:
                     size = re.findall(
                         '((?:\d+\.\d+|\d+\,\d+|\d+)\s*(?:GB|GiB|MB|MiB))',
                         item[2])[-1]
                     div = 1 if size.endswith(('GB', 'GiB')) else 1024
                     size = float(re.sub('[^0-9|/.|/,]', '', size)) / div
                     size = '%.2f GB' % size
                     info.append(size)
                 except:
                     pass
                 info = ' | '.join(info)
                 sources.append({
                     'source': host,
                     'quality': quality,
                     'language': 'en',
                     'url': url,
                     'info': info,
                     'direct': False,
                     'debridonly': True
                 })
             except:
                 pass
         check = [i for i in sources if not i['quality'] == 'CAM']
         if check: sources = check
         return sources
     except:
         return
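Code Examples #10, #11 and #16 each inline the same size-normalization step: find a '<number> GB/MB' token in free text and convert it to gigabytes. A reusable sketch of that logic; the helper name format_size is hypothetical, and the comma handling follows Example #11, which treats ',' as a decimal separator:

import re

def format_size(text):
    # Normalize '1.4 GB', '700 MB', '1,4 GB', etc. to 'X.XX GB'.
    m = re.findall(r'((?:\d+\.\d+|\d+,\d+|\d+)\s*(?:GB|GiB|MB|MiB))', text)
    if not m:
        return ''
    size = m[-1]
    div = 1 if size.endswith(('GB', 'GiB')) else 1024
    value = float(re.sub(r'[^0-9.]', '', size.replace(',', '.'))) / div
    return '%.2f GB' % value

# format_size('Movie.2019.x264 700 MB') -> '0.68 GB'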
Code Example #11
    def _get_sources(self, item, hostDict):
        try:
            quality, info = source_utils.get_release_quality(item[0], item[1])
            size = item[2] if item[2] != '0' else item[0]

            try:
                size = re.findall(
                    '((?:\d+\,\d+\.\d+|\d+\.\d+|\d+\,\d+|\d+)\s*(?:GB|GiB|MB|MiB))',
                    size)[-1]
                div = 1 if size.endswith(('GB', 'GiB')) else 1024
                size = float(re.sub('[^0-9|/.|/,]', '', size.replace(
                    ',', '.'))) / div
                size = '%.2f GB' % size
                info.append(size)

            except Exception:
                pass

            data = self.scraper.get(item[1]).content

            try:
                r = client.parseDOM(data, 'li', attrs={'class': 'elemento'})
                r = [(dom_parser2.parse_dom(i, 'a', req='href')[0],
                      dom_parser2.parse_dom(i, 'img', req='alt')[0],
                      dom_parser2.parse_dom(i, 'span', {'class': 'd'})[0])
                     for i in r]
                urls = [('http:' + i[0].attrs['href']
                         if not i[0].attrs['href'].startswith('http') else
                         i[0].attrs['href'], i[1].attrs['alt'], i[2].content)
                        for i in r if i[0] and i[1]]

                for url, host, qual in urls:

                    try:
                        if any(x in url
                               for x in ['.rar', '.zip', '.iso', ':Upcoming']):
                            raise Exception()
                        url = client.replaceHTMLCodes(url)
                        url = url.encode('utf-8')

                        valid, host = source_utils.is_host_valid(
                            host, hostDict)
                        if not valid: continue
                        host = client.replaceHTMLCodes(host)
                        host = host.encode('utf-8')
                        quality, info = source_utils.get_release_quality(
                            qual, quality)
                        info.append('HEVC')
                        info = ' | '.join(info)
                        self._sources.append({
                            'source': host,
                            'quality': quality,
                            'language': 'en',
                            'url': url,
                            'info': info,
                            'direct': False,
                            'debridonly': True
                        })
                    except Exception:
                        pass
            except Exception:
                pass

        except BaseException:
            return
Code Example #12
File: yifimovies.py  Project: schubic/install
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []
            if url is None: return sources
            if debrid.status() is False: raise Exception()

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            url = self.searchMovie(data['title'], data['year'])
            if url is None: return sources

            r = client.request(url)
            data = client.parseDOM(r, 'div', attrs={'class': 'playex'})[0]
            frames = client.parseDOM(data, 'iframe', ret='src')
            frames += re.compile('''<iframe\s*src=['"](.+?)['"]''',
                                 re.DOTALL).findall(data)
            quality = client.parseDOM(r, 'span', attrs={'class':
                                                        'qualityx'})[0]
            for frame in frames:
                url = frame.split('=')[1] if frame.startswith('<') else frame
                url = client.replaceHTMLCodes(url)
                url = url.encode('utf-8')

                valid, host = source_utils.is_host_valid(url, hostDict)

                if valid:
                    quality, info = source_utils.get_release_quality(
                        quality, url)
                    info = ' | '.join(info)
                    sources.append({
                        'source': host,
                        'quality': quality,
                        'language': 'en',
                        'url': url,
                        'info': info,
                        'direct': False,
                        'debridonly': False
                    })

                elif url.endswith('mp4'):
                    url += '|User-Agent=%s' % urllib.quote_plus(client.agent())
                    sources.append({
                        'source': 'MP4',
                        'quality': quality,
                        'language': 'en',
                        'url': url,
                        'direct': True,
                        'debridonly': False
                    })

                elif 'mystream' in url:
                    data = client.request(url)
                    links = dom_parser2.parse_dom(data,
                                                  'source',
                                                  req=['src', 'label'])
                    for link in links:
                        label = link.attrs['label']
                        url = link.attrs[
                            'src'] + '|User-Agent=%s' % urllib.quote_plus(
                                client.agent())

                        sources.append({
                            'source': 'MYSTREAM',
                            'quality': label,
                            'language': 'en',
                            'url': url,
                            'direct': True,
                            'debridonly': False
                        })

                else:
                    continue
            return sources
        except Exception:
            return sources
Code Example #13
File: cartoonhd.py  Project: schubic/install
 def sources(self, url, hostDict, hostprDict):
     try:
         sources = []
         if url is None: return sources
         data = urlparse.parse_qs(url)
         data = dict([(i, data[i][0]) if data[i] else (i, '')
                      for i in data])
         title = data['tvshowtitle'] if 'tvshowtitle' in data else data[
             'title']
         imdb = data['imdb']
         aliases = eval(data['aliases'])
         headers = {}
         if 'tvshowtitle' in data:
             url = self.searchShow(title, int(data['season']),
                                   int(data['episode']), aliases, headers)
         else:
             url = self.searchMovie(title, data['year'], aliases, headers)
         r = client.request(url,
                            headers=headers,
                            output='extended',
                            timeout='10')
         if not imdb in r[0]: raise Exception()
         cookie = r[4]
         headers = r[3]
         result = r[0]
         try:
             r = re.findall('(https:.*?redirector.*?)[\'\"]', result)
             for i in r:
                 try:
                     sources.append({
                         'source': 'gvideo',
                         'quality': directstream.googletag(i)[0]['quality'],
                         'language': 'en',
                         'url': i,
                         'direct': True,
                         'debridonly': False
                     })
                 except:
                     pass
         except:
             pass
         try:
             auth = re.findall('__utmx=(.+)', cookie)[0].split(';')[0]
         except:
             auth = 'false'
         auth = 'Bearer %s' % urllib.unquote_plus(auth)
         headers['Authorization'] = auth
         headers['Referer'] = url
         u = '/ajax/vsozrflxcw.php'
         self.base_link = client.request(self.base_link,
                                         headers=headers,
                                         output='geturl')
         u = urlparse.urljoin(self.base_link, u)
         action = 'getEpisodeEmb' if '/episode/' in url else 'getMovieEmb'
         elid = urllib.quote(
             base64.encodestring(str(int(time.time()))).strip())
         token = re.findall("var\s+tok\s*=\s*'([^']+)", result)[0]
         idEl = re.findall('elid\s*=\s*"([^"]+)', result)[0]
         post = {
             'action': action,
             'idEl': idEl,
             'token': token,
             'nopop': '',
             'elid': elid
         }
         post = urllib.urlencode(post)
         cookie += ';%s=%s' % (idEl, elid)
         headers['Cookie'] = cookie
         r = client.request(u,
                            post=post,
                            headers=headers,
                            cookie=cookie,
                            XHR=True)
         r = str(json.loads(r))
         r = re.findall('\'(http.+?)\'', r) + re.findall('\"(http.+?)\"', r)
         for i in r:
             try:
                 if 'google' in i:
                     quality = 'SD'
                     if 'googleapis' in i:
                         try:
                             quality = source_utils.check_sd_url(i)
                         except:
                             pass
                     if 'googleusercontent' in i:
                         i = directstream.googleproxy(i)
                         try:
                             quality = directstream.googletag(
                                 i)[0]['quality']
                         except:
                             pass
                     sources.append({
                         'source': 'gvideo',
                         'quality': quality,
                         'language': 'en',
                         'url': i,
                         'direct': True,
                         'debridonly': False
                     })
                 elif 'llnwi.net' in i or 'vidcdn.pro' in i:
                     try:
                         quality = source_utils.check_sd_url(i)
                         sources.append({
                             'source': 'CDN',
                             'quality': quality,
                             'language': 'en',
                             'url': i,
                             'direct': True,
                             'debridonly': False
                         })
                     except:
                         pass
                 else:
                     valid, hoster = source_utils.is_host_valid(i, hostDict)
                     if not valid: continue
                     sources.append({
                         'source': hoster,
                         'quality': '720p',
                         'language': 'en',
                         'url': i,
                         'direct': False,
                         'debridonly': False
                     })
             except:
                 pass
         return sources
     except:
         return sources
Code Example #14
File: mywatchepseries.py  Project: schubic/install
 def sources(self, url, hostDict, hostprDict):
     try:
         sources = []
         results_limit = 30
         vshare_limit = 1
         openload_limit = 1
         speedvid_limit = 1
         vidoza_limit = 1
         vidlox_limit = 1
         mango_limit = 1
         streamplay_limit = 1
         vidtodo_limit = 1
         clipwatch_limit = 1
         vidcloud_limit = 1
         vev_limit = 1
         flix555_limit = 1
         if url == None: return sources
         r = client.request(url)
         r = dom_parser2.parse_dom(r, 'div', {'class': 'll-item'})
         r = [(dom_parser2.parse_dom(i, 'a', req='href'), \
               dom_parser2.parse_dom(i, 'div', {'class': 'notes'})) \
              for i in r if i]
         r = [(i[0][0].attrs['href'], i[0][0].content,
               i[1][0].content if i[1] else 'None') for i in r]
         for i in r:
             try:
                 url = i[0]
                 url = client.replaceHTMLCodes(url)
                 url = url.encode('utf-8')
                 valid, host = source_utils.is_host_valid(i[1], hostDict)
                 if not valid: continue
                 host = client.replaceHTMLCodes(host)
                 host = host.encode('utf-8')
                 if 'vshare' in host:
                     if vshare_limit < 1:
                         continue
                     else:
                         vshare_limit -= 1
                 if 'openload' in host:
                     if openload_limit < 1:
                         continue
                     else:
                         openload_limit -= 1
                 if 'speedvid' in host:
                     if speedvid_limit < 1:
                         continue
                     else:
                         speedvid_limit -= 1
                 if 'vidoza' in host:
                     if vidoza_limit < 1:
                         continue
                     else:
                         vidoza_limit -= 1
                 if 'vidlox' in host:
                     if vidlox_limit < 1:
                         continue
                     else:
                         vidlox_limit -= 1
                 if 'vidtodo' in host:
                     if vidtodo_limit < 1:
                         continue
                     else:
                         vidtodo_limit -= 1
                 if 'mango' in host:
                     if mango_limit < 1:
                         continue
                     else:
                         mango_limit -= 1
                 if 'streamplay' in host:
                     if streamplay_limit < 1:
                         continue
                     else:
                         streamplay_limit -= 1
                 if 'clipwatch' in host:
                     if clipwatch_limit < 1:
                         continue
                     else:
                         clipwatch_limit -= 1
                 if 'vidcloud' in host:
                     if vidcloud_limit < 1:
                         continue
                     else:
                         vidcloud_limit -= 1
                 if 'vev' in host:
                     if vev_limit < 1:
                         continue
                     else:
                         vev_limit -= 1
                 if 'flix555' in host:
                     if flix555_limit < 1:
                         continue
                     else:
                         flix555_limit -= 1
                 info = []
                 quality, info = source_utils.get_release_quality(
                     i[2], i[2])
                 info = ' | '.join(info)
                 if results_limit < 1:
                     continue
                 else:
                     results_limit -= 1
                 sources.append({
                     'source': host,
                     'quality': quality,
                     'language': 'en',
                     'url': url,
                     'info': info,
                     'direct': False,
                     'debridonly': False
                 })
             except:
                 pass
         return sources
     except:
         return sources
Code Example #15
File: gowatchseries.py  Project: schubic/install
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url == None: return sources

            if not str(url).startswith('http'):

                data = urlparse.parse_qs(url)
                data = dict([(i, data[i][0]) if data[i] else (i, '')
                             for i in data])

                title = data['tvshowtitle'] if 'tvshowtitle' in data else data[
                    'title']
                if 'season' in data: season = data['season']
                if 'episode' in data: episode = data['episode']
                year = data['year']

                query = urlparse.urljoin(
                    self.base_link, self.search_link %
                    urllib.quote_plus(cleantitle.getsearch(title)))
                r = self.scraper.get(query).content
                r = json.loads(r)['content']
                r = zip(client.parseDOM(r, 'a', ret='href'),
                        client.parseDOM(r, 'a'))

                if 'tvshowtitle' in data:
                    cltitle = cleantitle.get(title + 'season' + season)
                    cltitle2 = cleantitle.get(title +
                                              'season%02d' % int(season))
                    r = [
                        i for i in r if cltitle == cleantitle.get(i[1])
                        or cltitle2 == cleantitle.get(i[1])
                    ]
                    vurl = '%s%s-episode-%s' % (self.base_link, str(
                        r[0][0]).replace('/info', ''), episode)
                    vurl2 = None
                else:
                    cltitle = cleantitle.getsearch(title)
                    cltitle2 = cleantitle.getsearch('%s (%s)' % (title, year))
                    r = [
                        i for i in r if cltitle2 == cleantitle.getsearch(i[1])
                        or cltitle == cleantitle.getsearch(i[1])
                    ]
                    vurl = '%s%s-episode-0' % (self.base_link, str(
                        r[0][0]).replace('/info', ''))
                    vurl2 = '%s%s-episode-1' % (self.base_link, str(
                        r[0][0]).replace('/info', ''))

                r = self.scraper.get(vurl).content

                slinks = client.parseDOM(r,
                                         'div',
                                         attrs={'class': 'anime_muti_link'})
                slinks = client.parseDOM(slinks, 'li', ret='data-video')
                if len(slinks) == 0 and vurl2 is not None:
                    r = self.scraper.get(vurl2).content
                    slinks = client.parseDOM(
                        r, 'div', attrs={'class': 'anime_muti_link'})
                    slinks = client.parseDOM(slinks, 'li', ret='data-video')

                for slink in slinks:
                    try:
                        if 'vidnode.net/streaming.php' in slink:
                            r = self.scraper.get('https:%s' % slink).content
                            clinks = re.findall(r'sources:\[(.*?)\]', r)[0]
                            clinks = re.findall(
                                r'file:\s*\'(http[^\']+)\',label:\s*\'(\d+)',
                                clinks)
                            for clink in clinks:
                                q = source_utils.label_to_quality(clink[1])
                                sources.append({
                                    'source': 'cdn',
                                    'quality': q,
                                    'language': 'en',
                                    'url': clink[0],
                                    'direct': True,
                                    'debridonly': False
                                })
                        else:
                            valid, hoster = source_utils.is_host_valid(
                                slink, hostDict)
                            if valid:
                                sources.append({
                                    'source': hoster,
                                    'quality': 'SD',
                                    'language': 'en',
                                    'url': slink,
                                    'direct': False,
                                    'debridonly': False
                                })
                    except:
                        pass

            return sources
        except:
            return sources
Code Example #16
File: bestmoviez.py  Project: schubic/install
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url == None: return sources

            if debrid.status() == False: raise Exception()

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            query = '%s s%02de%02d' % (
                data['tvshowtitle'], int(data['season']), int(data['episode'])
            ) if 'tvshowtitle' in data else '%s' % (data['title'])
            query = re.sub('[\\\\:;*?"<>|/ \+\']+', '-', query)

            url = self.search_link % urllib.quote_plus(query)
            url = urlparse.urljoin(self.base_link, url)
            r = self.scraper.get(url).content

            r = client.parseDOM(r, "div", attrs={'class': 'entry-content'})[0]
            r = re.sub('shareaholic-canvas.+', '', r, flags=re.DOTALL)

            a_txt = client.parseDOM(r, "a", attrs={'href': '.+?'})
            a_url = client.parseDOM(r, "a", ret="href")
            r = re.sub('<a .+?</a>', '', r, flags=re.DOTALL)
            r = re.sub('<img .+?>', '', r, flags=re.DOTALL)

            size = ''
            pre_txt = []
            pre_url = []
            pres = client.parseDOM(r, "pre", attrs={'style': '.+?'})
            for pre in pres:
                try:
                    size = re.findall('([0-9,\.]+ ?(?:GB|GiB|MB|MiB))', pre)[0]
                except:
                    pass

                url0 = re.findall('https?://[^ <"\'\s]+', pre, re.DOTALL)
                txt0 = [size] * len(url0)
                pre_url = pre_url + url0
                pre_txt = pre_txt + txt0

            r = re.sub('<pre .+?</pre>', '', r, flags=re.DOTALL)

            size = ''
            if not 'tvshowtitle' in data:
                try:
                    size = " " + re.findall('([0-9,\.]+ ?(?:GB|GiB|MB|MiB))',
                                            r)[0]
                except:
                    pass

            raw_url = re.findall('https?://[^ <"\'\s]+', r, re.DOTALL)
            raw_txt = [size] * len(raw_url)

            pairs = zip(a_url + pre_url + raw_url, a_txt + pre_txt + raw_txt)
            hostDict = hostDict + hostprDict

            for pair in pairs:
                try:
                    url = str(pair[0])
                    info = re.sub('<.+?>', '', pair[1])

                    if any(x in url for x in ['.rar', '.zip', '.iso']):
                        raise Exception()
                    if not query.lower() in re.sub('[\\\\:;*?"<>|/ \+\'\.]+',
                                                   '-', url + info).lower():
                        raise Exception()

                    size0 = info + " " + size

                    try:
                        size0 = re.findall('([0-9,\.]+ ?(?:GB|GiB|MB|MiB))',
                                           size0)[0]
                        div = 1 if size0.endswith(('GB', 'GiB')) else 1024
                        size0 = float(re.sub('[^0-9\.]', '', size0)) / div
                        size0 = '%.2f GB' % size0
                    except:
                        size0 = ''

                    quality, info = source_utils.get_release_quality(url, info)
                    info.append(size0)
                    info = ' | '.join(info)

                    url = url.encode('utf-8')

                    valid, host = source_utils.is_host_valid(url, hostDict)
                    if not valid: continue
                    sources.append({
                        'source': host,
                        'quality': quality,
                        'language': 'en',
                        'url': url,
                        'info': info,
                        'direct': False,
                        'debridonly': True
                    })

                except:
                    pass

            return sources
        except:
            return sources
Code Example #17
File: ultrahd.py  Project: schubic/install
    def sources(self, url, hostDict, hostprDict):
        sources = []
        try:
            if url is None:
                return sources

            if debrid.status() is False:
                raise Exception()

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            post = {'query': data['imdb']}
            url = urlparse.urljoin(self.base_link, 'engine/ajax/search.php')
            r = self.scraper.post(url, data=post).content
            urls = client.parseDOM(r, 'a', ret='href')
            urls = [i for i in urls if not data['imdb'] in i]

            hostDict = hostprDict + hostDict
            links = []
            for u in urls:
                try:
                    data = self.scraper.get(u).content
                    data = re.findall('</iframe>(.+?)QuoteEEnd--><br /><br',
                                      data, re.DOTALL)[0]
                    links += re.findall(
                        '''start--><b>(.+?)</b>.+?<b><a href=['"](.+?)['"]''',
                        data, re.DOTALL)

                except Exception:
                    pass
            links = [(i[0], i[1]) for i in links if not 'vip' in i[0].lower()]
            for name, url in links:
                try:
                    name = re.sub('<.+?>', '', name)
                    if '4K' in name:
                        quality = '4K'
                    elif '1080p' in name:
                        quality = '1080p'
                    elif '720p' in name:
                        quality = '720p'
                    else:
                        quality = 'SD'

                    info = []
                    if '3D' in name or '.3D.' in url:
                        info.append('3D')
                        quality = '1080p'
                    if any(i in name.lower() for i in ['hevc', 'h265', 'x265']):
                        info.append('HEVC')

                    info = ' | '.join(info)

                    url = client.replaceHTMLCodes(url)
                    url = url.encode('utf-8')
                    if any(x in url for x in ['.rar', '.zip', '.iso', 'turk']):
                        raise Exception()

                    if 'ftp' in url:
                        host = 'CDN'
                        direct = True
                    else:
                        valid, host = source_utils.is_host_valid(url, hostDict)
                        if not valid: raise Exception()
                        direct = False

                    host = client.replaceHTMLCodes(host)
                    host = host.encode('utf-8')

                    sources.append({
                        'source': host,
                        'quality': quality,
                        'language': 'en',
                        'url': url,
                        'info': info,
                        'direct': direct,
                        'debridonly': True
                    })
                except Exception:
                    pass

            return sources
        except Exception:
            return sources
Code Example #18
File: sceneddl.py  Project: schubic/install
    def sources(self, url, hostDict, hostprDict):
        try:
            hostDict = hostDict + hostprDict

            sources = []
            query_bases = []
            options = []

            if url is None:
                return sources

            if not debrid.status():
                return sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
            title = (data['tvshowtitle'] if 'tvshowtitle' in data else data['title'])
            hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else data['year']

            # tvshowtitle
            if 'tvshowtitle' in data:
                query_bases.append('%s ' % (data['tvshowtitle'].replace("-", "")))
                if 'year' in data:
                    query_bases.append('%s %s ' % (data['tvshowtitle'], data['year']))
                options.append('S%02dE%02d' % (int(data['season']), int(data['episode'])))
                options.append('S%02d' % (int(data['season'])))
            else:
                query_bases.append('%s %s ' % (data['title'], data['year']))
                query_bases.append('%s ' % (data['title']))
                options.append('2160p')
                options.append('')

            for option in options:
                for query_base in query_bases:
                    q = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', '', query_base + option)
                    q = q.replace("  ", " ").replace(" ", "+")
                    url = self.base_link + self.search_link % q
                    html = self.scraper.get(url)
                    if html.status_code == 200:
                        posts = client.parseDOM(html.content, "div", attrs={"class": "title"})
                        for post in posts:
                            url = client.parseDOM(post, "a", ret='href')
                            if len(url) > 0:
                                html = self.scraper.get(url[0])
                                if html.status_code == 200:
                                    quotes = client.parseDOM(html.content, "div", attrs={"class": "dlinks"})
                                    for quote in quotes:
                                        hrefs = client.parseDOM(quote, "a", ret='href')
                                        if not hrefs:
                                            continue
                                        for href in hrefs:
                                            quality = source_utils.check_sd_url(href)
                                            href = href.encode('utf-8')
                                            valid, host = source_utils.is_host_valid(href, hostDict)
                                            if any(x in href for x in ['.rar', '.zip', '.iso']):
                                                continue
                                            if not valid:
                                                continue
                                            if hdlr in href.upper() and cleantitle.get(title) in cleantitle.get(href):
                                                sources.append(
                                                    {'source': host, 'quality': quality, 'language': 'en', 'url': href,
                                                     'direct': False, 'debridonly': False})
                if len(sources) > 0:
                    return sources
            return sources
        except:
            return sources
Code Example #19
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []
            if url == None: return sources
            if debrid.status() == False: raise Exception()
            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])
            query = '%s S%02dE%02d' % (
                data['tvshowtitle'], int(data['season']), int(data['episode'])) \
                if 'tvshowtitle' in data else '%s' % (data['title'])
            url = self.search_link % urllib.quote_plus(query)
            url = urlparse.urljoin(self.base_link, url).replace('-', '+')
            r = self.scraper.get(url).content
            posts = client.parseDOM(r,
                                    "div",
                                    attrs={"class": "postpage_movie_download"})
            hostDict = hostprDict + hostDict
            items = []
            for post in posts:
                try:
                    u = client.parseDOM(post, 'a', ret='href')
                    for i in u:
                        items.append(i)
                except:
                    pass

            seen_urls = set()
            for item in items:
                try:
                    i = str(item)
                    r = client.request(i)
                    u = client.parseDOM(r,
                                        "div",
                                        attrs={"class": "multilink_lnks"})
                    for t in u:
                        r = client.parseDOM(t, 'a', ret='href')
                        for url in r:
                            if 'www.share-online.biz' in url:
                                continue
                            if url in seen_urls:
                                continue
                            seen_urls.add(url)
                            quality, info = source_utils.get_release_quality(
                                url)
                            if 'SD' in quality:
                                continue
                            valid, host = source_utils.is_host_valid(
                                url, hostDict)
                            sources.append({
                                'source': host,
                                'quality': quality,
                                'language': 'en',
                                'url': url,
                                'info': info,
                                'direct': False,
                                'debridonly': True
                            })
                except:
                    pass
            return sources
        except:
            return
Code Example #20
 def sources(self, url, hostDict, hostprDict):
     try:
         sources = []
         if url == None: return sources
         if debrid.status() == False: raise Exception()
         data = urlparse.parse_qs(url)
         data = dict([(i, data[i][0]) if data[i] else (i, '')
                      for i in data])
         title = data['tvshowtitle'] if 'tvshowtitle' in data else data[
             'title']
         hdlr = 'S%02dE%02d' % (int(data['season']), int(
             data['episode'])) if 'tvshowtitle' in data else data['year']
         query = '%s S%02dE%02d' % (
             data['tvshowtitle'], int(data['season']), int(data['episode'])) \
             if 'tvshowtitle' in data else '%s %s' % (data['title'], data['year'])
         url = self.search_link % urllib.quote_plus(query)
         url = urlparse.urljoin(self.base_link, url).replace('-', '+')
         r = self.scraper.get(url).content
         if r == None and 'tvshowtitle' in data:
             season = re.search('S(.*?)E', hdlr)
             season = season.group(1)
             url = title
             r = self.scraper.get(url).content
         for loopCount in range(0, 2):
             if loopCount == 1 or (r == None and 'tvshowtitle' in data):
                 r = self.scraper.get(url).content
             posts = client.parseDOM(r, "h2")
             hostDict = hostprDict + hostDict
             items = []
             for post in posts:
                 try:
                     u = client.parseDOM(post, 'a', ret='href')
                     for i in u:
                         try:
                             name = str(i)
                             items.append(name)
                         except:
                             pass
                 except:
                     pass
             if len(items) > 0: break
         for item in items:
             try:
                 info = []
                 i = str(item)
                 r = self.scraper.get(i).content
                 u = client.parseDOM(r,
                                     "div",
                                     attrs={"class": "entry-content"})
                 for t in u:
                     r = re.compile('a href="(.+?)">.+?<').findall(t)
                     query = query.replace(' ', '.')
                     for url in r:
                         if not query in url:
                             continue
                         if any(x in url for x in ['.rar', '.zip', '.iso']):
                             raise Exception()
                         quality, info = source_utils.get_release_quality(
                             url)
                         valid, host = source_utils.is_host_valid(
                             url, hostDict)
                         sources.append({
                             'source': host,
                             'quality': quality,
                             'language': 'en',
                             'url': url,
                             'info': info,
                             'direct': False,
                             'debridonly': True
                         })
             except:
                 pass
         check = [i for i in sources if not i['quality'] == 'CAM']
         if check: sources = check
         return sources
     except:
         return
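A final note on the query construction shared by Code Examples #10, #16, #19 and #20: the title (plus SxxExx or year) is scrubbed of punctuation with one regex before being URL-encoded. The step in isolation, with illustrative inputs:

import re
import urllib  # Python 2, matching the examples

title, season, episode = 'Some Show', 1, 2  # illustrative values
query = '%s S%02dE%02d' % (title, season, episode)
# Strip the separators these sites reject, then quote for the URL.
query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)
query = urllib.quote_plus(query)  # 'Some+Show+S01E02'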