Example 1
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []
            if url is None: return sources

            r = self.scraper.get(url).content
            quality = re.findall(r'>(\w+)</p', r)
            quality = '720p' if quality and quality[0] == 'HD' else 'SD'
            r = dom_parser2.parse_dom(r, 'div', {'id': 'servers-list'})
            r = [dom_parser2.parse_dom(i, 'a', req=['href']) for i in r if i]

            for i in r[0]:
                url = {
                    'url': i.attrs['href'],
                    'data-film': i.attrs['data-film'],
                    'data-server': i.attrs['data-server'],
                    'data-name': i.attrs['data-name']
                }
                url = urllib.urlencode(url)
                sources.append({
                    'source': i.content,
                    'quality': quality,
                    'language': 'en',
                    'url': url,
                    'direct': False,
                    'debridonly': False
                })
            return sources
        except Exception:
            return sources
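
The urlencoded payload stored in each source's 'url' field is meant to be decoded again later (compare the parse_qs pattern in Examples 3 and 14). A minimal sketch of that inverse step, as a hypothetical resolve-side helper (Python 2, matching the snippets):

import urlparse

def unpack_source_url(url):
    # Recover the fields packed by urllib.urlencode() in Example 1.
    data = urlparse.parse_qs(url)
    return dict((k, v[0]) for k, v in data.items())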
Example 2
 def sources(self, url, hostDict, hostprDict):
     try:
         sources = []
         r = self.scraper.get(url).content
         r = dom_parser2.parse_dom(r, 'p', {'class': 'server_play'})
         r = [dom_parser2.parse_dom(i, 'a', req=['href']) for i in r if i]
         r = [(i[0].attrs['href'],
               re.search(r'/(\w+)\.html', i[0].attrs['href'])) for i in r
              if i]
         r = [(i[0], i[1].groups()[0]) for i in r if i[0] and i[1]]
         for i in r:
             try:
                 host = i[1]
                 if host in hostDict:
                     host = client.replaceHTMLCodes(host)
                     host = host.encode('utf-8')
                     sources.append({
                         'source': host,
                         'quality': 'SD',
                         'language': 'en',
                         'url': i[0].replace('\/', '/'),
                         'direct': False,
                         'debridonly': False
                     })
             except:
                 pass
         return sources
     except Exception:
         return sources
Example 3
    def sources(self, url, hostDict, hostprDict):
        try:
            self._sources = []
            if url is None: return self._sources
            if debrid.status() is False: raise Exception()

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            title = data['tvshowtitle'] if 'tvshowtitle' in data else data[
                'title']
            hdlr = 'S%02dE%02d' % (int(data['season']), int(
                data['episode'])) if 'tvshowtitle' in data else data['year']

            query = '%s S%02dE%02d' % (
                data['tvshowtitle'], int(data['season']),
                int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % (
                    data['title'], data['year'])
            query = re.sub(r'(\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)

            query = self.search_link % cleantitle.geturl(query)
            url = urlparse.urljoin(self.base_link, query)
            r = client.request(url)
            posts = dom_parser2.parse_dom(r, 'div', {'class': 'eTitle'})
            posts = [
                dom_parser2.parse_dom(i.content, 'a', req='href')
                for i in posts if i
            ]
            posts = [(i[0].attrs['href'], re.sub('<.+?>', '', i[0].content))
                     for i in posts if i]
            posts = [
                (i[0], i[1]) for i in posts
                if (cleantitle.get_simple(i[1].split(hdlr)[0]) ==
                    cleantitle.get(title) and hdlr.lower() in i[1].lower())
            ]
            self.hostDict = hostDict + hostprDict
            threads = []

            for i in posts:
                threads.append(workers.Thread(self._get_sources, i))
            for i in threads:
                i.start()
            # join() already blocks until each worker finishes, so no
            # extra is_alive() polling loop is needed.
            for i in threads:
                i.join()
            return self._sources
        except Exception:
            return self._sources
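
The start/join fan-out above relies on the add-on's `workers` module. Assuming `workers.Thread(target, *args)` mirrors the stdlib interface, a minimal stdlib-only sketch of the same pattern:

import threading

def run_threads(target, items):
    # One worker per item; start them all, then wait for completion.
    threads = [threading.Thread(target=target, args=(i,)) for i in items]
    for t in threads:
        t.start()
    for t in threads:
        t.join()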
Example 4
    def sources(self, url, hostDict, hostprDict):

        self.sources = []

        try:
            if url is None:
                return self.sources

            if debrid.status() is False:
                raise Exception()

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            title = data['tvshowtitle'] if 'tvshowtitle' in data else data[
                'title']

            hdlr = data['year']
            hdlr2 = 'S%02dE%02d' % (int(data['season']), int(
                data['episode'])) if 'tvshowtitle' in data else ''
            imdb = data['imdb']

            url = self.search(title, hdlr)
            headers = {'User-Agent': client.agent()}
            r = self.scraper.get(url, headers=headers).content
            if hdlr2 == '':
                r = dom_parser2.parse_dom(r, 'ul', {'id': 'releases'})[0]
            else:
                r = dom_parser2.parse_dom(r, 'ul', {'id': 'episodes'})[0]
            r = dom_parser2.parse_dom(r.content, 'a', req=['href'])
            r = [(i.content, urlparse.urljoin(self.base_link, i.attrs['href']))
                 for i in r if i and i.content != 'Watch']
            if hdlr2 != '':
                r = [(i[0], i[1]) for i in r if hdlr2.lower() in i[0].lower()]

            self.hostDict = hostDict + hostprDict
            threads = []

            for i in r:
                threads.append(workers.Thread(self._get_sources, i[0], i[1]))
            for i in threads:
                i.start()
            # join() replaces the original busy-wait polling loop.
            for i in threads:
                i.join()
            return self.sources
        except Exception:
            return self.sources
Example 5
    def _get_sources(self, item, hostDict):
        try:
            quality, info = source_utils.get_release_quality(item[0], item[1])
            size = item[2] if item[2] != '0' else item[0]

            try:
                size = re.findall(r'((?:\d+\,\d+\.\d+|\d+\.\d+|\d+\,\d+|\d+)\s*(?:GB|GiB|MB|MiB))', size)[-1]
                div = 1 if size.endswith(('GB', 'GiB')) else 1024
                size = float(re.sub(r'[^0-9.,]', '', size.replace(',', '.'))) / div
                size = '%.2f GB' % size
                info.append(size)

            except Exception:
                pass

            data = self.scraper.get(item[1]).content

            try:
                r = client.parseDOM(data, 'li', attrs={'class': 'elemento'})
                r = [(dom_parser2.parse_dom(i, 'a', req='href')[0],
                      dom_parser2.parse_dom(i, 'img', req='alt')[0],
                      dom_parser2.parse_dom(i, 'span', {'class': 'd'})[0]) for i in r]
                urls = [('http:' + i[0].attrs['href'] if not i[0].attrs['href'].startswith('http') else
                         i[0].attrs['href'], i[1].attrs['alt'], i[2].content) for i in r if i[0] and i[1]]

                for url, host, qual in urls:

                    try:
                        if any(x in url for x in ['.rar', '.zip', '.iso', ':Upcoming']): raise Exception()
                        url = client.replaceHTMLCodes(url)
                        url = url.encode('utf-8')

                        valid, host = source_utils.is_host_valid(host, hostDict)
                        if not valid: continue
                        host = client.replaceHTMLCodes(host)
                        host = host.encode('utf-8')
                        quality, info = source_utils.get_release_quality(qual, quality)
                        info.append('HEVC')
                        info = ' | '.join(info)
                        self._sources.append(
                            {'source': host, 'quality': quality, 'language': 'en', 'url': url, 'info': info,
                             'direct': False, 'debridonly': True})
                    except Exception:
                        pass
            except Exception:
                pass

        except Exception:
            return
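
The size-extraction block above reappears almost verbatim in Examples 15-18. A consolidated helper (hypothetical, not part of any of these classes) could replace it:

import re

def parse_size_gb(text):
    # Return the last "<number> GB/GiB/MB/MiB" token in `text`,
    # normalized to an "X.XX GB" string, or None if no size is found.
    sizes = re.findall(r'(\d+(?:[.,]\d+)*)\s*(GB|GiB|MB|MiB)', text)
    if not sizes:
        return None
    value, unit = sizes[-1]
    if ',' in value and '.' in value:
        value = value.replace(',', '')   # ',' as thousands separator
    value = float(value.replace(',', '.'))
    if unit in ('MB', 'MiB'):
        value /= 1024
    return '%.2f GB' % value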
Example 6
    def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
        try:
            query = self.tvsearch_link % urllib.quote_plus(
                cleantitle.query(tvshowtitle))
            query = urlparse.urljoin(self.base_link, query.lower())

            result = self.scraper.get(query).content
            result = client.parseDOM(
                result, 'div', attrs={
                    'class': 'index_item.+?'})

            result = [(dom_parser2.parse_dom(i, 'a', req=['href', 'title'])[0])
                      for i in result if i]
            result = [
                (i.attrs['href']) for i in result if cleantitle.get(tvshowtitle) == cleantitle.get(
                    re.sub(
                        r'(\.|\(|\[|\s)(\d{4}|S\d+E\d+|S\d+|3D)(\.|\)|\]|\s|)(.+|)',
                        '',
                        i.attrs['title'],
                        flags=re.I))][0]

            url = client.replaceHTMLCodes(result)
            url = url.encode('utf-8')
            return url
        except Exception:
            return
Example 7
 def episode(self, url, imdb, tvdb, title, premiered, season, episode):
     try:
         if url is None: return
         url = urlparse.parse_qs(url)
         url = dict([(i, url[i][0]) if url[i] else (i, '') for i in url])
         clean_title = cleantitle.geturl(url['tvshowtitle']) + '-s%02d' % int(season)
         url = urlparse.urljoin(self.base_link, (self.search_link % (clean_title, url['year'])))
         r = self.scraper.get(url).content
         r = dom_parser2.parse_dom(r, 'div', {'id': 'ip_episode'})
         r = [dom_parser2.parse_dom(i, 'a', req=['href']) for i in r if i]
         for i in r[0]:
             if i.content == 'Episode %s' % episode:
                 url = i.attrs['href']
         return url
     except Exception:
         return
Example 8
 def movie(self, imdb, title, localtitle, aliases, year):
     try:
         clean_title = cleantitle.geturl(title).replace('-', '+')
         url = urlparse.urljoin(self.base_link, (self.search_link % clean_title))
         r = self.scraper.get(url).content
         r = dom_parser2.parse_dom(r, 'div', {'id': 'movie-featured'})
         r = [dom_parser2.parse_dom(i, 'a', req=['href']) for i in r if i]
         r = [(i[0].attrs['href'], re.search(r'Release:\s*(\d+)', i[0].content)) for i in r if i]
         r = [(i[0], i[1].groups()[0]) for i in r if i[0] and i[1]]
         r = [(i[0], i[1]) for i in r if i[1] == year]
         if r:
             return r[0][0]
         else:
             return
     except Exception:
         return
Example 9
 def episode(self, url, imdb, tvdb, title, premiered, season, episode):
     try:
         if url is None: return
         r = client.request(url)
         r = dom_parser2.parse_dom(r, 'div', {'class': 'el-item'})
         r = [(dom_parser2.parse_dom(i, 'div', {'class': 'season'}), \
               dom_parser2.parse_dom(i, 'div', {'class': 'episode'}), \
               dom_parser2.parse_dom(i, 'a', req='href')) \
              for i in r if i]
         r = [(i[2][0].attrs['href']) for i in r if i[0][0].content == 'Season %01d' % int(season) \
              and i[1][0].content == 'Episode %01d' % int(episode)]
         if r:
             return r[0]
         else:
             return
     except Exception:
         return
Example 10
 def resolve(self, url):
     if 'hideurl' in url:
         data = self.scraper.get(url).content
         data = client.parseDOM(data, 'div', attrs={'class': 'row'})
         url = [dom_parser2.parse_dom(i, 'a', req='href')[0] for i in data]
         url = [i.attrs['href'] for i in url if 'direct me' in i.content][0]
         return url
     else:
         return url
Example 11
 def search(self, title, year):
     try:
         url = urlparse.urljoin(
             self.base_link, self.search_link % (urllib.quote_plus(title)))
         headers = {'User-Agent': client.agent()}
         r = self.scraper.get(url, headers=headers).content
         r = dom_parser2.parse_dom(r, 'div', {'class': 'list_items'})[0]
         r = dom_parser2.parse_dom(r.content, 'li')
         r = [(dom_parser2.parse_dom(i, 'a', {'class': 'title'}))
              for i in r]
         r = [(i[0].attrs['href'], i[0].content) for i in r]
         r = [(urlparse.urljoin(self.base_link, i[0])) for i in r
              if cleantitle.get(title) in cleantitle.get(i[1])
              and year in i[1]]
         if r:
             return r[0]
         else:
             return
     except Exception:
         return
Example 12
 def resolve(self, url):
     try:
         r = client.request(url)
         r = dom_parser2.parse_dom(
             r, 'a', req=['href', 'data-episodeid', 'data-linkid'])[0]
         url = r.attrs['href']
         url = client.replaceHTMLCodes(url)
         url = url.encode('utf-8')
         return url
     except Exception:
         return
Example 13
 def movie(self, imdb, title, localtitle, aliases, year):
     try:
         clean_title = cleantitle.geturl(title)
         search_url = urlparse.urljoin(
             self.base_link,
             self.search_link % clean_title.replace('-', '+'))
         r = self.scraper.get(search_url).content
         r = dom_parser2.parse_dom(r, 'li', {'class': 'item'})
         r = [(dom_parser2.parse_dom(i, 'a', attrs={'class': 'title'}),
               re.findall(r'status-year">(\d{4})</div', i.content,
                          re.DOTALL)[0]) for i in r if i]
         r = [(i[0][0].attrs['href'],
               re.findall('(.+?)</b><br', i[0][0].content,
                          re.DOTALL)[0], i[1]) for i in r if i]
         r = [(i[0], i[1], i[2]) for i in r if (
             cleantitle.get(i[1]) == cleantitle.get(title) and i[2] == year)
              ]
         url = r[0][0]
         return url
     except Exception:
         return
Example 14
 def episode(self, url, imdb, tvdb, title, premiered, season, episode):
     try:
         if url is None: return
         url = urlparse.parse_qs(url)
         url = dict([(i, url[i][0]) if url[i] else (i, '') for i in url])
         url['premiered'], url['season'], url[
             'episode'] = premiered, season, episode
         try:
             clean_title = cleantitle.geturl(
                 url['tvshowtitle']) + '-season-%d' % int(season)
             search_url = urlparse.urljoin(
                 self.base_link,
                 self.search_link % clean_title.replace('-', '+'))
             r = self.scraper.get(search_url).content
             r = dom_parser2.parse_dom(r, 'li', {'class': 'item'})
             r = [(dom_parser2.parse_dom(i, 'a', attrs={'class': 'title'}),
                   dom_parser2.parse_dom(i,
                                         'div',
                                         attrs={'class': 'status'})[0])
                  for i in r if i]
             r = [(i[0][0].attrs['href'],
                   re.findall('(.+?)</b><br', i[0][0].content,
                              re.DOTALL)[0],
                  re.findall(r'(\d+)', i[1].content)[0]) for i in r if i]
             r = [(i[0], i[1].split(':')[0], i[2]) for i in r
                  if (cleantitle.get(i[1].split(':')[0]) == cleantitle.get(
                      url['tvshowtitle']) and i[2] == str(int(season)))]
             url = r[0][0]
         except Exception:
             pass
         data = self.scraper.get(url).content
         data = client.parseDOM(data, 'div', attrs={'id': 'details'})
         data = zip(client.parseDOM(data, 'a'),
                    client.parseDOM(data, 'a', ret='href'))
         url = [(i[0], i[1]) for i in data if i[0] == str(int(episode))]
         return url[0][1]
     except Exception:
         return
Example 15
    def _get_sources(self, name, url):
        try:
            headers = {'User-Agent': client.agent()}
            r = self.scraper.get(url, headers=headers).content
            name = client.replaceHTMLCodes(name)
            blocks = dom_parser2.parse_dom(r, 'div', {'class': 'ppu2h'})
            s = ''.join(i.content for i in blocks)
            # Search the concatenated text, not just the last block.
            urls = re.findall(
                r'''((?:http|ftp|https)://[\w_-]+(?:(?:\.[\w_-]+)+)[\w.,@?^=%&:/~+#-]*[\w@?^=%&/~+#-])''',
                s,
                flags=re.MULTILINE | re.DOTALL)
            urls = [
                i for i in urls
                if not any(x in i for x in ('.rar', '.zip', '.iso', '.idx', '.sub'))
            ]
            for url in urls:
                if url in str(self.sources):
                    continue

                valid, host = source_utils.is_host_valid(url, self.hostDict)
                if not valid:
                    continue
                host = client.replaceHTMLCodes(host)
                host = host.encode('utf-8')
                quality, info = source_utils.get_release_quality(name, url)
                try:
                    size = re.findall(
                        r'((?:\d+\.\d+|\d+\,\d+|\d+)\s*(?:GiB|MiB|GB|MB))',
                        name)[0]
                    div = 1 if size.endswith(('GB', 'GiB')) else 1024
                    size = float(re.sub(r'[^0-9.,]', '', size.replace(',', '.'))) / div
                    size = '%.2f GB' % size
                    info.append(size)
                except Exception:
                    pass
                info = ' | '.join(info)
                self.sources.append({
                    'source': host,
                    'quality': quality,
                    'language': 'en',
                    'url': url,
                    'info': info,
                    'direct': False,
                    'debridonly': True
                })
        except Exception:
            pass
Example 16
    def _get_sources(self, url):
        try:
            item = client.request(url[0])
            title = url[1]
            links = dom_parser2.parse_dom(item, 'a', req='href')
            links = [i.attrs['href'] for i in links]
            info = []
            try:
                size = re.findall(
                    r'((?:\d+\.\d+|\d+\,\d+|\d+)\s*(?:GiB|MiB|GB|MB))', item)[0]
                div = 1 if size.endswith(('GB', 'GiB')) else 1024
                size = float(re.sub(r'[^0-9.,]', '', size.replace(',', '.'))) / div
                size = '%.2f GB' % size
                info.append(size)
            except Exception:
                pass
            info = ' | '.join(info)
            for url in links:
                if 'youtube' in url: continue
                if any(x in url.lower()
                       for x in ['.rar.', '.zip.', '.iso.']) or any(
                           url.lower().endswith(x)
                           for x in ['.rar', '.zip', '.iso']):
                    continue  # skip archive links instead of aborting the whole list

                if any(x in url.lower()
                       for x in ['youtube', 'sample', 'trailer']):
                    continue  # skip samples/trailers instead of aborting
                valid, host = source_utils.is_host_valid(url, self.hostDict)
                if not valid: continue

                host = client.replaceHTMLCodes(host)
                host = host.encode('utf-8')
                quality, info2 = source_utils.get_release_quality(title, url)
                if url in str(self._sources): continue

                self._sources.append({
                    'source': host,
                    'quality': quality,
                    'language': 'en',
                    'url': url,
                    'info': info,
                    'direct': False,
                    'debridonly': True
                })
        except Exception:
            pass
Example 17
 def _get_items(self, url):
     try:
         headers = {'User-Agent': client.agent()}
         r = client.request(url, headers=headers)
         posts = client.parseDOM(r, 'table', attrs={'class': 'table2'})[0]
         posts = client.parseDOM(posts, 'tr')
         for post in posts:
             data = dom_parser2.parse_dom(post, 'a', req='href')[1]
             link = urlparse.urljoin(self.base_link, data.attrs['href'])
             name = data.content
             t = name.split(self.hdlr)[0]
             if cleantitle.get(re.sub(r'[()]', '', t)) != cleantitle.get(
                     self.title):
                 continue
             try:
                 y = re.findall(
                     r'[\.|\(|\[|\s|\_|\-](S\d+E\d+|S\d+)[\.|\)|\]|\s|\_|\-]',
                     name, re.I)[-1].upper()
             except Exception:
                 y = re.findall(
                     r'[\.|\(|\[|\s\_|\-](\d{4})[\.|\)|\]|\s\_|\-]', name,
                     re.I)[-1].upper()
             if not y == self.hdlr: continue
             try:
                 size = re.findall(
                     r'((?:\d+\,\d+\.\d+|\d+\.\d+|\d+\,\d+|\d+)\s*(?:GiB|MiB|GB|MB))',
                     post)[0]
                 div = 1 if size.endswith(('GB', 'GiB')) else 1024
                 size = float(
                     re.sub(r'[^0-9.,]', '', size.replace(',', '.'))) / div
                 size = '%.2f GB' % size
             except Exception:
                 size = '0'
             self.items.append((name, link, size))
         return self.items
     except Exception:
         return self.items
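
The paired regexes above pull the SxxExx (or year) tag out of a release name; the same lookup reads more easily as one helper (hypothetical, mirroring the try/except chain in _get_items):

import re

def release_tag(name):
    # Last SxxExx/Sxx tag in the name, else the last 4-digit year,
    # else None.
    for pattern in (r'[.(\[\s_-](S\d+E\d+|S\d+)[.)\]\s_-]',
                    r'[.(\[\s_-](\d{4})[.)\]\s_-]'):
        tags = re.findall(pattern, name, re.I)
        if tags:
            return tags[-1].upper()
    return None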
Example 18
 def sources(self, url, hostDict, hostprDict):
     try:
         sources = []
         if url is None:
             return sources
         if debrid.status() is False:
             raise Exception()
         data = urlparse.parse_qs(url)
         data = dict([(i, data[i][0]) if data[i] else (i, '')
                      for i in data])
         title = data['tvshowtitle'] if 'tvshowtitle' in data else data[
             'title']
         hdlr = 'S%02dE%02d' % (int(data['season']), int(
             data['episode'])) if 'tvshowtitle' in data else data['year']
         query = '%s S%02dE%02d' % (data['tvshowtitle'], int(data['season']), int(data['episode'])) if \
             'tvshowtitle' in data else '%s' % (data['title'])
         url = self.search_link % urllib.quote_plus(query).lower()
         url = urlparse.urljoin(self.base_link, url)
         headers = {'Referer': url}
         r = self.scraper.get(url, headers=headers).content
         items = dom_parser2.parse_dom(r, 'h2')
         items = [
             dom_parser2.parse_dom(i.content,
                                   'a',
                                   req=['href', 'rel', 'data-wpel-link'])
             for i in items
         ]
         items = [(i[0].content, i[0].attrs['href']) for i in items]
         hostDict = hostprDict + hostDict
         for item in items:
             try:
                 name = item[0]
                 name = client.replaceHTMLCodes(name)
                 query = query.lower().replace(' ', '-')
                 if query not in item[1]:
                     continue
                 url = item[1]
                 headers = {'Referer': url}
                 r = self.scraper.get(url, headers=headers).content
                 links = dom_parser2.parse_dom(
                     r, 'a', req=['href', 'rel', 'data-wpel-link'])
                 links = [i.attrs['href'] for i in links]
                 for url in links:
                     try:
                         if hdlr in name.upper() and cleantitle.get(
                                 title) in cleantitle.get(name):
                             fmt = re.sub(
                                 r'(.+)(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*)(\.|\)|\]|\s)',
                                 '', name.upper())
                             fmt = re.split(r'\.|\(|\)|\[|\]|\s|\-', fmt)
                             fmt = [i.lower() for i in fmt]
                             if any(
                                     i.endswith(('subs', 'sub', 'dubbed',
                                                 'dub')) for i in fmt):
                                 raise Exception()
                             if any(i in ['extras'] for i in fmt):
                                 raise Exception()
                             quality, info = source_utils.get_release_quality(
                                 name, url)
                             try:
                                 size = re.findall(
                                     r'((?:\d+\.\d+|\d+\,\d+|\d+) (?:GB|GiB|MB|MiB))',
                                     name)[-1]
                                 div = 1 if size.endswith(
                                     ('GB', 'GiB')) else 1024
                                 size = float(
                                     re.sub(r'[^0-9.,]', '', size.replace(',', '.'))) / div
                                 size = '%.2f GB' % size
                                 info.append(size)
                             except Exception:
                                 pass
                             info = ' | '.join(info)
                             if not any(x in url
                                        for x in ['.rar', '.zip', '.iso']):
                                 url = client.replaceHTMLCodes(url)
                                 url = url.encode('utf-8')
                                 host = \
                                     re.findall(r'([\w]+[.][\w]+)$', urlparse.urlparse(url.strip().lower()).netloc)[0]
                                 if host in hostDict:
                                     host = client.replaceHTMLCodes(host)
                                     host = host.encode('utf-8')
                                     sources.append({
                                         'source': host,
                                         'quality': quality,
                                         'language': 'en',
                                         'url': url,
                                         'info': info,
                                         'direct': False,
                                         'debridonly': True
                                     })
                     except Exception:
                         pass
             except Exception:
                 pass
         check = [i for i in sources if not i['quality'] == 'CAM']
         if check:
             sources = check
         return sources
     except Exception:
         return sources
Example 19
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []
            if url is None:
                return sources
            if debrid.status() is False:
                raise Exception()

            hostDict = hostprDict + hostDict

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
            title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
            hdlr = 'S%02dE%02d' % (int(data['season']), int(data['episode'])) if 'tvshowtitle' in data else data['year']
            query = '%s S%02dE%02d' % (data['tvshowtitle'], int(data['season']), int(data['episode'])) if \
                'tvshowtitle' in data else '%s' % (data['title'])
            query = re.sub(r'(\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)
            url = self.search_link % urllib.quote_plus(query)
            url = urlparse.urljoin(self.base_link, url)
            headers = {'Referer': self.base_link}
            r = self.scraper.get(url, headers=headers).content

            search_results = dom_parser2.parse_dom(r, 'h2')
            search_results = [dom_parser2.parse_dom(i.content, 'a', req=['href']) for i in search_results]
            search_results = [(i[0].content, i[0].attrs['href']) for i in search_results]

            items = []
            for search_result in search_results:
                try:
                    headers = {'Referer': url}
                    r = self.scraper.get(search_result[1], headers=headers).content
                    links = dom_parser2.parse_dom(r, 'a', req=['href', 'rel'])
                    links = [i.attrs['href'] for i in links]
                    for url in links:
                        try:
                            if hdlr in url.upper() and cleantitle.get(title) in cleantitle.get(url):
                                items.append(url)
                        except Exception:
                            pass
                except Exception:
                    pass

            seen_urls = set()
            for item in items:
                try:
                    url = str(item)
                    url = client.replaceHTMLCodes(url)
                    url = url.encode('utf-8')

                    if url in seen_urls:
                        continue
                    seen_urls.add(url)

                    if any(x in url for x in
                           ['.part', 'extras', 'subs', 'dubbed', 'dub', 'MULTISUBS', 'sample', 'youtube', 'trailer']) \
                            or any(url.endswith(x) for x in ['.rar', '.zip', '.iso', '.sub', '.idx', '.srt']):
                        raise Exception()
                    quality, info = source_utils.get_release_quality(url, url)
                    host = re.findall(r'([\w]+[.][\w]+)$', urlparse.urlparse(url.strip().lower()).netloc)[0]
                    if host in hostDict:
                        host = client.replaceHTMLCodes(host)
                        host = host.encode('utf-8')
                        sources.append(
                            {'source': host, 'quality': quality, 'language': 'en', 'url': url, 'info': info,
                             'direct': False, 'debridonly': True})
                except Exception:
                    pass

            return sources
        except Exception:
            return sources
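
The netloc-trimming findall used here (and in Example 18) reduces a link to its registrable domain before checking hostDict; as a standalone sketch (hypothetical helper):

import re
import urlparse  # Python 2; urllib.parse on Python 3

def base_domain(url):
    # "http://www.example.co/path" -> "example.co"
    netloc = urlparse.urlparse(url.strip().lower()).netloc
    m = re.search(r'(\w+\.\w+)$', netloc)
    return m.group(1) if m else netloc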
Example 20
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []
            if url is None: return sources
            if debrid.status() is False: raise Exception()

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            url = self.searchMovie(data['title'], data['year'])
            if url is None: return sources

            r = client.request(url)
            data = client.parseDOM(r, 'div', attrs={'class': 'playex'})[0]
            frames = client.parseDOM(data, 'iframe', ret='src')
            frames += re.compile(r'''<iframe\s*src=['"](.+?)['"]''',
                                 re.DOTALL).findall(data)
            quality = client.parseDOM(r, 'span', attrs={'class':
                                                        'qualityx'})[0]
            for frame in frames:
                url = frame.split('=')[1] if frame.startswith('<') else frame
                url = client.replaceHTMLCodes(url)
                url = url.encode('utf-8')

                valid, host = source_utils.is_host_valid(url, hostDict)

                if valid:
                    quality, info = source_utils.get_release_quality(
                        quality, url)
                    info = ' | '.join(info)
                    sources.append({
                        'source': host,
                        'quality': quality,
                        'language': 'en',
                        'url': url,
                        'info': info,
                        'direct': False,
                        'debridonly': False
                    })

                elif url.endswith('mp4'):
                    url += '|User-Agent=%s' % urllib.quote_plus(client.agent())
                    sources.append({
                        'source': 'MP4',
                        'quality': quality,
                        'language': 'en',
                        'url': url,
                        'direct': True,
                        'debridonly': False
                    })

                elif 'mystream' in url:
                    data = client.request(url)
                    links = dom_parser2.parse_dom(data,
                                                  'source',
                                                  req=['src', 'label'])
                    for link in links:
                        label = link.attrs['label']
                        url = link.attrs[
                            'src'] + '|User-Agent=%s' % urllib.quote_plus(
                                client.agent())

                        sources.append({
                            'source': 'MYSTREAM',
                            'quality': label,
                            'language': 'en',
                            'url': url,
                            'direct': True,
                            'debridonly': False
                        })

                else:
                    continue
            return sources
        except Exception:
            return sources
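
Both direct branches append playback headers Kodi-style, after a '|' separator in the media URL. As a tiny helper (hypothetical):

import urllib  # Python 2; use urllib.parse.quote_plus on Python 3

def with_user_agent(url, agent):
    # Kodi convention: request headers may follow the URL after '|'
    # as a urlencoded query string.
    return url + '|User-Agent=' + urllib.quote_plus(agent)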
Example 21
 def sources(self, url, hostDict, hostprDict):
     try:
         sources = []
         results_limit = 30
         # Per-host caps (one link each), replacing twelve separate counters.
         host_limits = {'vshare': 1, 'openload': 1, 'speedvid': 1,
                        'vidoza': 1, 'vidlox': 1, 'mango': 1,
                        'streamplay': 1, 'vidtodo': 1, 'clipwatch': 1,
                        'vidcloud': 1, 'vev': 1, 'flix555': 1}
         if url is None: return sources
         r = client.request(url)
         r = dom_parser2.parse_dom(r, 'div', {'class': 'll-item'})
         r = [(dom_parser2.parse_dom(i, 'a', req='href'), \
               dom_parser2.parse_dom(i, 'div', {'class': 'notes'})) \
              for i in r if i]
         r = [(i[0][0].attrs['href'], i[0][0].content,
               i[1][0].content if i[1] else 'None') for i in r]
         for i in r:
             try:
                 url = i[0]
                 url = client.replaceHTMLCodes(url)
                 url = url.encode('utf-8')
                 valid, host = source_utils.is_host_valid(i[1], hostDict)
                 if not valid: continue
                 host = client.replaceHTMLCodes(host)
                 host = host.encode('utf-8')
                 # Enforce the per-host caps declared above.
                 capped = False
                 for key in host_limits:
                     if key in host:
                         if host_limits[key] < 1:
                             capped = True
                         else:
                             host_limits[key] -= 1
                 if capped:
                     continue
                 quality, info = source_utils.get_release_quality(
                     i[2], i[2])
                 info = ' | '.join(info)
                 if results_limit < 1:
                     continue
                 else:
                     results_limit -= 1
                 sources.append({
                     'source': host,
                     'quality': quality,
                     'language': 'en',
                     'url': url,
                     'info': info,
                     'direct': False,
                     'debridonly': False
                 })
             except Exception:
                 pass
         return sources
     except Exception:
         return sources