Example #1
 def __init__(self):
     self.priority = 1
     self.language = ['en']
     self.domains = ['spacemov.is']
     self.base_link = 'https://www0.spacemov.is'
     self.search_link = '/search-query/%s+%s/'
     self.scraper = cfscrape.create_scraper()
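
Note: __init__ snippets like the one above only store configuration; each provider combines base_link and search_link elsewhere to build its requests. A minimal, hypothetical sketch of that pattern (ExampleProvider and its search method are illustrative assumptions, not code from these providers):

import urllib
import urlparse

import cfscrape


class ExampleProvider(object):
    def __init__(self):
        self.base_link = 'https://www0.spacemov.is'
        self.search_link = '/search-query/%s+%s/'
        # cfscrape wraps requests and transparently solves Cloudflare's anti-bot page
        self.scraper = cfscrape.create_scraper()

    def search(self, title, year):
        # fill in the search template, then resolve it against the base URL
        query = self.search_link % (urllib.quote_plus(title), year)
        url = urlparse.urljoin(self.base_link, query)
        return self.scraper.get(url).content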
Example #2
 def __init__(self):
     self.priority = 1
     self.language = ['en']
     self.domains = ['netflixlogin.in']
     self.base_link = 'http://netflixlogin.in'
     self.search_link = '/search-movies/%s.html'
     self.scraper = cfscrape.create_scraper()
Example #3
 def sources(self, url, hostDict, hostprDict):
     try:
         sources = []
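         # cfscrape returns a requests-compatible session that solves Cloudflare's anti-bot challenge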
         scraper = cfscrape.create_scraper()
         r = scraper.get(url).content
         try:
             match = re.compile('<iframe .+?src="(.+?)"').findall(r)
             for url in match:
                 if 'youtube' in url:
                     continue
                 valid, hoster = source_utils.is_host_valid(url, hostDict)
                 if not valid:
                     continue
                 sources.append({
                     'source': hoster,
                     'quality': 'SD',
                     'language': 'en',
                     'url': url,
                     'direct': False,
                     'debridonly': False
                 })
         except Exception:
             return sources
     except Exception:
         failure = traceback.format_exc()
         log_utils.log('HackIMDB - Exception: \n' + str(failure))
         return sources
     return sources
Example #4
 def __init__(self):
     self.priority = 1
     self.language = ['en']
     self.domains = ['l23movies.com']
     self.base_link = 'http://l23movies.com'
     self.movies_search_path = 'search-movies/%s.html'
     self.scraper = cfscrape.create_scraper()
Example #5
 def __init__(self):
     self.priority = 1
     self.language = ['en']
     self.domains = ['seehd.pl']
     self.base_link = 'http://www.seehd.pl'
     self.search_link = '/%s-%s-watch-online/'
     self.scraper = cfscrape.create_scraper()
Example #6
 def __init__(self):
     self.priority = 1
     self.language = ['en']
     self.domains = ['123movieser.com']
     self.base_link = 'http://123movieser.com'
     self.search_link = '/watch/%s-%s-online-free-123movies.html'
     self.scraper = cfscrape.create_scraper()
Example #7
    def __init__(self):
        self.priority = 1
        self.language = ['en']
        self.domains = ['ondarewatch.com', 'dailytvfix.com']
        self.base_link = 'http://www.dailytvfix.com'

        self.scraper = cfscrape.create_scraper()
Example #8
 def __init__(self):
     self.priority = 1
     self.language = ['en']
     self.domains = ['ddlvalley.me']
     self.base_link = 'http://www.ddlvalley.me'
     self.search_link = 'search/%s/'
     self.scraper = cfscrape.create_scraper()
Example #9
 def __init__(self):
     self.priority = 1
     self.language = ['en']
     self.domains = ['downflix.win']
     self.base_link = 'https://en.downflix.win'
     self.search_link = '/%s-%s/'
     self.scraper = cfscrape.create_scraper()
Example #10
 def __init__(self):
     self.priority = 1
     self.language = ['en']
     self.domains = ['azmovie.to']
     self.base_link = 'https://azmovie.to'
     self.search_link = '/watch.php?title=%s'
     self.scraper = cfscrape.create_scraper()
Example #11
    def __search(self, titles, year):
        try:
            query = self.search_link % (urllib.quote_plus(cleantitle.getsearch(titles[0])))

            query = urlparse.urljoin(self.base_link, query)

            t = cleantitle.get(titles[0])
            scraper = cfscrape.create_scraper()
            data = scraper.get(query).content
            #data = client.request(query, referer=self.base_link)
            data = client.parseDOM(data, 'div', attrs={'class': 'result-item'})
            r = dom_parser.parse_dom(data, 'div', attrs={'class': 'title'})
            r = zip(dom_parser.parse_dom(r, 'a'), dom_parser.parse_dom(data, 'span', attrs={'class': 'year'}))

            url = []
            for i in range(len(r)):
                title = cleantitle.get(r[i][0][1])
                title = re.sub('(\d+p|4k|3d|hd|season\d+)','',title)
                y = r[i][1][1]
                link = r[i][0][0]['href']
                if 'season' in title: continue
                if t == title and y == year:
                    if 'season' in link:
                        url.append(source_utils.strip_domain(link))
                        print url[0]
                        return url[0]
                    else: url.append(source_utils.strip_domain(link))

            return url
        except:
            return
Example #12
 def __init__(self):
     self.priority = 0
     self.language = ['en']
     self.domains = ['watchserieshd.io']
     self.base_link = 'https://www4.watchserieshd.io'
     self.search_link = 'search.html?keyword=%s'
     self.scraper = cfscrape.create_scraper()
Example #13
 def __init__(self):
     self.priority = 1
     self.language = ['en']
     self.domains = ['gostream.sc']
     self.base_link = 'https://www3.gostream.sc'
     self.search_link = '/watch/%s-%s-gostream.html'
     self.scraper = cfscrape.create_scraper()
Example #14
 def sources(self, url, hostDict, hostprDict):
     try:
         sources = []
         scraper = cfscrape.create_scraper()
         r = scraper.get(url).content
         try:
             qual = re.compile('class="quality">(.+?)<').findall(r)
             print qual
             quality = 'SD'  # default when no quality tag is found
             for i in qual:
                 if 'HD' in i:
                     quality = '1080p'
             match = re.compile('<iframe src="(.+?)"').findall(r)
             for url in match:
                 valid, host = source_utils.is_host_valid(url, hostDict)
                 sources.append({'source': host, 'quality': quality, 'language': 'en',
                                 'url': url, 'direct': False, 'debridonly': False})
         except Exception:
             return sources
     except Exception:
         failure = traceback.format_exc()
         log_utils.log('PLAYMOVIES - Exception: \n' + str(failure))
         return sources
     return sources
Example #15
 def __init__(self):
     self.priority = 1
     self.language = ['en']
     self.domains = ['xmovies8.tv', 'xmovies8.ru', 'xmovies8.es', 'xmovies8.nz']
     self.base_link = 'https://xmovies8.pl'
     self.search_link = '/movies/search?s=%s'
     self.scraper = cfscrape.create_scraper()
Example #16
 def __init__(self):
     self.priority = 1
     self.language = ['en']
     self.domains = ['scene-rls.com', 'scene-rls.net']
     self.base_link = 'http://scene-rls.net'
     self.search_link = '/?s=%s&submit=Find'
     self.scraper = cfscrape.create_scraper()
Example #17
    def episode(self, url, imdb, tvdb, title, premiered, season, episode):
        try:
            if url is None: return
            scraper = cfscrape.create_scraper()
            url = urlparse.parse_qs(url)
            url = dict([(i, url[i][0]) if url[i] else (i, '') for i in url])
            url['premiered'], url['season'], url['episode'] = premiered, season, episode
            try:
                clean_title = cleantitle.geturl(url['tvshowtitle'])+'-season-%d' % int(season)
                search_url = urlparse.urljoin(self.base_link, self.search_link % clean_title.replace('-', '+'))
                r = scraper.get(search_url).content
                r = client.parseDOM(r, 'div', {'id': 'movie-featured'})
                r = [(client.parseDOM(i, 'a', ret='href'),
                      re.findall('<b><i>(.+?)</i>', i)) for i in r]
                r = [(i[0][0], i[1][0]) for i in r if
                     cleantitle.get(i[1][0]) == cleantitle.get(clean_title)]
                url = r[0][0]
            except:
                pass
            data = scraper.get(url).content
            data = client.parseDOM(data, 'div', attrs={'id': 'details'})
            data = zip(client.parseDOM(data, 'a'), client.parseDOM(data, 'a', ret='href'))
            url = [(i[0], i[1]) for i in data if i[0] == str(int(episode))]

            return url[0][1]
        except:
            return
Example #18
 def __init__(self):
     self.priority = 1
     self.language = ['en']
     self.domains = ['cutemovie.net']
     self.base_link = 'http://www1.cutemovie.net'
     self.search_link = '/search-movies/%s.html'
     self.scraper = cfscrape.create_scraper()
Example #19
 def __init__(self):
     self.priority = 0
     self.language = ['en']
     self.domains = ['tv-release.pw', 'tv-release.immunicity.st']
     self.base_link = 'http://tv-release.pw'
     self.search_link = '?s=%s'
     self.scraper = cfscrape.create_scraper()
Example #20
 def __init__(self):
     self.priority = 1
     self.language = ['en']
     self.domains = ['moviesonline.mx']
     self.base_link = 'http://www2.moviesonline.mx'
     self.search_link = '/search-movies/%s.html'
     self.scraper = cfscrape.create_scraper()
Example #21
 def __init__(self):
     self.priority = 1
     self.language = ['en']
     self.domains = ['hdpopcorns.co','hdpopcorns.eu']
     self.base_link = 'http://hdpopcorns.co'
     self.search_link = '/?s=%s'
     self.scraper = cfscrape.create_scraper()
Example #22
 def __init__(self):
     self.priority = 1
     self.language = ['en']
     self.domains = ['ultrahdindir.com']
     self.base_link = 'http://ultrahdindir.com'
     self.post_link = '/index.php?do=search'
     self.scraper = cfscrape.create_scraper()
Example #23
 def __init__(self):
     self.priority = 1
     self.language = ['en']
     self.domains = ['fmovies.sc']
     self.base_link = 'http://www4.fmovies.sc'
     self.search_link = '/watch/%s-%s-online.html'
     self.scraper = cfscrape.create_scraper()
Example #24
 def __init__(self):
     self.priority = 1
     self.language = ['en']
     self.domains = ['hdmega.unblckd.cc']
     self.base_link = 'http://hdmega.unblckd.ink/'
     self.search_link = '/search-movies/%s.html'
     self.scraper = cfscrape.create_scraper()
Example #25
 def __init__(self):
     self.priority = 1
     self.language = ['en']
     self.domains = ['123hbo.com']
     self.base_link = 'http://www0.123hbo.com'
     self.search_link = '/search-movies/%s.html'
     self.scraper = cfscrape.create_scraper()
Example #26
    def __get_episode_url(self, data, hostDict):
        scraper = cfscrape.create_scraper()
        try:
            value = "/seasons/" + cleantitle.geturl(data['tvshowtitle']) + '-season-' + data['season']
            url = self.base_link + value
            print("INFO - " + url)
            html = scraper.get(self.base_link)  # fetch the landing page first, likely to prime Cloudflare cookies
            html = scraper.get(url)
            page_list = BeautifulSoup(html.text, 'html.parser')
            page_list = page_list.find_all('div', {'class':'episodiotitle'})
            ep_page = ''
            for i in page_list:
                if re.sub(r'\W+', '', data['title'].lower()) in re.sub(r'\W+', '', i.text.lower()):
                    ep_page = i.prettify()
            if ep_page == '': return ''
            ep_page = BeautifulSoup(ep_page, 'html.parser').find_all('a')[0]['href']
            html = scraper.get(ep_page)
            embed = re.findall('<iframe.+?src=\"(.+?)\"', html.text)[0]
            url = embed
            sources = []
            if 'mehliz' in url:
                html = scraper.get(url, headers={'referer': self.base_link + '/'})
                files = re.findall('file: \"(.+?)\".+?label: \"(.+?)\"', html.text)

                for i in files:
                    try:
                        sources.append({
                            'source': 'gvideo',
                            'quality': i[1],  # second capture group is the quality label; i[2] would raise IndexError
                            'language': 'en',
                            'url': i[0] + "|Referer=https://www.mehlizmovies.com",
                            'direct': True,
                            'debridonly': False
                        })

                    except Exception:
                        pass

            else:
                valid, hoster = source_utils.is_host_valid(url, hostDict)
                if not valid: return ''
                urls, host, direct = source_utils.check_directstreams(url, hoster)

                sources.append({
                    'source': host,
                    'quality': urls[0]['quality'],
                    'language': 'en',
                    'url': url + "|Referer=https://www.mehlizmovies.com",
                    'direct': False,
                    'debridonly': False
                })


            return sources

        except Exception:
            print("Unexpected error in Mehlix _get_episode_url Script:")
            exc_type, exc_obj, exc_tb = sys.exc_info()
            print(exc_type, exc_tb.tb_lineno)
            return ""
Example #27
 def __init__(self):
     self.priority = 1
     self.language = ['en']
     self.domains = ['breakfreemovies.biz']
     self.base_link = 'https://alphareign.lol/'
     self.search_link = '/movies.php?list=search&search=%s'
     self.scraper = cfscrape.create_scraper()
Example #28
 def __init__(self):
     self.priority = 1
     self.language = ['en']
     self.domains = ['movie4k.to']
     self._base_link = None
     self.search_link = '/movies.php?list=search&search=%s'
     self.scraper = cfscrape.create_scraper()
Example #29
 def __init__(self):
     self.priority = 1
     self.language = ['en']
     self.domains = ['wrzcraft.net']
     self.base_link = 'http://wrzcraft.net'
     self.search_link = '/search/%s/feed/rss2/'
     self.scraper = cfscrape.create_scraper()
Example #30
 def movie(self, imdb, title, localtitle, aliases, year):
     try:
         scraper = cfscrape.create_scraper()
         url = {'imdb': imdb, 'title': title, 'year': year}
         return url
     except:
         return
Example #31
    def sources(self, url, hostDict, hostprDict):

        self._sources = []

        try:
            if url is None: return self._sources

            if debrid.status() is False: raise Exception()
            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            title = data['tvshowtitle'] if 'tvshowtitle' in data else data[
                'title']

            hdlr = data['year']
            hdlr2 = 'S%02dE%02d' % (int(data['season']), int(
                data['episode'])) if 'tvshowtitle' in data else ''
            imdb = data['imdb']

            url = self.search(title, hdlr)
            headers = {'User-Agent': client.agent()}
            scraper = cfscrape.create_scraper()
            r = scraper.get(url, headers=headers).content
            if hdlr2 == '':
                r = dom_parser2.parse_dom(r, 'ul', {'id': 'releases'})[0]
            else:
                r = dom_parser2.parse_dom(r, 'ul', {'id': 'episodes'})[0]
            r = dom_parser2.parse_dom(r.content, 'a', req=['href'])
            r = [(i.content, urlparse.urljoin(self.base_link, i.attrs['href']))
                 for i in r if i and i.content != 'Watch']
            if hdlr2 != '':
                r = [(i[0], i[1]) for i in r if hdlr2.lower() in i[0].lower()]

            self.hostDict = hostDict + hostprDict
            threads = []

            for i in r:
                threads.append(workers.Thread(self._get_sources, i[0], i[1]))
            [i.start() for i in threads]

            # poll until every worker thread has finished
            alive = [x for x in threads if x.is_alive()]
            while alive:
                alive = [x for x in threads if x.is_alive()]
                time.sleep(0.1)
            return self._sources
        except BaseException:
            return self._sources
Example #32
 def sources(self, url, hostDict, hostprDict):
     try:
         sources = []
         scraper = cfscrape.create_scraper()
         r = scraper.get(url).content
         try:
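             # some pages embed the player iframe as Base64 inside document.write()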
             v = re.findall('document.write\(Base64.decode\("(.+?)"\)', r)[0]
             b64 = base64.b64decode(v)
             url = client.parseDOM(b64, 'iframe', ret='src')[0]
             try:
                 host = re.findall('([\w]+[.][\w]+)$', urlparse.urlparse(url.strip().lower()).netloc)[0]
                 host = client.replaceHTMLCodes(host)
                 host = host.encode('utf-8')
                 sources.append({
                     'source': host,
                     'quality': 'SD',
                     'language': 'en',
                     'url': url.replace('\/', '/'),
                     'direct': False,
                     'debridonly': False
                 })
             except:
                 pass
         except:
             pass
         r = client.parseDOM(r, 'div', {'class': 'server_line'})
         r = [(client.parseDOM(i, 'a', ret='href')[0], client.parseDOM(i, 'p', attrs={'class': 'server_servername'})[0]) for i in r]
         if r:
             for i in r:
                 try:
                     host = re.sub('Server|Link\s*\d+', '', i[1]).lower()
                     url = i[0]
                     host = client.replaceHTMLCodes(host)
                     host = host.encode('utf-8')
                     if 'other' in host: continue
                     sources.append({
                         'source': host,
                         'quality': 'SD',
                         'language': 'en',
                         'url': url.replace('\/', '/'),
                         'direct': False,
                         'debridonly': False
                     })
                 except:
                     pass
         return sources
     except Exception:
         return
Example #33
    def _get_sources(self, name, url):
        try:
            headers = {'User-Agent': client.agent()}
            scraper = cfscrape.create_scraper()
            r = scraper.get(url, headers=headers).content
            name = client.replaceHTMLCodes(name)
            l = dom_parser2.parse_dom(r, 'div', {'class': 'ppu2h'})
            s = ''
            for i in l:
                s += i.content
            # search the concatenated link containers, not just the last loop variable
            urls = re.findall(
                r'''((?:http|ftp|https)://[\w_-]+(?:(?:\.[\w_-]+)+)[\w.,@?^=%&:/~+#-]*[\w@?^=%&/~+#-])''',
                s,
                flags=re.MULTILINE | re.DOTALL)
            # drop archive and subtitle links (the original or-chain was always true and filtered nothing)
            urls = [
                i for i in urls
                if not any(x in i for x in ('.rar', '.zip', '.iso', '.idx', '.sub'))
            ]
            for url in urls:
                if url in str(self._sources): continue

                valid, host = source_utils.is_host_valid(url, self.hostDict)
                if not valid: continue
                host = client.replaceHTMLCodes(host)
                host = host.encode('utf-8')
                quality, info = source_utils.get_release_quality(name, url)
                try:
                    size = re.findall(
                        '((?:\d+\.\d+|\d+\,\d+|\d+)\s*(?:GiB|MiB|GB|MB))',
                        name)[0]
                    div = 1 if size.endswith(('GB', 'GiB')) else 1024
                    size = float(re.sub('[^0-9|/.|/,]', '', size)) / div
                    size = '%.2f GB' % size
                    info.append(size)
                except BaseException:
                    pass
                info = ' | '.join(info)
                self._sources.append({
                    'source': host,
                    'quality': quality,
                    'language': 'en',
                    'url': url,
                    'info': info,
                    'direct': False,
                    'debridonly': True
                })
        except BaseException:
            pass
Example #34
 def movie(self, imdb, title, localtitle, aliases, year):
     try:
         scraper = cfscrape.create_scraper()
         clean_title = cleantitle.geturl(title)
         search_url = urlparse.urljoin(self.base_link, self.search_link % clean_title.replace('-', '+'))
         r = scraper.get(search_url).content
         r = client.parseDOM(r, 'div', {'id': 'movie-featured'})
         r = [(client.parseDOM(i, 'a', ret='href'),
               re.findall('.+?elease:\s*(\d{4})</', i),
               re.findall('<b><i>(.+?)</i>', i)) for i in r]
         r = [(i[0][0], i[1][0], i[2][0]) for i in r if
              (cleantitle.get(i[2][0]) == cleantitle.get(title) and i[1][0] == year)]
         url = r[0][0]
         return url
     except Exception:
         return
Example #35
 def searchShow(self, title, season, aliases, headers):
     try:
         scraper = cfscrape.create_scraper()
         title = cleantitle.normalize(title)
         search = '%s Season %01d' % (title, int(season))
         url = urlparse.urljoin(self.base_link, self.search_link % cleantitle.geturl(search))
         r = scraper.get(url).content
         r = client.parseDOM(r, 'div', attrs={'class': 'ml-item'})
         r = zip(client.parseDOM(r, 'a', ret='href'), client.parseDOM(r, 'a', ret='title'))
         r = [(i[0], i[1], re.findall('(.*?)\s+-\s+Season\s+(\d)', i[1])) for i in r]
         r = [(i[0], i[1], i[2][0]) for i in r if len(i[2]) > 0]
         url = [i[0] for i in r if self.matchAlias(i[2][0], aliases) and i[2][1] == season][0]
         url = urlparse.urljoin(self.base_link, '%s/watching.html' % url)
         return url
     except:
         return
Example #36
    def __get_movie_url(self, data, hostDict):
        scraper = cfscrape.create_scraper()
        try:
            html = scraper.get(self.base_link + "/movies/" + cleantitle.geturl(data['title']))
            embeds = re.findall('play-box-iframe.+\s<iframe.+?src=\"(.+?)\"', html.text)[0]
            print("INFO - " + embeds)
            url = embeds
            sources = []
            if 'mehliz' in url:
                html = scraper.get(url, headers={'referer': self.base_link + '/'})
                files = re.findall('file: \"(.+?)\".+?label: \"(.+?)\"', html.text)

                for i in files:
                    try:
                        sources.append({
                            'source': 'gvideo',
                            'quality': i[1],
                            'language': 'en',
                            'url': i[0] + "|Referer=https://www.mehlizmovies.is",
                            'direct': True,
                            'debridonly': False
                        })

                    except Exception:
                        pass

            else:
                valid, hoster = source_utils.is_host_valid(url, hostDict)
                if not valid: return ''
                urls, host, direct = source_utils.check_directstreams(url, hoster)

                sources.append({
                    'source': host,
                    'quality': urls[0]['quality'],
                    'language': 'en',
                    'url': url + "|Referer=https://www.mehlizmovies.is",
                    'direct': False,
                    'debridonly': False
                })

            return sources

        except Exception:
            print("Unexpected error in Mehliz getMovieURL Script:")
            exc_type, exc_obj, exc_tb = sys.exc_info()
            print(exc_type, exc_tb.tb_lineno)
            return ""
Example #37
def get(url, Type=None):
    if not url:
        return
    content = None  # guard against an unrecognised Type falling through to a NameError
    if Type == 'client' or Type is None:
        from resources.lib.modules import client
        content = client.request(url, headers=headers)
    if Type == 'cfscrape':
        from resources.lib.modules import cfscrape
        cfscraper = cfscrape.create_scraper()
        content = cfscraper.get(url, headers=headers).content
    if Type == 'redirect':
        import requests
        content = requests.get(url, headers=headers).url
    if content is None:
        log_utils.log('getSum - Get ERROR:  No Content Got for:  ' + str(url))
        raise Exception()
    return content
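
A hypothetical call site for this helper (the URLs are placeholders, and headers is the module-level dict the function already assumes):

    html = get('https://example.com/page', Type='cfscrape')     # Cloudflare-protected page
    final_url = get('https://example.com/go', Type='redirect')  # resolve the redirect only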
Example #38
    def sources(self, url, hostDict, hostprDict):
        try:
            self._sources = []

            if url is None:
                return self._sources

            if not debrid.status():
                raise Exception()

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            title = data['tvshowtitle'] if 'tvshowtitle' in data else data[
                'title']
            self.imdb = data['imdb']
            content = 'tvshow' if 'season' in data else 'movie'
            self.hdlr = 'S%02dE%02d' % (
                int(data['season']), int(data['episode'])
            ) if 'tvshowtitle' in data else data['year']

            items = []
            self.hostDict = hostprDict + hostDict

            query = '%s S%02dE%02d' % (self.imdb, int(data['season']), int(data['episode'])) if 'tvshowtitle' in data\
                else '%s' % self.imdb

            url = urlparse.urljoin(self.base_link,
                                   self.search_link.format(query))
            scraper = cfscrape.create_scraper()
            headers = {'User-Agent': client.agent(), 'Referer': self.base_link}
            r = scraper.get(url, headers=headers).content

            data = client.parseDOM(r, 'article')
            links = [client.parseDOM(i, 'a', ret='href')[0] for i in data if i]

            threads = []
            for i in links:
                threads.append(workers.Thread(self._get_sources, i))
            [i.start() for i in threads]
            [i.join() for i in threads]

            return self._sources
        except BaseException:
            return self._sources
Example #39
 def sources(self, url, hostDict, hostprDict):
     try:
         sources = []
         scraper = cfscrape.create_scraper()
         r = scraper.get(url).content
         try:
             match = re.compile('<iframe.+?src="(.+?)://(.+?)/(.+?)"').findall(r)
             for http, host, url in match:
                 host = host.replace('www.', '')
                 url = '%s://%s/%s' % (http, host, url)
                 if 'seehd' in host:
                     continue
                 sources.append({
                     'source': host,
                     'quality': 'HD',
                     'language': 'en',
                     'url': url,
                     'direct': False,
                     'debridonly': False
                 })
         except:
             return
     except Exception:
         return
     return sources
Example #40
    def resolve(self, url):
        if 'putlockers' in url:
            self.scraper = cfscrape.create_scraper()

            try:
                r = self.scraper.get(url).content
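                # the player URL may be hidden in a Base64-encoded document.write() call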
                v = re.findall('document.write\(Base64.decode\("(.+?)"\)',
                               r)[0]
                b64 = base64.b64decode(v)
                url = client.parseDOM(b64, 'iframe', ret='src')[0]
            except BaseException:
                r = self.scraper.get(url).content
                r = client.parseDOM(r, 'div', attrs={'class': 'player'})
                url = client.parseDOM(r, 'a', ret='href')[0]

            return url
        else:
            return url
Example #41
 def movie(self, imdb, title, localtitle, aliases, year):
     try:
         scraper = cfscrape.create_scraper()
         clean_title = cleantitle.geturl(title).replace('-', '+')
         url = urlparse.urljoin(self.base_link, (self.search_link % clean_title))
         r = scraper.get(url).content
         r = dom_parser2.parse_dom(r, 'div', {'id': 'movie-featured'})
         r = [dom_parser2.parse_dom(i, 'a', req=['href']) for i in r if i]
         r = [(i[0].attrs['href'], re.search('Release:\s*(\d+)', i[0].content)) for i in r if i]
         r = [(i[0], i[1].groups()[0]) for i in r if i[0] and i[1]]
         r = [(i[0], i[1]) for i in r if i[1] == year]
         if r:
             url = r[0][0]
             return url
         else:
             return
     except Exception:
         return
Example #42
    def movie(self, imdb, title, localtitle, aliases, year):
        try:
            scraper = cfscrape.create_scraper()
            url = urlparse.urljoin(self.base_link, self.search_link)
            url = url % title.replace(':', ' ').replace(' ', '+')

            search_results = client.request(url)
            tr_list = re.compile('(?i)<tr id="coverPreview.+?">(.+?)</tr>', re.DOTALL).findall(search_results)
            for row in tr_list:
                row_url = re.compile('href="(.+?)"', re.DOTALL).findall(row)[0]
                row_title = re.compile('href=".+?">(.+?)</a>', re.DOTALL).findall(row)[0]
                if cleantitle.get(title) in cleantitle.get(row_title):
                    if year in str(row):
                        ret_url = urlparse.urljoin(self.base_link, row_url)
                        return ret_url
            return
        except:
            return
Example #43
 def searchShow(self, title, season):
     try:
         sea = '%s season %d' % (title, int(season))
         query = self.search_link % urllib.quote_plus(cleantitle.getsearch(sea))
         url = urlparse.urljoin(self.base_link, query)
         headers = {'User-Agent': client.agent(),
                    'Referer': self.base_link}
         scraper = cfscrape.create_scraper()
         r = scraper.get(url, headers=headers).content
         #r = client.request(url)
         r = client.parseDOM(r, 'item')
         r = [(client.parseDOM(i, 'title')[0], i) for i in r if i]
         r = [i[1] for i in r if sea.lower() in i[0].replace('  ', ' ').lower()]
         links = re.findall('''<h4>(EP\d+)</h4>.+?src="(.+?)"''', r[0], re.I | re.DOTALL)
         links = [(i[0], i[1].lstrip()) for i in links if i]
         return links
     except BaseException:
         return
Example #44
 def search(self, title, year):
     try:
         url = urlparse.urljoin(
             self.base_link, self.search_link % (urllib.quote_plus(title)))
         headers = {'User-Agent': client.agent()}
         scraper = cfscrape.create_scraper()
         r = scraper.get(url, headers=headers).content
         r = dom_parser2.parse_dom(r, 'div', {'class': 'list_items'})[0]
         r = dom_parser2.parse_dom(r.content, 'li')
         r = [(dom_parser2.parse_dom(i, 'a', {'class': 'title'}))
              for i in r]
         r = [(i[0].attrs['href'], i[0].content) for i in r]
         r = [(urlparse.urljoin(self.base_link, i[0])) for i in r
              if cleantitle.get(title) in cleantitle.get(i[1])
              and year in i[1]]
         if r: return r[0]
         else: return
     except BaseException:
         return
Example #45
    def tvshow(self, imdb, tvdb, tvshowtitle, localtvshowtitle, aliases, year):
        try:
            clean_title = cleantitle.geturl(tvshowtitle)
            search_url = self.search_link % (clean_title.replace('-','+'), year)
            self.scraper = cfscrape.create_scraper()
            r = self.scraper.get(search_url).content

            if 'To proceed, you must allow popups' in r:
                for i in range(0, 5):
                    r = self.scraper.get(search_url).content
                    if 'To proceed, you must allow popups' not in r: break
            r = dom_parser2.parse_dom(r, 'div', attrs={'class': 'title'})

            r = [dom_parser2.parse_dom(i, 'a', req='href') for i in r]
            r = [(urlparse.urljoin(self.base_link, i[0].attrs['href'])) for i in r if tvshowtitle.lower() in i[0].content.lower() and year in i[0].content]
            url = r[0]
            return url
        except:
            return
Example #46
    def resolve(self, url):
        try:
            url = urlparse.urljoin(self.base_link, url)

            recap = recaptcha_app.recaptchaApp()

            key = recap.getSolutionWithDialog(
                url, "6LfV-ioUAAAAANOzmBWxMcw0tQQ4Ut6O6uA-Hi0d",
                self.recapInfo)
            print "Recaptcha2 Key: " + key

            if key != "" and "skipped" not in key.lower():
                link = cfscrape.create_scraper().get(url + '?token=%s' % key)
                if link.status_code != 404:
                    return link.url
            return
        except:
            source_faultlog.logFault(__name__, source_faultlog.tagResolve)
            return
Example #47
    def searchMovie(self, title, year, aliases, headers):
        scraper = cfscrape.create_scraper()
        title = cleantitle.normalize(title)
        url = urlparse.urljoin(self.base_link, self.search_link % cleantitle.geturl(title))
        r = scraper.get(url).content
        r = client.parseDOM(r, 'div', attrs={'class': 'ml-item'})
        r = zip(client.parseDOM(r, 'a', ret='href'), client.parseDOM(r, 'a', ret='oldtitle'))
        results = [(i[0], i[1], re.findall('\((\d{4})', i[1])) for i in r]
        try:
            r = [(i[0], i[1], i[2][0]) for i in results if len(i[2]) > 0]
            url = [i[0] for i in r if self.matchAlias(i[1], aliases) and (year == i[2])][0]
        except:
            url = None

        if url is None:
            url = [i[0] for i in results if self.matchAlias(i[1], aliases)][0]

        url = urlparse.urljoin(self.base_link, '%s/watching.html' % url)
        return url
Example #48
    def __init__(self):
        self.priority = 0
        self.language = ['en']
        self.domains = ['digbt.org']
        self.base_link = 'https://www.digbt.org'
        self.search_link = '/search/%s?c=video'
        self.scraper = cfscrape.create_scraper()

        self.pm_base_link = 'https://www.premiumize.me'
        self.pm_checkcache_link = '/api/torrent/checkhashes?apikey=%s&hashes[]=%s&apikey=%s'
        self.pm_dl_link = '/api/transfer/directdl?apikey=%s&src=%s'
        self.pm_api_key = control.setting('pmcached.apikey')

        self.rd_base_link = 'https://api.real-debrid.com'
        self.rd_checklib_link = '/rest/1.0/torrents?limit=100&auth_token=%s'
        self.rd_checkcache_link = '/rest/1.0/torrents/instantAvailability/%s?auth_token=%s'
        self.rd_addmagnet_link = '/rest/1.0/torrents/addMagnet?auth_token=%s'
        self.rd_torrentsinfo_link = '/rest/1.0/torrents/info/%s?auth_token=%s'
        self.rd_selectfiles_link = '/rest/1.0/torrents/selectFiles/%s?auth_token=%s'
        self.rd_unrestrict_link = '/rest/1.0/unrestrict/link/?auth_token=%s'
        self.rd_api_key = control.setting('rdcached.apikey')
Example #49
    def episode(self, url, imdb, tvdb, title, premiered, season, episode):
        try:
            if not url:
                return

            url = urlparse.urljoin(self.base_link, url)
            scraper = cfscrape.create_scraper()
            data = scraper.get(url).content
            data = client.parseDOM(data, 'ul', attrs={'class': 'episodios'})
            links = client.parseDOM(data, 'div', attrs={'class': 'episodiotitle'})
            sp = zip(client.parseDOM(data, 'div', attrs={'class': 'numerando'}), client.parseDOM(links, 'a', ret='href'))

            Sea_Epi = '%dx%d' % (int(season), int(episode))
            for i in sp:
                sep = i[0]
                if sep == Sea_Epi:
                    url = source_utils.strip_domain(i[1])

            return url
        except:
            return
Example #50
 def sources(self, url, hostDict, hostprDict):
     try:
         sources = []
         scraper = cfscrape.create_scraper()
         r = scraper.get(url).content
         try:
             qual = re.compile('class="quality">(.+?)<').findall(r)
             print qual
             quality = 'SD'  # default when no quality tag is found
             for i in qual:
                 if 'HD' in i:
                     quality = '1080p'
             match = re.compile('<iframe src="(.+?)"').findall(r)
             for url in match:
                 valid, host = source_utils.is_host_valid(url, hostDict)
                 sources.append({'source': host, 'quality': quality, 'language': 'en',
                                 'url': url, 'direct': False, 'debridonly': False})
         except:
             return
     except Exception:
         return
     return sources
Example #51
 def resolve(self, url):
     try:
         scraper = cfscrape.create_scraper()
         data_dict = urlparse.parse_qs(url)
         data_dict = dict([(i, data_dict[i][0]) if data_dict[i] else (i, '') for i in data_dict])
         link = data_dict['link']
         post = data_dict['post']
         referer = data_dict['referer']
         for i in range(0, 5):
             scraper.get(referer).content  # prime the session cookies via the referer page
             getheaders = {'Host': 'icefilms.unblocked.vc',
                           'Origin': 'https://icefilms.unblocked.mx',
                           'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.94 Safari/537.36',
                           'Content-type': 'application/x-www-form-urlencoded',
                           'Referer': referer}
             r = scraper.post(link, data=post, headers=getheaders).text
             match = re.search('url=(http.*)', r)
             if match: 
                 return urllib.unquote_plus(match.group(1))
         return
     except:
         return
Example #52
 def sources(self, url, hostDict, hostprDict):
     try:
         sources = []
         scraper = cfscrape.create_scraper()
         url = '%s/%s/' % (self.base_link, url)
         r = scraper.get(url).content
         try:
             match = re.compile('<iframe.+?src="(.+?)"').findall(r)
             for url in match:
                 sources.append({
                     'source': 'openload.co',
                     'quality': '1080p',
                     'language': 'en',
                     'url': url,
                     'direct': False,
                     'debridonly': False
                 })
         except:
             return
     except Exception:
         return
     return sources
Example #53
    def searchShow(self, title, season, aliases, headers):
        try:
            title = cleantitle.normalize(title)
            search = '%s Season %01d' % (title, int(season))
            url = urlparse.urljoin(
                self.base_link, self.search_link %
                urllib.quote_plus(cleantitle.getsearch(search)))

            s = cfscrape.create_scraper()
            r = s.get(url).content
            r = client.parseDOM(r, 'li', attrs={'class': 'movie-item'})
            r = zip(client.parseDOM(r, 'a', ret='href'),
                    client.parseDOM(r, 'a', ret='title'))
            r = [(i[0], i[1], re.findall('(.*?)\s+-\s+Season\s+(\d)', i[1]))
                 for i in r]
            r = [(i[0], i[1], i[2][0]) for i in r if len(i[2]) > 0]
            url = [
                i[0] for i in r
                if self.matchAlias(i[2][0], aliases) and i[2][1] == season
            ][0]
            return url
        except BaseException:
            return
Example #54
 def sources(self, url, hostDict, hostprDict):
     try:
         sources = []
         scraper = cfscrape.create_scraper()
         r = scraper.get(url).content
         try:
             match = re.compile("<iframe src='(.+?)://(.+?)/(.+?)'",
                                re.DOTALL).findall(r)
             for http, host, url in match:
                 url = '%s://%s/%s' % (http, host, url)
                 sources.append({
                     'source': host,
                     'quality': 'SD',
                     'language': 'en',
                     'url': url,
                     'direct': False,
                     'debridonly': False
                 })
         except:
             return
     except Exception:
         return
     return sources
Example #55
    def _createSession(self, userAgent=None, cookies=None, referer=None):
        # Try to spoof a header from a web browser.
        session = cfscrape.create_scraper()
        session.headers.update({
            'Accept': self.DEFAULT_ACCEPT,
            'User-Agent': userAgent,
            'Accept-Language': 'en-US,en;q=0.5',
            'Referer': referer if referer else self.base_link + '/',
            'Upgrade-Insecure-Requests': '1',
            'DNT': '1'
        })
        if cookies:
            session.cookies.update(cookies)
            session.cookies[''] = '__test'  # See _getSearch() for more info on this.

        return session
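
A hypothetical use of this session factory (client.agent() as the user agent is an assumption borrowed from the other examples here; DEFAULT_ACCEPT and base_link belong to the surrounding class):

    session = self._createSession(userAgent=client.agent(), referer=self.base_link)
    html = session.get(self.base_link + '/search').content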
Example #56
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []
            search = self.base_link + '/' + url + '/'
            scraper = cfscrape.create_scraper()
            r = scraper.get(search).content
            try:
                match = re.compile('src="//ok\.ru/videoembed/(.+?)"').findall(r)
                for vid in match:
                    url = 'https://ok.ru/videoembed/' + vid
                    sources.append({
                        'source': 'ok',
                        'quality': 'HD',
                        'language': 'en',
                        'url': url,
                        'direct': False,
                        'debridonly': False
                    })
            except:
                return
        except Exception:
            return
        return sources
Example #57
 def sources(self, url, hostDict, hostprDict):
     try:
         sources = []
         hostDict = hostprDict + hostDict
         scraper = cfscrape.create_scraper()
         r = scraper.get(url).content
         match = re.compile('<iframe.+?src="(.+?)://(.+?)/(.+?)"').findall(r)
         for http, host, url in match:
             host = host.replace('www.', '')
             url = '%s://%s/%s' % (http, host, url)
             valid, host = source_utils.is_host_valid(url, hostDict)
             if valid:
                 sources.append({
                     'source': host,
                     'quality': '720p',
                     'language': 'en',
                     'url': url,
                     'direct': False,
                     'debridonly': False
                 })
         return sources
     except:
         return sources
Example #58
    def links_found(self,urls):
        try:
            scraper = cfscrape.create_scraper()
            links = []
            if isinstance(urls, list):
                for item in urls:
                    query = urlparse.urljoin(self.base_link, item)
                    r = scraper.get(query).content
                    data = client.parseDOM(r, 'div', attrs={'id': 'playex'})
                    data = client.parseDOM(data, 'div', attrs={'id': 'option-\d+'})
                    links += client.parseDOM(data, 'iframe', ret='src')
                    print links

            else:
                query = urlparse.urljoin(self.base_link, urls)
                r = scraper.get(query).content
                data = client.parseDOM(r, 'div', attrs={'id': 'playex'})
                data = client.parseDOM(data, 'div', attrs={'id': 'option-\d+'})
                links += client.parseDOM(data, 'iframe', ret='src')

            return links
        except:
            return urls
Example #59
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url is None: return sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])
            aliases = eval(data['aliases'])
            mozhdr = {
                'User-Agent':
                'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.9.0.3) Gecko/2008092417 Firefox/3.0.3'
            }
            headers = mozhdr
            headers['X-Requested-With'] = 'XMLHttpRequest'

            self.s = cfscrape.create_scraper()
            if 'tvshowtitle' in data:
                episode = int(data['episode'])
                url = self.searchShow(data['tvshowtitle'], data['season'],
                                      aliases, headers)
            else:
                episode = 0
                url = self.searchMovie(data['title'], data['year'], aliases,
                                       headers)

            headers['Referer'] = url
            ref_url = url
            mid = re.findall('-(\d*)\.', url)[0]
            data = {'id': mid}
            r = self.s.post(url, headers=headers)
            try:
                u = urlparse.urljoin(self.base_link, self.server_link % mid)
                r = self.s.get(u, headers=mozhdr).content
                r = json.loads(r)['html']
                rl = client.parseDOM(r, 'div', attrs={'class': 'pas-list'})
                rh = client.parseDOM(r, 'div', attrs={'class': 'pas-header'})
                ids = client.parseDOM(rl, 'li', ret='data-id')
                servers = client.parseDOM(rl, 'li', ret='data-server')
                labels = client.parseDOM(rl, 'a', ret='title')
                r = zip(ids, servers, labels)
                rrr = zip(client.parseDOM(rh, 'li', ret='data-id'),
                          client.parseDOM(rh, 'li', ret='class'))
                types = {}
                for rr in rrr:
                    types[rr[0]] = rr[1]

                for eid in r:
                    try:
                        try:
                            ep = re.findall('episode.*?(\d+).*?',
                                            eid[2].lower())[0]
                        except:
                            ep = 0
                        if (episode == 0) or (int(ep) == episode):
                            t = str(int(time.time() * 1000))
                            quali = source_utils.get_release_quality(eid[2])[0]
                            if 'embed' in types[eid[1]]:
                                url = urlparse.urljoin(
                                    self.base_link, self.embed_link % (eid[0]))
                                xml = self.s.get(url, headers=headers).content
                                url = json.loads(xml)['src']
                                valid, hoster = source_utils.is_host_valid(
                                    url, hostDict)
                                if not valid: continue
                                q = source_utils.check_sd_url(url)
                                q = q if q != 'SD' else quali
                                sources.append({
                                    'source': hoster,
                                    'quality': q,
                                    'language': 'en',
                                    'url': url,
                                    'direct': False,
                                    'debridonly': False
                                })
                                continue
                            else:
                                url = urlparse.urljoin(
                                    self.base_link,
                                    self.token_link % (eid[0], mid, t))
                            script = self.s.get(url, headers=headers).content
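                            # the token endpoint obfuscates its x/y parameters in one of several script formats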
                            if '$_$' in script:
                                params = self.uncensored1(script)
                            elif script.startswith('[]') and script.endswith(
                                    '()'):
                                params = self.uncensored2(script)
                            elif '_x=' in script:
                                x = re.search('''_x=['"]([^"']+)''',
                                              script).group(1)
                                y = re.search('''_y=['"]([^"']+)''',
                                              script).group(1)
                                params = {'x': x, 'y': y}
                            else:
                                raise Exception()
                            u = urlparse.urljoin(
                                self.base_link, self.source_link %
                                (eid[0], params['x'], params['y']))
                            length = 0
                            count = 0
                            while length == 0 and count < 11:
                                r = self.s.get(u, headers=headers).text
                                length = len(r)
                                if length == 0: count += 1
                            uri = json.loads(r)['playlist'][0]['sources']
                            try:
                                uri = [i['file'] for i in uri if 'file' in i]
                            except:
                                try:
                                    uri = [uri['file']]
                                except:
                                    continue

                            for url in uri:
                                if 'googleapis' in url:
                                    q = source_utils.check_sd_url(url)
                                    sources.append({
                                        'source': 'gvideo',
                                        'quality': q,
                                        'language': 'en',
                                        'url': url,
                                        'direct': True,
                                        'debridonly': False
                                    })
                                    continue

                                valid, hoster = source_utils.is_host_valid(
                                    url, hostDict)
                                #urls, host, direct = source_utils.check_directstreams(url, hoster)
                                q = quali
                                if valid:
                                    #for z in urls:
                                    if hoster == 'gvideo':
                                        direct = True
                                        try:
                                            q = directstream.googletag(
                                                url)[0]['quality']
                                        except:
                                            pass
                                        url = directstream.google(url,
                                                                  ref=ref_url)
                                    else:
                                        direct = False
                                    sources.append({
                                        'source': hoster,
                                        'quality': q,
                                        'language': 'en',
                                        'url': url,
                                        'direct': direct,
                                        'debridonly': False
                                    })
                                else:
                                    sources.append({
                                        'source': 'CDN',
                                        'quality': q,
                                        'language': 'en',
                                        'url': url,
                                        'direct': True,
                                        'debridonly': False
                                    })
                    except:
                        pass
            except:
                pass

            return sources
        except:
            return sources
Example #60
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []
            scraper = cfscrape.create_scraper()

            if url is None:
                return sources

            if debrid.status() is False:
                return sources

            hostDict = hostprDict + hostDict

            data = parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])
            title = data['tvshowtitle'] if 'tvshowtitle' in data else data[
                'title']
            year = data['year']
            title = cleantitle.get_query(title)
            hdlr = 'S%02dE%02d' % (int(data['season']), int(
                data['episode'])) if 'tvshowtitle' in data else year
            premDate = ''

            query = '%s S%02dE%02d' % (
                title, int(data['season']), int(data['episode'])
            ) if 'tvshowtitle' in data else '%s %s' % (title, year)
            query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', '', query)
            query = query.replace(" ", "-")

            _base_link = self.base_link if int(
                year) >= 2021 else self.old_base_link

            #url = self.search_link % quote_plus(query)
            #url = urljoin(_base_link, url)

            url = _base_link + query

            r = scraper.get(url).content
            r = ensure_text(r, errors='replace')

            if r is None and 'tvshowtitle' in data:
                season = re.search('S(.*?)E', hdlr)
                season = season.group(1)
                query = title
                query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', '', query)
                query = query + "-S" + season
                query = query.replace("&", "and")
                query = query.replace("  ", " ")
                query = query.replace(" ", "-")
                url = _base_link + query
                r = scraper.get(url).content
                r = ensure_text(r, errors='replace')

            for loopCount in range(2):
                if loopCount == 1 or (r is None and 'tvshowtitle' in data):

                    #premDate = re.sub('[ \.]', '-', data['premiered'])
                    query = re.sub(r'[\\\\:;*?"<>|/\-\']', '', title)
                    query = query.replace("&", " and ").replace(
                        "  ", " ").replace(
                            " ",
                            "-")  # throw in extra spaces around & just in case
                    #query = query + "-" + premDate

                    url = _base_link + query
                    url = url.replace('The-Late-Show-with-Stephen-Colbert',
                                      'Stephen-Colbert')

                    r = scraper.get(url).content
                    r = ensure_text(r, errors='replace')

                posts = client.parseDOM(r, "div", attrs={"class": "content"})
                #hostDict = hostprDict + hostDict
                items = []
                for post in posts:
                    try:
                        u = client.parseDOM(post, 'a', ret='href')
                        for i in u:
                            try:
                                name = str(i)
                                if hdlr in name.upper():
                                    items.append(name)
                                #elif len(premDate) > 0 and premDate in name.replace(".", "-"):
                                #items.append(name)
                            except:
                                pass
                    except:
                        pass

                if len(items) > 0:
                    break

            seen_urls = set()

            for item in items:
                try:
                    info = []

                    url = str(item)
                    url = client.replaceHTMLCodes(url)
                    url = ensure_text(url)

                    if url in seen_urls:
                        continue
                    seen_urls.add(url)

                    host = url.replace("\\", "")
                    host2 = host.strip('"')
                    host = re.findall('([\w]+[.][\w]+)$', urlparse(host2.strip().lower()).netloc)[0]

                    if host not in hostDict:
                        continue
                    if any(x in host2
                           for x in ['.rar', '.zip', '.iso', '.part']):
                        continue

                    quality, info = source_utils.get_release_quality(host2)

                    #try:
                    #    size = re.findall('((?:\d+\,\d+\.\d+|\d+\.\d+|\d+\,\d+|\d+)\s*(?:GiB|MiB|GB|MB))', post)[0]
                    #    div = 1 if size.endswith(('GB', 'GiB')) else 1024
                    #    size = float(re.sub('[^0-9|/.|/,]', '', size.replace(',', '.'))) / div
                    #    size = '%.2f GB' % size
                    #    info.append(size)
                    #except:
                    #    pass

                    info = ' | '.join(info)

                    host = client.replaceHTMLCodes(host)
                    host = ensure_text(host)
                    sources.append({
                        'source': host,
                        'quality': quality,
                        'language': 'en',
                        'url': host2,
                        'info': info,
                        'direct': False,
                        'debridonly': True
                    })
                except:
                    pass
            check = [i for i in sources if not i['quality'] == 'CAM']
            if check:
                sources = check
            return sources
        except:
            return sources