Example #1
    def __search(self, titles, year):
        try:
            query = self.search_link % (urllib.quote_plus(
                cleantitle.getsearch(titles[0])))

            query = urlparse.urljoin(self.base_link, query)

            t = cleantitle.get(titles[0])
            scraper = cfscrape.create_scraper()
            data = scraper.get(query).content
            #data = client.request(query, referer=self.base_link)
            data = client.parseDOM(data, 'div', attrs={'class': 'result-item'})
            r = dom_parser.parse_dom(data, 'div', attrs={'class': 'title'})
            r = zip(
                dom_parser.parse_dom(r, 'a'),
                dom_parser.parse_dom(data, 'span', attrs={'class': 'year'}))

            url = []
            for a, yr in r:
                title = cleantitle.get(a[1])
                title = re.sub('(\d+p|4k|3d|hd|season\d+)', '', title)
                y = yr[1]
                link = a[0]['href']
                if 'season' in title: continue
                if t == title and y == year:
                    if 'season' in link:
                        url.append(source_utils.strip_domain(link))
                        print url[0]
                        return url[0]
                    else:
                        url.append(source_utils.strip_domain(link))

            return url
        except:
            return
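The pattern in this example recurs throughout the listing: build a search URL, fetch it through cfscrape (create_scraper() returns a requests-style session that first solves Cloudflare's JavaScript challenge), then parse the returned markup. A minimal standalone sketch of that flow, assuming a hypothetical site and result markup (Python 2):

import re
import urllib
import urlparse

import cfscrape

base_link = 'https://example-streaming-site.tv'  # hypothetical
search_link = '/movies/search?s=%s'

def search(title):
    query = urlparse.urljoin(base_link, search_link % urllib.quote_plus(title))
    scraper = cfscrape.create_scraper()  # requests session that passes Cloudflare
    html = scraper.get(query).content
    # pull (href, text) pairs out of the result markup with a plain regex
    return re.findall(r'<a href="([^"]+)"[^>]*>([^<]+)</a>', html)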
Example #2
 def __init__(self):
     self.priority = 1
     self.language = ['en']
     self.domains = ['xmovies8.tv', 'xmovies8.ru', 'xmovies8.es','xmovies8.nz']
     self.base_link = 'https://xmovies8.nu'
     self.search_link = '/movies/search?s=%s'
     self.scraper = cfscrape.create_scraper()
Example #3
 def __init__(self):
     self.priority = 1
     self.language = ['en']
     self.domains = ['0123putlocker.com']
     self.base_link = 'http://0123putlocker.com'
     self.search_link = '/search-movies/%s.html'
     self.scraper = cfscrape.create_scraper()
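Constructors like the two above only store configuration; the attributes are combined into a request URL at search time. A sketch of that combination using the values from Example #3 (the search term is made up):

import urllib
import urlparse

base_link = 'http://0123putlocker.com'
search_link = '/search-movies/%s.html'

url = urlparse.urljoin(base_link, search_link % urllib.quote_plus('big buck bunny'))
print url  # http://0123putlocker.com/search-movies/big+buck+bunny.html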
Example #4
    def episode(self, url, imdb, tvdb, title, premiered, season, episode):
        try:
            if not url:
                return

            url = urlparse.urljoin(self.base_link, url)
            scraper = cfscrape.create_scraper()
            data = scraper.get(url).content
            data = client.parseDOM(data, 'ul', attrs={'class': 'episodios'})
            links = client.parseDOM(data,
                                    'div',
                                    attrs={'class': 'episodiotitle'})
            sp = zip(
                client.parseDOM(data, 'div', attrs={'class': 'numerando'}),
                client.parseDOM(links, 'a', ret='href'))

            Sea_Epi = '%dx%d' % (int(season), int(episode))
            for i in sp:
                sep = i[0]
                if sep == Sea_Epi:
                    url = source_utils.strip_domain(i[1])

            return url
        except:
            return
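The 'numerando' label on these episode lists reads like '1x5' (season x episode), which is why the method formats its target as '%dx%d' before comparing. A toy illustration with made-up data:

season, episode = '1', '5'
sea_epi = '%dx%d' % (int(season), int(episode))
for numerando, href in [('1x4', '/episode-4'), ('1x5', '/episode-5')]:
    if numerando == sea_epi:
        print href  # /episode-5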
Example #5
    def links_found(self, urls):
        try:
            scraper = cfscrape.create_scraper()
            links = []
            if isinstance(urls, list):
                for item in urls:
                    query = urlparse.urljoin(self.base_link, item)
                    r = scraper.get(query).content
                    data = client.parseDOM(r, 'div', attrs={'id': 'playex'})
                    data = client.parseDOM(data,
                                           'div',
                                           attrs={'id': 'option-\d+'})
                    links += client.parseDOM(data, 'iframe', ret='src')
                    print links

            else:
                query = urlparse.urljoin(self.base_link, urls)
                r = scraper.get(query).content
                data = client.parseDOM(r, 'div', attrs={'id': 'playex'})
                data = client.parseDOM(data, 'div', attrs={'id': 'option-\d+'})
                links += client.parseDOM(data, 'iframe', ret='src')

            return links
        except:
            return urls
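Both branches of links_found run the same fetch-and-parse sequence; normalising the input to a list first leaves a single loop. A sketch under that assumption, with the DOM walk reduced to a plain iframe regex so it stands alone (base_link is hypothetical):

import re
import urlparse

import cfscrape

base_link = 'http://0123putlocker.com'  # hypothetical

def links_found(urls):
    # accept one path or a list of paths; normalise so one loop covers both
    if not isinstance(urls, list):
        urls = [urls]
    scraper = cfscrape.create_scraper()
    links = []
    for item in urls:
        html = scraper.get(urlparse.urljoin(base_link, item)).content
        links += re.findall(r'<iframe[^>]+src="([^"]+)"', html)
    return links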
Example #6
 def __init__(self):
     self.priority = 1
     self.language = ['en']
     self.domains = ['1080pmovie.com', 'watchhdmovie.net']
     self.base_link = 'https://watchhdmovie.net'
     self.search_link = '%s/%s-watch/'
     self.scraper = cfscrape.create_scraper()
Example #7
 def sources(self, url, hostDict, hostprDict):
     try:
         sources = []
         scraper = cfscrape.create_scraper()
         r = scraper.get(url).content
         try:
             qual = re.compile('class="quality">(.+?)<').findall(r)
             print qual
             quality = 'SD'  # default, so the loop below cannot leave it unset
             for i in qual:
                 if 'HD' in i:
                     quality = '1080p'
                 else:
                     quality = 'SD'
             match = re.compile('<iframe src="(.+?)"').findall(r)
             for url in match:
                 valid, host = source_utils.is_host_valid(url, hostDict)
                 sources.append({
                     'source': host,
                     'quality': quality,
                     'language': 'en',
                     'url': url,
                     'direct': False,
                     'debridonly': False
                 })
         except:
             return
     except Exception:
         return
     return sources
Example #8
 def __init__(self):
     self.priority = 1
     self.language = ['en']
     self.domains = ['http://getmypopcornnow.xyz', 'getmypopcornnow.xyz']
     self.base_link = 'http://pubfilmonline.net'
     self.ajax_link = '/wp-admin/admin-ajax.php'
     self.search_link = '/?s=%s'
     self.scraper = cfscrape.create_scraper()
Example #9
 def mz_server(self, url):
     try:
         scraper = cfscrape.create_scraper()
         urls = []
         data = scraper.get(url).content
         data = re.findall('''file:\s*["']([^"']+)",label:\s*"(\d{3,}p)"''',
                           data, re.DOTALL)
         for url, label in data:
             label = source_utils.label_to_quality(label)
             if label == 'SD': continue
             urls.append({'url': url, 'quality': label})
         return urls
     except:
         return url
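The regex in mz_server targets a jwplayer-style sources list in which each entry carries a file URL and a resolution label. A quick offline check against a made-up snippet:

import re

sample = '''file: "http://cdn.example/video.mp4",label: "720p"'''
print re.findall(r'''file:\s*["']([^"']+)",label:\s*"(\d{3,}p)"''', sample)
# [('http://cdn.example/video.mp4', '720p')]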
Example #10
 def resolve(self, url):
     try:
         scraper = cfscrape.create_scraper()
         data_dict = urlparse.parse_qs(url)
         data_dict = dict([(i, data_dict[i][0]) if data_dict[i] else (i, '') for i in data_dict])
         link = data_dict['link']
         post = data_dict['post']
         referer = data_dict['referer']
         for i in range(0, 5):
             client.request(referer)
             getheaders = {'Host': 'icefilms.unblocked.vc',
                           'Origin': 'https://icefilms.unblocked.mx',
                           'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.94 Safari/537.36',
                           'Content-type': 'application/x-www-form-urlencoded',
                           'Referer': referer}
             r = client.request(link, post=post, headers=getheaders)
             match = re.search('url=(http.*)', r)
             if match:
                 return urllib.unquote_plus(match.group(1))
         return
     except:
         return
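The url handed to resolve() is itself a query string: an earlier scraping stage packs link, post and referer into one value, and resolve() unpacks it with urlparse.parse_qs. A round-trip sketch with hypothetical values (Python 2):

import urllib
import urlparse

packed = urllib.urlencode({
    'link': 'https://example.com/player',
    'post': 'id=1234',
    'referer': 'https://example.com/show',
})
data_dict = urlparse.parse_qs(packed)
data_dict = dict([(i, data_dict[i][0]) if data_dict[i] else (i, '') for i in data_dict])
print data_dict['link']  # https://example.com/player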
Example #11
 def sources(self, url, hostDict, hostprDict):
     try:
         sources = []
         scraper = cfscrape.create_scraper()  # note: unused here; the request below goes through client.request
         r = client.request(url)
         try:
             match = re.compile("<iframe src='(.+?)://(.+?)/(.+?)'",
                                re.DOTALL).findall(r)
             for http, host, url in match:
                 url = '%s://%s/%s' % (http, host, url)
                 sources.append({
                     'source': host,
                     'quality': 'SD',
                     'language': 'en',
                     'url': url,
                     'direct': False,
                     'debridonly': False
                 })
         except:
             return
     except Exception:
         return
     return sources
Example #12
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []
            scraper = cfscrape.create_scraper()

            if url is None: return sources

            if not debrid.status(): raise Exception()

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])
            title = data['tvshowtitle'] if 'tvshowtitle' in data else data[
                'title']
            hdlr = 'S%02dE%02d' % (int(data['season']), int(
                data['episode'])) if 'tvshowtitle' in data else data['year']
            premDate = ''

            query = '%s S%02dE%02d' % (
                data['tvshowtitle'], int(data['season']),
                int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % (
                    data['title'], data['year'])
            query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', '', query)

            query = query.replace("&", "and")
            query = query.replace("  ", " ")
            query = query.replace(" ", "-")

            url = self.search_link % urllib.quote_plus(query)
            url = urlparse.urljoin(self.base_link, url)

            # the joined search URL is immediately overridden with a hardcoded mirror
            url = "http://rlsbb.ru/" + query
            if 'tvshowtitle' not in data: url = url + "-1080p"

            r = scraper.get(url).content

            if r is None and 'tvshowtitle' in data:
                season = re.search('S(.*?)E', hdlr)
                season = season.group(1)
                query = title
                query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', '', query)
                query = query + "-S" + season
                query = query.replace("&", "and")
                query = query.replace("  ", " ")
                query = query.replace(" ", "-")
                url = "http://rlsbb.ru/" + query
                r = scraper.get(url).content

            for loopCount in range(0, 2):
                if loopCount == 1 or (r is None and 'tvshowtitle' in data):
                    premDate = re.sub('[ \.]', '-', data['premiered'])
                    query = re.sub('[\\\\:;*?"<>|/\-\']', '',
                                   data['tvshowtitle'])
                    query = query.replace("&", " and ").replace("  ", " ").replace(" ", "-")
                    query = query + "-" + premDate

                    url = "http://rlsbb.to/" + query
                    url = url.replace('The-Late-Show-with-Stephen-Colbert',
                                      'Stephen-Colbert')

                    r = scraper.get(url).content

                posts = client.parseDOM(r, "div", attrs={"class": "content"})
                hostDict = hostprDict + hostDict
                items = []
                for post in posts:
                    try:
                        u = client.parseDOM(post, 'a', ret='href')
                        for i in u:
                            try:
                                name = str(i)
                                if hdlr in name.upper():
                                    items.append(name)
                                elif len(premDate) > 0 and premDate in name.replace(".", "-"):
                                    items.append(name)

                            except:
                                pass
                    except:
                        pass

                if len(items) > 0: break

            seen_urls = set()

            for item in items:
                try:
                    info = []

                    url = str(item)
                    url = client.replaceHTMLCodes(url)
                    url = url.encode('utf-8')

                    if url in seen_urls: continue
                    seen_urls.add(url)

                    host = url.replace("\\", "")
                    host2 = host.strip('"')
                    host = re.findall(
                        '([\w]+[.][\w]+)$',
                        urlparse.urlparse(host2.strip().lower()).netloc)[0]

                    if host not in hostDict: raise Exception()
                    if any(x in host2 for x in ['.rar', '.zip', '.iso']):
                        continue

                    if '720p' in host2:
                        quality = 'HD'
                    elif '1080p' in host2:
                        quality = '1080p'
                    elif '2160p' in host2:
                        quality = '4K'
                    else:
                        quality = 'SD'

                    info = ' | '.join(info)
                    host = client.replaceHTMLCodes(host)
                    host = host.encode('utf-8')
                    sources.append({
                        'source': host,
                        'quality': quality,
                        'language': 'en',
                        'url': host2,
                        'info': info,
                        'direct': False,
                        'debridonly': False
                    })

                except:
                    pass
            check = [i for i in sources if i['quality'] != 'CAM']
            if check: sources = check
            return sources
        except:
            return sources
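The quality ladder in the middle of this method (720p as HD, 1080p, 2160p as 4K, otherwise SD) is self-contained enough to lift out. A sketch of it as a helper, keeping the original branch order; it assumes at most one resolution tag per release name:

def quality_from_name(name):
    if '720p' in name:
        return 'HD'
    elif '1080p' in name:
        return '1080p'
    elif '2160p' in name:
        return '4K'
    return 'SD'

print quality_from_name('Show.S01E01.1080p.WEB.x264')  # 1080p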
Example #13
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []
            # If the movie/tvshow search returned no link, there is nothing to do here.
            if url is None or len(url) == 0:
                return sources

            # Time to get scraping
            title = url['title']
            year = url['year']
            # Build up the search url
            searchLink = self.search_link + title + ' ' + year
            url = urlparse.urljoin(self.base_link, searchLink)

            # Site uses cf, so we need cfscrape
            scraper = cfscrape.create_scraper()
            html = scraper.get(url).content
            result_soup = BeautifulSoup(html, "html.parser")

            # Parse the table of results
            table = result_soup.find("table")
            table_body = table.find("tbody")
            rows = table_body.findAll("tr")
            fileLinks = []
            for row in rows:
                cols = row.findAll("td")
                for col in cols:
                    links = col.findAll("a", href=True)
                    for link in links:
                        if "/file/" in link['href']:
                            # Use this one
                            fileLinks.append(link['href'])
                            break
            # Retrieve actual links from result pages
            actualLinks = []
            for fileLink in fileLinks:
                actual_url = urlparse.urljoin(self.base_link, fileLink)
                html = scraper.get(actual_url.encode('ascii')).content
                linkSoup = BeautifulSoup(html, "html.parser")
                link = str(
                    linkSoup.find(
                        "button",
                        {"title": "Copy Link"})['data-clipboard-text'])
                # Exclude zip and rar files
                if link.lower().endswith('rar') or link.lower().endswith(
                        'zip'):
                    continue
                else:
                    actualLinks.append(link)

            for link in actualLinks:
                quality, info = source_utils.get_release_quality(link, url)
                sources.append({
                    'source': 'DIRECT',
                    'quality': quality,
                    'language': 'en',
                    'url': link,
                    'info': info,
                    'direct': True,
                    'debridonly': False
                })
            return sources
        except Exception as e:
            # log_utils.log('EXCEPTION MSG: '+str(e))
            return sources
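The BeautifulSoup traversal above (table, rows, cells, anchors filtered on '/file/') can be exercised offline; a minimal self-contained check with toy markup:

from bs4 import BeautifulSoup

html = '<table><tbody><tr><td><a href="/file/abc">x</a></td></tr></tbody></table>'
soup = BeautifulSoup(html, 'html.parser')
hrefs = [a['href'] for a in soup.find('table').findAll('a', href=True)
         if '/file/' in a['href']]
print hrefs  # ['/file/abc']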
Example #14
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url is None: return sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
            aliases = eval(data['aliases'])
            mozhdr = {
                'User-Agent': 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.9.0.3) Gecko/2008092417 Firefox/3.0.3'}
            headers = mozhdr
            headers['X-Requested-With'] = 'XMLHttpRequest'

            self.s = cfscrape.create_scraper()
            if 'tvshowtitle' in data:
                episode = int(data['episode'])
                url = self.searchShow(data['tvshowtitle'], data['season'], aliases, headers)
            else:
                episode = 0
                url = self.searchMovie(data['title'], data['year'], aliases, headers)

            headers['Referer'] = url
            ref_url = url
            mid = re.findall('-(\d*)\.', url)[0]
            data = {'id': mid}
            r = self.s.post(url, headers=headers)
            try:
                u = urlparse.urljoin(self.base_link, self.server_link % mid)
                r = self.s.get(u, headers=mozhdr).content
                r = json.loads(r)['html']
                rl = client.parseDOM(r, 'div', attrs={'class': 'pas-list'})
                rh = client.parseDOM(r, 'div', attrs={'class': 'pas-header'})
                ids = client.parseDOM(rl, 'li', ret='data-id')
                servers = client.parseDOM(rl, 'li', ret='data-server')
                labels = client.parseDOM(rl, 'a', ret='title')
                r = zip(ids, servers, labels)
                rrr = zip(client.parseDOM(rh, 'li', ret='data-id'), client.parseDOM(rh, 'li', ret='class'))
                types = {}
                for rr in rrr:
                    types[rr[0]] = rr[1]

                for eid in r:
                    try:
                        try:
                            ep = re.findall('episode.*?(\d+).*?', eid[2].lower())[0]
                        except:
                            ep = 0
                        if (episode == 0) or (int(ep) == episode):
                            t = str(int(time.time() * 1000))
                            quali = source_utils.get_release_quality(eid[2])[0]
                            if 'embed' in types[eid[1]]:
                                url = urlparse.urljoin(self.base_link, self.embed_link % (eid[0]))
                                xml = self.s.get(url, headers=headers).content
                                url = json.loads(xml)['src']
                                valid, hoster = source_utils.is_host_valid(url, hostDict)
                                if not valid: continue
                                q = source_utils.check_sd_url(url)
                                q = q if q != 'SD' else quali
                                sources.append(
                                    {'source': hoster, 'quality': q, 'language': 'en', 'url': url, 'direct': False,
                                     'debridonly': False})
                                continue
                            else:
                                url = urlparse.urljoin(self.base_link, self.token_link % (eid[0], mid, t))
                            script = self.s.get(url, headers=headers).content
                            if '$_$' in script:
                                params = self.uncensored1(script)
                            elif script.startswith('[]') and script.endswith('()'):
                                params = self.uncensored2(script)
                            elif '_x=' in script:
                                x = re.search('''_x=['"]([^"']+)''', script).group(1)
                                y = re.search('''_y=['"]([^"']+)''', script).group(1)
                                params = {'x': x, 'y': y}
                            else:
                                raise Exception()
                            u = urlparse.urljoin(self.base_link, self.source_link % (eid[0], params['x'], params['y']))
                            length = 0
                            count = 0
                            while length == 0 and count < 11:
                                r = self.s.get(u, headers=headers).text
                                length = len(r)
                                if length == 0: count += 1
                            uri = json.loads(r)['playlist'][0]['sources']
                            try:
                                uri = [i['file'] for i in uri if 'file' in i]
                            except:
                                try:
                                    uri = [uri['file']]
                                except:
                                    continue

                            for url in uri:
                                if 'googleapis' in url:
                                    q = source_utils.check_sd_url(url)
                                    sources.append(
                                        {'source': 'gvideo', 'quality': q, 'language': 'en', 'url': url, 'direct': True,
                                         'debridonly': False})
                                    continue

                                valid, hoster = source_utils.is_host_valid(url, hostDict)
                                # urls, host, direct = source_utils.check_directstreams(url, hoster)
                                q = quali
                                if valid:
                                    # for z in urls:
                                    if hoster == 'gvideo':
                                        direct = True
                                        try:
                                            q = directstream.googletag(url)[0]['quality']
                                        except:
                                            pass
                                        url = directstream.google(url, ref=ref_url)
                                    else:
                                        direct = False
                                    sources.append(
                                        {'source': hoster, 'quality': q, 'language': 'en', 'url': url, 'direct': direct,
                                         'debridonly': False})
                                else:
                                    sources.append(
                                        {'source': 'CDN', 'quality': q, 'language': 'en', 'url': url, 'direct': True,
                                         'debridonly': False})
                    except:
                        pass
            except:
                pass

            return sources
        except:
            return sources
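The while length == 0 loop near the end polls a backend that may answer with an empty body while the stream link is still being generated. The same idea as a small helper (names and the attempt count are hypothetical):

def get_with_retry(session, url, headers, attempts=10):
    # poll until the body is non-empty or the attempts run out
    for _ in range(attempts):
        text = session.get(url, headers=headers).text
        if len(text) > 0:
            return text
    return ''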
Example #15
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url is None: return sources

            data = urlparse.parse_qs(url)

            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            title = data['tvshowtitle'] if 'tvshowtitle' in data else data[
                'title']

            hdlr = 'Season %d' % int(
                data['season']) if 'tvshowtitle' in data else data['year']

            query = '%s S%02dE%02d' % (
                data['tvshowtitle'], int(data['season']),
                int(data['episode'])) if 'tvshowtitle' in data else '%s %s' % (
                    data['title'], data['year'])

            query = re.sub('(\\\|/| -|:|;|\*|\?|"|\'|<|>|\|)', ' ', query)

            url = self.search_link % urllib.quote_plus(query)
            url = urlparse.urljoin(self.base_link, url)

            self.scraper = cfscrape.create_scraper()
            r = self.scraper.get(url).content
            posts = client.parseDOM(r, 'li')

            for post in posts:
                try:
                    # use a separate name for the parsed anchor so that
                    # data['season'] / data['episode'] below still read the
                    # original query-string dict
                    anchor = dom_parser2.parse_dom(post, 'a', req='href')[0]
                    t = re.findall('title=.+?>\s*(.+?)$', anchor.content,
                                   re.DOTALL)[0]
                    t2 = re.sub(
                        '(\.|\(|\[|\s)(\d{4}|S\d*E\d*|S\d*|3D)(\.|\)|\]|\s|)(.+|)',
                        '', t)
                    y = re.findall(
                        '[\.|\(|\[|\s](S\d*E\d*|Season\s*\d*|\d{4})[\.|\)|\]|\s]',
                        t)[-1]

                    if not (cleantitle.get_simple(
                            t2.replace('720p / 1080p', ''))
                            == cleantitle.get(title) and y == hdlr):
                        raise Exception()

                    link = client.parseDOM(post, 'a', ret='href')[0]
                    if 'Episodes' not in post: u = self.movie_links(link)
                    else:
                        sep = 'S%02dE%02d' % (int(
                            data['season']), int(data['episode']))
                        u = self.show_links(link, sep)

                    for item in u:
                        quality, info = source_utils.get_release_quality(
                            item[0][0], None)
                        try:
                            size = re.findall(
                                '((?:\d+\.\d+|\d+\,\d+|\d+) [M|G]B)',
                                item[0][1])[-1]
                            div = 1 if size.endswith(' GB') else 1024
                            size = float(re.sub('[^0-9|/.|/,]', '',
                                                size)) / div
                            size = '%.2f GB' % size
                            info.append(size)
                        except:
                            pass

                        info = ' | '.join(info)

                        url = item[0][0]
                        url = client.replaceHTMLCodes(url)
                        url = url.encode('utf-8')

                        sources.append({
                            'source': 'popcorn',
                            'quality': quality,
                            'language': 'en',
                            'url': url,
                            'info': info,
                            'direct': True,
                            'debridonly': False
                        })
                except:
                    pass

            return sources
        except:
            return sources
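The size handling in this method normalises '700 MB' / '1.40 GB' style strings to a uniform GB figure by dividing MB values by 1024. A self-contained sketch; the comma-to-dot replacement is an added assumption for release names written as '1,40 GB':

import re

def size_in_gb(text):
    size = re.findall(r'((?:\d+\.\d+|\d+,\d+|\d+) [MG]B)', text)[-1]
    div = 1 if size.endswith(' GB') else 1024
    return '%.2f GB' % (float(re.sub(r'[^0-9.,]', '', size).replace(',', '.')) / div)

print size_in_gb('Show.S01E01.720p 700 MB')  # 0.68 GB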