Example #1
def check_directstreams(url, hoster='', quality='SD'):
    urls = []
    host = hoster

    if 'google' in url or any(x in url for x in ['youtube.', 'docid=']):
        urls = directstream.google(url)
        if not urls:
            tag = directstream.googletag(url)
            if tag: urls = [{'quality': tag[0]['quality'], 'url': url}]
        if urls: host = 'gvideo'
    elif 'ok.ru' in url:
        urls = directstream.odnoklassniki(url)
        if urls: host = 'vk'
    elif 'vk.com' in url:
        urls = directstream.vk(url)
        if urls: host = 'vk'
    elif any(x in url for x in ['akamaized', 'blogspot', 'ocloud.stream']):
        urls = [{'url': url}]
        if urls: host = 'CDN'

    direct = bool(urls)

    if not urls: urls = [{'quality': quality, 'url': url}]

    return urls, host, direct
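
Examples throughout this collection index the result of directstream.googletag(url) as [0]['quality'] (and often [0]['url']), so the resolver evidently returns a list of dicts. The snippet below is a minimal, self-contained sketch of that consumption pattern with mocked data standing in for the real resolver; the URLs and the pick_stream helper are illustrative only and not part of any example here.

def pick_stream(tag_results, fallback_url, quality='SD'):
    # Mirrors the fallback in check_directstreams: use the resolved stream when
    # one exists, otherwise wrap the raw URL with the caller-supplied quality.
    if tag_results:
        return {'quality': tag_results[0]['quality'], 'url': tag_results[0]['url']}
    return {'quality': quality, 'url': fallback_url}

# Mocked googletag-style result; the real call would be directstream.googletag(url).
mocked = [{'quality': 'HD', 'url': 'http://example.invalid/videoplayback'}]
print(pick_stream(mocked, 'http://example.invalid/page'))      # resolved stream
print(pick_stream([], 'http://example.invalid/page', 'SD'))    # fallback entry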
Example #2
    def get_sources(self, url, hosthdDict, hostDict, locDict):
        try:
            sources = []

            if url == None: return sources

            referer = urlparse.urljoin(self.base_link, url)

            try:
                post = urlparse.parse_qs(urlparse.urlparse(referer).query).values()[0][0]
            except:
                post = referer.strip('/').split('/')[-1].split('watch_', 1)[-1].rsplit('#')[0].rsplit('.')[0]

            post = urllib.urlencode({'v': post})

            url = urlparse.urljoin(self.base_link, '/video_info/iframe')

            r = client.request(url, post=post, XHR=True, referer=url)
            r = json.loads(r).values()
            r = [urllib.unquote(i.split('url=')[-1]) for i in r]

            for i in r:
                try:
                    sources.append(
                        {'source': 'gvideo', 'quality': directstream.googletag(i)[0]['quality'], 'provider': 'Afdah',
                         'url': i, 'direct': True, 'debridonly': False})
                except:
                    pass

            return sources
        except:
            return sources
Example #3
    def get_sources(self, url, hostDict, hostprDict, locDict):
        try:
            sources = []

            if url == None: return sources

            url = urlparse.urljoin(self.base_link, url)

            content = re.compile('(.+?)\?episode=\d*$').findall(url)
            content = 'movie' if len(content) == 0 else 'episode'

            try:
                url, episode = re.compile('(.+?)\?episode=(\d*)$').findall(
                    url)[0]
            except:
                pass

            result = client.request(url)
            result = result.replace('"target="EZWebPlayer"',
                                    '" target="EZWebPlayer"')
            url = zip(
                client.parseDOM(result,
                                'a',
                                ret='href',
                                attrs={'target': 'EZWebPlayer'}),
                client.parseDOM(result, 'a', attrs={'target': 'EZWebPlayer'}))
            url = [(i[0], re.compile('(\d+)').findall(i[1])) for i in url]
            url = [(i[0], i[1][-1]) for i in url if len(i[1]) > 0]

            if content == 'episode':
                url = [i for i in url if i[1] == '%01d' % int(episode)]

            links = [client.replaceHTMLCodes(i[0]) for i in url]

            for u in links:
                try:
                    result = client.request(u)
                    result = re.findall('sources\s*:\s*\[(.+?)\]', result)[0]
                    result = re.findall('"file"\s*:\s*"(.+?)"', result)

                    for url in result:
                        try:
                            url = url.replace('\\', '')
                            url = directstream.googletag(url)[0]
                            sources.append({
                                'source': 'gvideo',
                                'quality': url['quality'],
                                'provider': 'Pubfilm',
                                'url': url['url'],
                                'direct': True,
                                'debridonly': False
                            })
                        except:
                            pass
                except:
                    pass

            return sources
        except:
            return sources
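
A recurring trick in these scrapers (Examples #3, #9, #15, #16, #22) is normalising episode numbers through '%01d' % int(...) so that '05', '5' and 5 all compare equal. The same idea in isolation, with made-up values:

episode = '05'                                     # as it might arrive from a query string (made up)
candidates = [('/ep-1', '1'), ('/ep-5', '5'), ('/ep-12', '12')]
matches = [link for link, num in candidates if num == '%01d' % int(episode)]
print(matches)                                     # ['/ep-5']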
Example #4
    def get_sources(self, url, hostDict, hostprDict, locDict):
        try:
            sources = []

            if url == None: return sources

            url = urlparse.urljoin(self.base_link, url)

            for i in range(3):
                result = client.request(url)
                if not result == None: break

            result = re.sub(r'[^\x00-\x7F]+', ' ', result)

            pages = client.parseDOM(result, 'div', attrs = {'class': 'menu'})
            pages = client.parseDOM(pages, 'div', ret='data-id')

            for page in pages:
                try:
                    url = urlparse.urljoin(self.base_link, self.video_link)
                    post = 'id=%s' % page

                    for i in range(3):
                        result = client.request(url, post=post)
                        if not result == None: break

                    url = client.parseDOM(result, 'iframe', ret='src')[0]

                    if 'openload.io' in url or 'openload.co' in url or 'oload.tv' in url:
                        sources.append({'source': 'openload.co', 'quality': 'HD', 'provider': 'Sezonlukdizi', 'url': url, 'direct': False, 'debridonly': False})

                    if not '.asp' in url: raise Exception()

                    for i in range(3):
                        result = client.request(url)
                        if not result == None: break

                    captions = re.search('kind\s*:\s*(?:\'|\")captions(?:\'|\")', result)
                    if not captions: raise Exception()

                    links = re.findall('"?file"?\s*:\s*"([^"]+)"', result)

                    for url in links:
                        try:
                            if not url.startswith('http'):
                                url = client.request(url, output='geturl')
                            url = url.replace('\\', '')
                            url = directstream.googletag(url)[0]
                            sources.append({'source': 'gvideo', 'quality': url['quality'], 'provider': 'Sezonlukdizi', 'url': url['url'], 'direct': True, 'debridonly': False})
                        except:
                            pass

                except:
                    pass

            return sources
        except:
            return sources
Example #5
    def get_sources(self, url, hostDict, hostprDict, locDict):
        try:
            sources = []

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

            url = data['url']
            episode = int(data['episode'])

            if url == None: return sources

            url = urlparse.urljoin(self.base_link, url)
            p = client.request(url, timeout='10')

            if episode > 0:
                r = client.parseDOM(p, 'div', attrs={'class': 'ep_link.+?'})[0]
                r = zip(client.parseDOM(r, 'a', ret='href'), client.parseDOM(r, 'a'))
                r = [(i[0], re.findall('Episode\s+(\d+)', i[1])) for i in r]
                r = [(i[0], i[1][0]) for i in r]
                r = [i[0] for i in r if int(i[1]) == episode][0]
                p = client.request(r, timeout='10')

            p = re.findall('load_player\((\d+)\)', p)
            p = urllib.urlencode({'id': p[0]})
            headers = {'Referer': url}
            r = urlparse.urljoin(self.base_link, '/ajax/movie/load_player_v3')
            r = client.request(r, post=p, headers=headers, XHR=True, timeout='10')
            url = json.loads(r)['value']
            url = client.request(url, headers=headers, XHR=True, output='geturl', timeout='10')

            if 'openload.io' in url or 'openload.co' in url or 'oload.tv' in url:
                sources.append({'source': 'openload.co', 'quality': 'HD', 'provider': '1movies', 'url': url, 'direct': False,'debridonly': False})
                raise Exception()

            r = client.request(url, headers=headers, XHR=True, timeout='10')
            try:
                src = json.loads(r)['playlist'][0]['sources']
                links = [i['file'] for i in src if 'file' in i]
                for i in links:
                    try:
                        sources.append(
                            {'source': 'gvideo', 'quality': directstream.googletag(i)[0]['quality'], 'provider': '1movies',
                             'url': i, 'direct': True, 'debridonly': False})
                    except:
                        pass
            except:
                pass

            return sources
        except:
            return sources
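
Examples #5, #11, #14 and others all open with the same idiom: flatten the value lists produced by urlparse.parse_qs into plain strings. Shown here on its own with a made-up query string (urlparse is the Python 2 module used throughout these examples):

import urlparse

query = 'url=%2Fmovie%2Fexample&episode=3'         # made-up plugin URL payload
data = urlparse.parse_qs(query)                    # {'url': ['/movie/example'], 'episode': ['3']}
data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
print(data['url'])                                 # /movie/example
print(int(data['episode']))                        # 3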
Example #6
    def resolve(self, url, resolverList=None):
        logger.debug('%s ORIGINAL URL [%s]' % (__name__, url))
        try:
            url = urlparse.urljoin(self.base_link, url)
            result = client.request(url)
        except:
            pass

        try:
            url = re.compile(
                '"?file"?\s*=\s*"(.+?)"\s+"?label"?\s*=\s*"(\d+)p?"').findall(
                    result)

            url = [(int(i[1]), i[0]) for i in url]
            url = sorted(url, key=lambda k: k[0])
            url = url[-1][1]

            try:
                u = client.request(url, output='headers',
                                   redirect=False)['Location']
            except:
                u = client.request(url, output='geturl')

            q = directstream.googletag(u)[0]['quality']

            url = u

            if 'requiressl=yes' in url:
                url = url.replace('http://', 'https://')
            else:
                url = url.replace('https://', 'http://')
            return url
        except:
            pass

        try:
            url = re.compile('file\s*=\s*"(.+?)"').findall(result)[0]
            if self.base_link in url: raise Exception()
            url = client.replaceHTMLCodes(url)
            return url
        except:
            pass

        try:
            url = json.loads(result)['embed_url']
            logger.debug('%s RESOLVED URL [%s]' % (__name__, url))
            return url
        except:
            pass
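
The resolve() helper above (duplicated in Example #12) picks the best stream by sorting the scraped (file, label) pairs on the numeric label and taking the last entry. The same selection logic in isolation, fed with made-up pairs instead of a scraped page:

# (file, label) pairs as the regex in resolve() would capture them; values are made up.
pairs = [('http://example.invalid/sd', '360'),
         ('http://example.invalid/fhd', '1080'),
         ('http://example.invalid/hd', '720')]

streams = [(int(label), link) for link, label in pairs]
streams = sorted(streams, key=lambda k: k[0])      # ascending by resolution label
best = streams[-1][1]                              # highest label wins
print(best)                                        # http://example.invalid/fhd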
Example #7
    def sources(self, url, hostDict, hostprDict):
        sources = []

        try:
            if not url:
                return sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])
            url = urlparse.urljoin(self.base_link,
                                   '/sources?%s' % urllib.urlencode(data))
            r = client.request(url)
            if not r: raise Exception()
            result = json.loads(r)
            try:
                gvideos = [i['url'] for i in result if i['source'] == 'GVIDEO']
                for url in gvideos:
                    gtag = directstream.googletag(url)[0]
                    sources.append({
                        'source': 'gvideo',
                        'quality': gtag['quality'],
                        'language': 'en',
                        'url': gtag['url'],
                        'direct': True,
                        'debridonly': False
                    })
            except:
                pass

            try:
                oloads = [i['url'] for i in result if i['source'] == 'CDN']
                for url in oloads:
                    sources.append({
                        'source': 'CDN',
                        'quality': 'HD',
                        'language': 'en',
                        'url': url,
                        'direct': False,
                        'debridonly': False
                    })
            except:
                pass

            return sources
        except:
            return sources
Example #8
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url == None: return sources

            url = urlparse.urljoin(self.base_link, url)

            r = client.request(url)

            r = client.parseDOM(r, 'iframe', ret='src')

            for u in r:
                try:
                    if not u.startswith('http') and not 'vidstreaming' in u:
                        raise Exception()

                    url = client.request(u)
                    url = client.parseDOM(url, 'source', ret='src')

                    for i in url:
                        try:
                            sources.append({
                                'source': 'gvideo',
                                'quality': directstream.googletag(i)[0]['quality'],
                                'language': 'en',
                                'url': i,
                                'direct': True,
                                'debridonly': False
                            })
                        except:
                            pass
                except:
                    pass

            return sources
        except:
            failure = traceback.format_exc()
            print('GoGoAnime - Exception: \n' + str(failure))
            return sources
Example #9
    def get_sources(self, url, hostDict, hostprDict, locDict):
        try:
            sources = []

            if url == None: return sources

            url = urlparse.urljoin(self.base_link, url)

            content = re.compile('(.+?)\?episode=\d*$').findall(url)
            content = 'movie' if len(content) == 0 else 'episode'

            try: url, episode = re.compile('(.+?)\?episode=(\d*)$').findall(url)[0]
            except: pass

            result = client.request(url)
            result = result.replace('"target="EZWebPlayer"', '" target="EZWebPlayer"')
            url = zip(client.parseDOM(result, 'a', ret='href', attrs={'target': 'EZWebPlayer'}), client.parseDOM(result, 'a', attrs={'target': 'EZWebPlayer'}))
            url = [(i[0], re.compile('(\d+)').findall(i[1])) for i in url]
            url = [(i[0], i[1][-1]) for i in url if len(i[1]) > 0]

            if content == 'episode':
                url = [i for i in url if i[1] == '%01d' % int(episode)]

            links = [client.replaceHTMLCodes(i[0]) for i in url]


            for u in links:
                try:
                    result = client.request(u)
                    result = re.findall('sources\s*:\s*\[(.+?)\]', result)[0]
                    result = re.findall('"file"\s*:\s*"(.+?)"', result)

                    for url in result:
                        try:
                            url = url.replace('\\', '')
                            url = directstream.googletag(url)[0]
                            sources.append({'source': 'gvideo', 'quality': url['quality'], 'provider': 'Pubfilm', 'url': url['url'], 'direct': True, 'debridonly': False})
                        except:
                            pass
                except:
                    pass

            return sources
        except:
            return sources
Example #10
    def get_sources(self, url, hostDict, hostprDict, locDict):
        try:
            sources = []

            if url == None: return sources

            url = urlparse.urljoin(self.base_link, url)
            url = path = re.sub('/watching.html$', '', url.strip('/'))
            url = referer = url + '/watching.html'

            p = client.request(url)
            p = re.findall("data\s*:\s*{\s*id:\s*(\d+),\s*episode_id:\s*(\d+),\s*link_id:\s*(\d+)", p)[0]
            p = urllib.urlencode({'id': p[0], 'episode_id': p[1], 'link_id': p[2], '_': int(time.time() * 1000)})

            headers = {
            'Accept-Formating': 'application/json, text/javascript',
            'Server': 'cloudflare-nginx',
            'Referer': referer}

            r = urlparse.urljoin(self.base_link, '/ajax/movie/load_episodes')
            r = client.request(r, post=p, headers=headers, XHR=True)
            r = re.findall("load_player\(\s*'([^']+)'\s*,\s*'?(\d+)\s*'?", r)
            r = [i for i in r if int(i[1]) >= 720]

            for u in r:
                try:
                    p = urllib.urlencode({'id': u[0], 'quality': u[1], '_': int(time.time() * 1000)})
                    u = urlparse.urljoin(self.base_link, '/ajax/movie/load_player_v2')

                    u = client.request(u, post=p, headers=headers, XHR=True)
                    u = json.loads(u)['playlist']
                    u = client.request(u, headers=headers, XHR=True)
                    u = json.loads(u)['playlist'][0]['sources']
                    u = [i['file'] for i in u if 'file' in i]

                    for i in u:
                        try: sources.append({'source': 'gvideo', 'quality': directstream.googletag(i)[0]['quality'], 'provider': 'XMovies', 'url': i, 'direct': True, 'debridonly': False})
                        except: pass
                except:
                    pass

            return sources
        except:
            return sources
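
Examples #10 and #21 post form-encoded parameters with a millisecond timestamp appended as a cache-buster. The encoding step on its own; the id values below are made up, and urllib.urlencode is the Python 2 call the examples use:

import time
import urllib

post = urllib.urlencode({'id': '12345', 'episode_id': '678', 'link_id': '9',
                         '_': int(time.time() * 1000)})
print(post)   # e.g. id=12345&episode_id=678&link_id=9&_=1500000000000 (key order may vary)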
Example #11
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url == None: return sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

            title = data['title']
            year = data['year']

            h = {'User-Agent': client.randomagent()}

            v = '%s_%s' % (cleantitle.geturl(title).replace('-', '_'), year)

            url = '/watch_%s.html' % v
            url = urlparse.urljoin(self.base_link, url)

            c = client.request(url, headers=h, output='cookie')
            c = client.request(urlparse.urljoin(self.base_link, '/av'), cookie=c, output='cookie', headers=h,
                               referer=url)
            # c = client.request(url, cookie=c, headers=h, referer=url, output='cookie')

            post = urllib.urlencode({'v': v})
            u = urlparse.urljoin(self.base_link, '/video_info/frame')

            # r = client.request(u, post=post, cookie=c, headers=h, XHR=True, referer=url)
            r = client.request(u, post=post, headers=h, XHR=True, referer=url)
            r = json.loads(r).values()
            r = [urllib.unquote(i.split('url=')[-1]) for i in r]

            for i in r:
                try:
                    sources.append(
                        {'source': 'gvideo', 'quality': directstream.googletag(i)[0]['quality'], 'language': 'en',
                         'url': i, 'direct': True, 'debridonly': False})
                except:
                    pass

            return sources
        except:
            return sources
Example #12
    def resolve(self, url, resolverList=None):
        logger.debug('%s ORIGINAL URL [%s]' % (__name__, url))
        try:
            url = urlparse.urljoin(self.base_link, url)
            result = client.request(url)
        except:
            pass

        try:
            url = re.compile('"?file"?\s*=\s*"(.+?)"\s+"?label"?\s*=\s*"(\d+)p?"').findall(result)

            url = [(int(i[1]), i[0]) for i in url]
            url = sorted(url, key=lambda k: k[0])
            url = url[-1][1]

            try: u = client.request(url, output='headers', redirect=False)['Location']
            except: u = client.request(url, output='geturl')

            q = directstream.googletag(u)[0]['quality']

            url = u

            if 'requiressl=yes' in url: url = url.replace('http://', 'https://')
            else: url = url.replace('https://', 'http://')
            return url
        except:
            pass

        try:
            url = re.compile('file\s*=\s*"(.+?)"').findall(result)[0]
            if self.base_link in url: raise Exception()
            url = client.replaceHTMLCodes(url)
            return url
        except:
            pass

        try:
            url = json.loads(result)['embed_url']
            logger.debug('%s RESOLVED URL [%s]' % (__name__, url))
            return url
        except:
            pass
Example #13
    def get_sources(self, url, hostDict, hostprDict, locDict):
        try:
            sources = []

            if url == None: return sources

            url = urlparse.urljoin(self.base_link, url)

            r = client.request(url)

            r = client.parseDOM(r, 'div', attrs = {'class': 'screen fluid-width-video-wrapper'})[0]
            r = re.findall('src\s*=\s*"(.*?)"', r)[0]

            r = urlparse.urljoin(self.base_link, r)

            r = client.request(r, referer=url)

            links = []

            url = re.findall('src\s*=\s*"(.*?)"', r)
            url = [i for i in url if 'http' in i]

            for i in url:
                try: links += [{'source': 'gvideo', 'url': i, 'quality': directstream.googletag(i)[0]['quality'], 'direct': True}]
                except: pass

            url = re.findall('(openload\.(?:io|co)/(?:embed|f)/[0-9a-zA-Z-_]+)', r)
            url = ['http://' + i for i in url]

            for i in url:
                try: links += [{'source': 'openload.co', 'url': i, 'quality': 'HD', 'direct': False}]
                except: pass

            for i in links: sources.append({'source': i['source'], 'quality': i['quality'], 'provider': 'Rainierland', 'url': i['url'], 'direct': i['direct'], 'debridonly': False})

            return sources
        except:
            return sources
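
Example #13 (and the substring checks in #4 and #5) shows the usual openload handling: pull embed paths out of the page, prefix a scheme, and flag the links as non-direct. The regex step in isolation, run against a fabricated HTML fragment:

import re

html = '<iframe src="https://openload.co/embed/AbC123xyz/"></iframe>'   # fabricated markup
found = re.findall('(openload\.(?:io|co)/(?:embed|f)/[0-9a-zA-Z-_]+)', html)
links = ['http://' + i for i in found]
print(links)                                       # ['http://openload.co/embed/AbC123xyz']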
Example #14
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url == None: return sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])
            title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']
            imdb = data['imdb']
            aliases = eval(data['aliases'])
            headers = {}

            if 'tvshowtitle' in data:
                url = self.searchShow(title, int(data['season']),
                                      int(data['episode']), aliases, headers)
            else:
                url = self.searchMovie(title, data['year'], aliases, headers)

            r = client.request(url,
                               headers=headers,
                               output='extended',
                               timeout='10')

            if not imdb in r[0]: raise Exception()

            cookie = r[4]
            headers = r[3]
            result = r[0]

            try:
                r = re.findall('(https:.*?redirector.*?)[\'\"]', result)
                for i in r:
                    try:
                        sources.append({
                            'source': 'gvideo',
                            'quality': directstream.googletag(i)[0]['quality'],
                            'language': 'en',
                            'url': i,
                            'direct': True,
                            'debridonly': False
                        })
                    except:
                        pass
            except:
                pass

            try:
                auth = re.findall('__utmx=(.+)', cookie)[0].split(';')[0]
            except:
                auth = 'false'
            auth = 'Bearer %s' % urllib.unquote_plus(auth)
            headers['Authorization'] = auth
            headers['Referer'] = url

            u = '/ajax/vsozrflxcw.php'
            self.base_link = client.request(self.base_link,
                                            headers=headers,
                                            output='geturl')
            u = urlparse.urljoin(self.base_link, u)

            action = 'getEpisodeEmb' if '/episode/' in url else 'getMovieEmb'

            elid = urllib.quote(
                base64.encodestring(str(int(time.time()))).strip())

            token = re.findall("var\s+tok\s*=\s*'([^']+)", result)[0]

            idEl = re.findall('elid\s*=\s*"([^"]+)', result)[0]

            post = {
                'action': action,
                'idEl': idEl,
                'token': token,
                'nopop': '',
                'elid': elid
            }
            post = urllib.urlencode(post)
            cookie += ';%s=%s' % (idEl, elid)
            headers['Cookie'] = cookie

            r = client.request(u,
                               post=post,
                               headers=headers,
                               cookie=cookie,
                               XHR=True)
            r = str(json.loads(r))

            r = re.findall('\'(http.+?)\'', r) + re.findall('\"(http.+?)\"', r)

            for i in r:
                try:
                    if 'google' in i:
                        quality = 'SD'

                        if 'googleapis' in i:
                            try:
                                quality = source_utils.check_sd_url(i)
                            except Exception:
                                pass

                        if 'googleusercontent' in i:
                            i = directstream.googleproxy(i)
                            try:
                                quality = directstream.googletag(
                                    i)[0]['quality']
                            except Exception:
                                pass

                        sources.append({
                            'source': 'gvideo',
                            'quality': quality,
                            'language': 'en',
                            'url': i,
                            'direct': True,
                            'debridonly': False
                        })

                    elif 'llnwi.net' in i or 'vidcdn.pro' in i:
                        try:
                            quality = source_utils.check_sd_url(i)

                            sources.append({
                                'source': 'CDN',
                                'quality': quality,
                                'language': 'en',
                                'url': i,
                                'direct': True,
                                'debridonly': False
                            })

                        except Exception:
                            pass
                    else:
                        valid, hoster = source_utils.is_host_valid(i, hostDict)
                        if not valid: continue

                        sources.append({
                            'source': hoster,
                            'quality': '720p',
                            'language': 'en',
                            'url': i,
                            'direct': False,
                            'debridonly': False
                        })
                except Exception:
                    pass
            return sources
        except:
            failure = traceback.format_exc()
            print('CartoonHD - Exception: \n' + str(failure))
            return sources
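
Examples #14 and #20 build an Authorization header from the __utmx fragment of the response cookie, falling back to the literal string 'false' when it is missing. The extraction on its own, against a fabricated cookie string (urllib.unquote_plus is the Python 2 call the examples use):

import re
import urllib

cookie = '__cfduid=abc123; __utmx=c2FtcGxlLXRva2Vu%3D%3D; lang=en'   # fabricated cookie
try:
    auth = re.findall('__utmx=(.+)', cookie)[0].split(';')[0]
except:
    auth = 'false'
auth = 'Bearer %s' % urllib.unquote_plus(auth)
print(auth)                                        # Bearer c2FtcGxlLXRva2Vu==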
Example #15
    def get_sources(self, url, hostDict, hostprDict, locDict):
        try:
            sources = []

            if url == None: return sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
            headers = {}
            title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']

            if 'tvshowtitle' in data:
                url = '/tv-series/%s-season-%01d/watch/' % (cleantitle.geturl(title), int(data['season']))
                year = str((int(data['year']) + int(data['season'])) - 1)
                episode = '%01d' % int(data['episode'])

            else:
                url = '/movie/%s/watch/' % cleantitle.geturl(title)
                year = data['year']
                episode = None

            url = urlparse.urljoin(self.base_link, url)
            referer = url

            r = client.request(url)

            y = re.findall('Release\s*:\s*.+?\s*(\d{4})', r)[0]

            if not year == y: raise Exception()


            r = client.parseDOM(r, 'div', attrs = {'class': 'les-content'})
            r = zip(client.parseDOM(r, 'a', ret='href'), client.parseDOM(r, 'a'))
            r = [(i[0], ''.join(re.findall('(\d+)', i[1])[:1])) for i in r]

            if not episode == None:
                r = [i[0] for i in r if '%01d' % int(i[1]) == episode]
            else:
                r = [i[0] for i in r]

            r = [i for i in r if '/server-' in i]

            for u in r:
                try:
                    p = client.request(u, referer=referer, timeout='10')

                    t = re.findall('player_type\s*:\s*"(.+?)"', p)[0]
                    if t == 'embed': raise Exception()

                    episodeId = client.parseDOM(p, 'input', ret='value', attrs = {'name': 'episodeID'})[0]
                    js = json.loads(client.request(self.token_link,post=urllib.urlencode({'id': episodeId}), referer=referer, timeout='10'))
                    hash = js['hash']
                    token = js['token']
                    _ = js['_']
                    url = self.grabber_link % (episodeId, hash, token, _)
                    u = client.request(url, referer=referer, timeout='10')
                    js = json.loads(u)

                    try:
                        u = js['playlist'][0]['sources']
                        u = [i['file'] for i in u if 'file' in i]

                        for i in u:
                            try: sources.append({'source': 'gvideo', 'quality': directstream.googletag(i)[0]['quality'], 'provider': 'watch5s', 'url': i, 'direct': True, 'debridonly': False})
                            except: pass
                    except:
                        pass

                    try:
                        u = js['backup']
                        u = urlparse.parse_qs(urlparse.urlsplit(u).query)
                        u = dict([(i, u[i][0]) if u[i] else (i, '') for i in u])
                        eid = u['eid']
                        mid = u['mid']
                        p = client.request(self.backup_token_link % (eid, mid, _), XHR=True, referer=referer, timeout='10')
                        x = re.search('''_x=['"]([^"']+)''', p).group(1)
                        y = re.search('''_y=['"]([^"']+)''', p).group(1)
                        u = client.request(self.backup_link % (eid, x, y), referer=referer, XHR=True, timeout='10')
                        js = json.loads(u)
                        try:
                            u = js['playlist'][0]['sources']
                            u = [i['file'] for i in u if 'file' in i]

                            for i in u:
                                try: sources.append({'source': 'gvideo', 'quality': directstream.googletag(i)[0]['quality'], 'provider': 'watch5s', 'url': i, 'direct': True, 'debridonly': False})
                                except: pass
                        except:
                            pass
                    except:
                        pass
                except:
                    pass

            return sources
        except:
            return sources
Example #16
    def get_sources(self, url):
        try:
            sources = []

            if url == None: return sources

            if not str(url).startswith('http'):
                try:
                    data = urlparse.parse_qs(url)
                    data = dict([(i, data[i][0]) if data[i] else (i, '')
                                 for i in data])

                    title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']

                    year = re.findall(
                        '(\d{4})', data['premiered']
                    )[0] if 'tvshowtitle' in data else data['year']

                    try:
                        episode = data['episode']
                    except:
                        pass

                    query = {'keyword': title}
                    query.update(self.__get_token(query))
                    search_url = urlparse.urljoin(self.base_link, '/search')
                    search_url = search_url + '?' + urllib.urlencode(query)

                    result = client.source(search_url, safe=True)

                    r = client.parseDOM(
                        result, 'div', attrs={'class':
                                              '[^"]*movie-list[^"]*'})[0]
                    r = client.parseDOM(r, 'div', attrs={'class': 'item'})
                    r = [(client.parseDOM(i, 'a', ret='href'),
                          client.parseDOM(i, 'a', attrs={'class': 'name'}))
                         for i in r]
                    r = [(i[0][0], i[1][0]) for i in r
                         if len(i[0]) > 0 and len(i[1]) > 0]
                    r = [(re.sub('http.+?//.+?/', '/',
                                 i[0]), re.sub('&#\d*;', '', i[1])) for i in r]

                    if 'season' in data:
                        url = [(i[0], re.findall('(.+?) (\d*)$', i[1]))
                               for i in r]
                        url = [(i[0], i[1][0][0], i[1][0][1]) for i in url
                               if len(i[1]) > 0]
                        url = [
                            i for i in url
                            if cleantitle.get(title) == cleantitle.get(i[1])
                        ]
                        url = [
                            i for i in url if '%01d' %
                            int(data['season']) == '%01d' % int(i[2])
                        ]
                    else:
                        url = [
                            i for i in r
                            if cleantitle.get(title) == cleantitle.get(i[1])
                        ]

                    url = url[0][0]
                    url = urlparse.urljoin(self.base_link, url)
                except:
                    url = self.base_link

            try:
                url, episode = re.compile('(.+?)\?episode=(\d*)$').findall(
                    url)[0]
            except:
                pass

            referer = url

            xtoken = self.__get_xtoken()

            if xtoken == None: raise Exception()

            result = client.source(url, safe=True)

            atr = [
                i for i in client.parseDOM(result, 'dd')
                if len(re.findall('(\d{4})', i)) > 0
            ][-1]
            if 'season' in data:
                result = result if atr == year or atr == data['year'] else None
            else:
                result = result if atr == year else None

            try:
                quality = client.parseDOM(result,
                                          'span',
                                          attrs={'class':
                                                 'quality'})[0].lower()
            except:
                quality = 'hd'
            if quality == 'cam' or quality == 'ts': quality = 'CAM'
            elif quality == 'hd' or 'hd ' in quality: quality = 'HD'
            else: quality = 'SD'

            result = client.parseDOM(result, 'ul', attrs={'id': 'servers'})

            servers = []
            servers = client.parseDOM(result,
                                      'li',
                                      attrs={'data-type': 'direct'})
            servers = zip(client.parseDOM(servers, 'a', ret='data-id'),
                          client.parseDOM(servers, 'a'))
            servers = [(i[0], re.findall('(\d+)', i[1])) for i in servers]
            servers = [(i[0], ''.join(i[1][:1])) for i in servers]

            try:
                servers = [
                    i for i in servers
                    if '%01d' % int(i[1]) == '%01d' % int(episode)
                ]
            except:
                pass

            for s in servers[:3]:
                try:
                    headers = {'X-Requested-With': 'XMLHttpRequest'}

                    url = urlparse.urljoin(self.base_link, self.hash_link)

                    query = {'id': s[0], 'update': '0', '_xtoken': xtoken}
                    query.update(self.__get_token(query))
                    url = url + '?' + urllib.urlencode(query)

                    result = client.source(url,
                                           headers=headers,
                                           referer=referer,
                                           safe=True)
                    result = json.loads(result)

                    query = result['params']
                    query['mobile'] = '0'
                    query.update(self.__get_token(query))
                    grabber = result['grabber'] + '?' + urllib.urlencode(query)

                    result = client.source(grabber,
                                           headers=headers,
                                           referer=url,
                                           safe=True)
                    result = json.loads(result)

                    result = result['data']
                    result = [i['file'] for i in result if 'file' in i]

                    for i in result:
                        try:
                            sources.append({
                                'source': 'gvideo',
                                'quality': directstream.googletag(i)[0]['quality'],
                                'provider': 'Ninemovies',
                                'url': i,
                                'direct': True,
                                'debridonly': False
                            })
                        except:
                            pass
                except:
                    pass

            if quality == 'CAM':
                for i in sources:
                    i['quality'] = 'CAM'

            return sources
        except:
            return sources
Example #17
    def get_sources(self, url, hostDict, hostprDict, locDict):
        try:
            sources = []

            if url == None: return sources

            r = urlparse.urljoin(self.base_link, url)

            result = client.request(r)

            f = client.parseDOM(result, 'iframe', ret='src')
            f = [i for i in f if 'iframe' in i][0]

            result = client.request(f, headers={'Referer': r})

            r = client.parseDOM(result, 'div', attrs = {'id': 'botones'})[0]
            r = client.parseDOM(r, 'a', ret='href')
            r = [(i, urlparse.urlparse(i).netloc) for i in r]

            links = []

            for u, h in r:
                if not 'pelispedia' in h and not 'thevideos.tv' in h: continue

                result = client.request(u, headers={'Referer': f})

                try:
                    if 'pelispedia' in h: raise Exception()

                    url = re.findall('sources\s*:\s*\[(.+?)\]', result)[0]
                    url = re.findall('file\s*:\s*(?:\"|\')(.+?)(?:\"|\')\s*,\s*label\s*:\s*(?:\"|\')(.+?)(?:\"|\')', url)
                    url = [i[0] for i in url if '720' in i[1]][0]

                    links.append({'source': 'cdn', 'quality': 'HD', 'url': url, 'direct': False})
                except:
                    pass

                try:
                    url = re.findall('sources\s*:\s*\[(.+?)\]', result)[0]
                    url = re.findall('file\s*:\s*(?:\"|\')(.+?)(?:\"|\')', url)

                    for i in url:
                        try: links.append({'source': 'gvideo', 'quality': directstream.googletag(i)[0]['quality'], 'url': i, 'direct': True})
                        except: pass
                except:
                    pass

                try:
                    post = re.findall('gkpluginsphp.*?link\s*:\s*"([^"]+)', result)[0]
                    post = urllib.urlencode({'link': post})

                    url = urlparse.urljoin(self.base_link, '/Pe_flsh/plugins/gkpluginsphp.php')
                    url = client.request(url, post=post, XHR=True, referer=u)
                    url = json.loads(url)['link']

                    links.append({'source': 'gvideo', 'quality': 'HD', 'url': url, 'direct': True})
                except:
                    pass

                try:
                    post = re.findall('var\s+parametros\s*=\s*"([^"]+)', result)[0]

                    post = urlparse.parse_qs(urlparse.urlparse(post).query)['pic'][0]
                    post = urllib.urlencode({'sou': 'pic', 'fv': '23', 'url': post})

                    url = urlparse.urljoin(self.base_link, '/Pe_Player_Html5/pk/pk_2/plugins/protected.php')
                    url = client.request(url, post=post, XHR=True)
                    url = json.loads(url)[0]['url']

                    links.append({'source': 'cdn', 'quality': 'HD', 'url': url, 'direct': True})
                except:
                    pass

            for i in links: sources.append({'source': i['source'], 'quality': i['quality'], 'provider': 'Pelispedia', 'url': i['url'], 'direct': i['direct'], 'debridonly': False})

            return sources
        except:
            return sources
Example #18
    def get_sources(self, url, hostDict, hostprDict, locDict):
        try:
            sources = []

            if url == None: return sources

            r = urlparse.urljoin(self.base_link, url)

            result = client.request(r)

            f = client.parseDOM(result, 'iframe', ret='src')
            f = [i for i in f if 'iframe' in i][0]

            result = client.request(f, headers={'Referer': r})

            r = client.parseDOM(result, 'div', attrs={'id': 'botones'})[0]
            r = client.parseDOM(r, 'a', ret='href')
            r = [(i, urlparse.urlparse(i).netloc) for i in r]

            links = []

            for u, h in r:
                if not 'pelispedia' in h and not 'thevideos.tv' in h: continue

                result = client.request(u, headers={'Referer': f})

                try:
                    if 'pelispedia' in h: raise Exception()

                    url = re.findall('sources\s*:\s*\[(.+?)\]', result)[0]
                    url = re.findall(
                        'file\s*:\s*(?:\"|\')(.+?)(?:\"|\')\s*,\s*label\s*:\s*(?:\"|\')(.+?)(?:\"|\')',
                        url)
                    url = [i[0] for i in url if '720' in i[1]][0]

                    links.append({
                        'source': 'cdn',
                        'quality': 'HD',
                        'url': url,
                        'direct': False
                    })
                except:
                    pass

                try:
                    url = re.findall('sources\s*:\s*\[(.+?)\]', result)[0]
                    url = re.findall('file\s*:\s*(?:\"|\')(.+?)(?:\"|\')', url)

                    for i in url:
                        try:
                            links.append({
                                'source': 'gvideo',
                                'quality': directstream.googletag(i)[0]['quality'],
                                'url': i,
                                'direct': True
                            })
                        except:
                            pass
                except:
                    pass

                try:
                    post = re.findall('gkpluginsphp.*?link\s*:\s*"([^"]+)',
                                      result)[0]
                    post = urllib.urlencode({'link': post})

                    url = urlparse.urljoin(
                        self.base_link, '/Pe_flsh/plugins/gkpluginsphp.php')
                    url = client.request(url, post=post, XHR=True, referer=u)
                    url = json.loads(url)['link']

                    links.append({
                        'source': 'gvideo',
                        'quality': 'HD',
                        'url': url,
                        'direct': True
                    })
                except:
                    pass

                try:
                    post = re.findall('var\s+parametros\s*=\s*"([^"]+)',
                                      result)[0]

                    post = urlparse.parse_qs(
                        urlparse.urlparse(post).query)['pic'][0]
                    post = urllib.urlencode({
                        'sou': 'pic',
                        'fv': '23',
                        'url': post
                    })

                    url = urlparse.urljoin(
                        self.base_link,
                        '/Pe_Player_Html5/pk/pk_2/plugins/protected.php')
                    url = client.request(url, post=post, XHR=True)
                    url = json.loads(url)[0]['url']

                    links.append({
                        'source': 'cdn',
                        'quality': 'HD',
                        'url': url,
                        'direct': True
                    })
                except:
                    pass

            for i in links:
                sources.append({
                    'source': i['source'],
                    'quality': i['quality'],
                    'provider': 'Pelispedia',
                    'url': i['url'],
                    'direct': i['direct'],
                    'debridonly': False
                })

            return sources
        except:
            return sources
Example #19
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url == None: return sources

            if (self.user == '' or self.password == ''): raise Exception()
            login = urlparse.urljoin(self.base_link, '/login')
            post = {
                'username': self.user,
                'password': self.password,
                'returnpath': '/'
            }
            post = urllib.urlencode(post)

            headers = {'User-Agent': client.randomagent()}
            rlogin = client.request(login,
                                    headers=headers,
                                    post=post,
                                    output='extended')
            guid = re.findall('(.*?);\s', rlogin[2]['Set-Cookie'])[0]
            headers['Cookie'] = (headers['Cookie'] + '; ' + guid) if 'Cookie' in headers else guid
            url = urlparse.urljoin(self.base_link, url)

            result = client.request(url, headers=headers)

            url = re.findall("embeds\[\d+\]\s*=\s*'([^']+)", result)[0]
            url = client.parseDOM(url, 'iframe', ret='src')[0]
            url = url.replace('https://', 'http://')

            links = []

            try:
                dec = re.findall('mplanet\*(.+)', url)[0]
                dec = dec.rsplit('&')[0]
                dec = self._gkdecrypt(
                    base64.b64decode('MllVcmlZQmhTM2swYU9BY0lmTzQ='), dec)
                dec = directstream.google(dec)

                links += [(i['url'], i['quality'], 'gvideo') for i in dec]
            except:
                pass

            result = client.request(url, headers=headers)

            try:
                url = re.findall('src\s*=\s*(?:\'|\")(http.+?)(?:\'|\")',
                                 result)
                for i in url:
                    try:
                        links.append({
                            'source': 'gvideo',
                            'quality': directstream.googletag(i)[0]['quality'],
                            'url': i
                        })
                    except:
                        pass
            except:
                pass

            try:
                url = client.parseDOM(result, 'source', ret='src')
                url += re.findall('src\s*:\s*\'(.*?)\'', result)
                url = [i for i in url if '://' in i]
                links.append({'source': 'cdn', 'quality': 'HD', 'url': url[0]})
            except:
                pass

            for i in links:
                sources.append({
                    'source': i['source'],
                    'quality': i['quality'],
                    'language': 'en',
                    'url': i['url'],
                    'direct': True,
                    'debridonly': False
                })

            return sources
        except:
            return sources
Example #20
    def get_sources(self, url, hosthdDict, hostDict, locDict):
        try:
            sources = []

            if url == None: return sources

            if not str(url).startswith('http'):

                data = urlparse.parse_qs(url)
                data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

                title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']

                imdb = data['imdb'] ; year = data['year']

                if 'tvshowtitle' in data:
                    url = '%s/tv-show/%s/season/%01d/episode/%01d' % (self.base_link, cleantitle.geturl(title), int(data['season']), int(data['episode']))
                    result = client.request(url, limit='5')

                    if result == None:
                        t = cache.get(self.getImdbTitle, 900, imdb)
                        if title != t:
                            url = '%s/tv-show/%s/season/%01d/episode/%01d' % (self.base_link, cleantitle.geturl(t), int(data['season']), int(data['episode']))
                            result = client.request(url, limit='5')
                else:
                    url = '%s/movie/%s' % (self.base_link, cleantitle.geturl(title))
                    result = client.request(url, limit='5')

                    if result == None:
                        t = cache.get(self.getImdbTitle, 900, imdb)
                        if title != t:
                            url = '%s/movie/%s' % (self.base_link, cleantitle.geturl(t))
                            result = client.request(url, limit='5')

                if result == None and not 'tvshowtitle' in data:
                    url += '-%s' % year
                    result = client.request(url, limit='5')

                result = client.parseDOM(result, 'title')[0]

                if '%TITLE%' in result: raise Exception()

                r = client.request(url, output='extended')

                if not imdb in r[0]: raise Exception()

            else:
                url = urlparse.urljoin(self.base_link, url)

                r = client.request(url, output='extended')


            cookie = r[4] ; headers = r[3] ; result = r[0]

            try: auth = re.findall('__utmx=(.+)', cookie)[0].split(';')[0]
            except: auth = 'false'
            auth = 'Bearer %s' % urllib.unquote_plus(auth)

            headers['Authorization'] = auth
            headers['Content-Type'] = 'application/x-www-form-urlencoded; charset=UTF-8'
            headers['Accept'] = 'application/json, text/javascript, */*; q=0.01'
            headers['Cookie'] = cookie
            headers['Referer'] = url


            u = '/ajax/jne.php'
            u = urlparse.urljoin(self.base_link, u)

            action = 'getEpisodeEmb' if '/episode/' in url else 'getMovieEmb'

            elid = urllib.quote(base64.encodestring(str(int(time.time()))).strip())

            token = re.findall("var\s+tok\s*=\s*'([^']+)", result)[0]

            idEl = re.findall('elid\s*=\s*"([^"]+)', result)[0]

            post = {'action': action, 'idEl': idEl, 'token': token, 'elid': elid}
            post = urllib.urlencode(post)

            c = client.request(u, post=post, headers=headers, XHR=True, output='cookie', error=True)

            headers['Cookie'] = cookie + '; ' + c

            r = client.request(u, post=post, headers=headers, XHR=True)
            r = str(json.loads(r))
            r = re.findall('\'(http.+?)\'', r) + re.findall('\"(http.+?)\"', r)

            for i in r:
                try: sources.append({'source': 'gvideo', 'quality': directstream.googletag(i)[0]['quality'], 'provider': 'movieshd', 'url': i, 'direct': True, 'debridonly': False})
                except: pass

            return sources
        except:
            return sources
Example #21
    def get_sources(self, url, hostDict, hostprDict, locDict):
        try:
            sources = []

            if url == None: return sources

            url = urlparse.urljoin(self.base_link, url)
            url = path = re.sub('/watching.html$', '', url.strip('/'))
            url = referer = url + '/watching.html'

            p = client.request(url)
            p = re.findall(
                "data\s*:\s*{\s*id:\s*(\d+),\s*episode_id:\s*(\d+),\s*link_id:\s*(\d+)",
                p)[0]
            p = urllib.urlencode({
                'id': p[0],
                'episode_id': p[1],
                'link_id': p[2],
                '_': int(time.time() * 1000)
            })

            headers = {
                'Accept-Formating': 'application/json, text/javascript',
                'Server': 'cloudflare-nginx',
                'Referer': referer
            }

            r = urlparse.urljoin(self.base_link, '/ajax/movie/load_episodes')
            r = client.request(r, post=p, headers=headers, XHR=True)
            r = re.findall("load_player\(\s*'([^']+)'\s*,\s*'?(\d+)\s*'?", r)
            r = [i for i in r if int(i[1]) >= 720]

            for u in r:
                try:
                    p = urllib.urlencode({
                        'id': u[0],
                        'quality': u[1],
                        '_': int(time.time() * 1000)
                    })
                    u = urlparse.urljoin(self.base_link,
                                         '/ajax/movie/load_player_v2')

                    u = client.request(u, post=p, headers=headers, XHR=True)
                    u = json.loads(u)['playlist']
                    u = client.request(u, headers=headers, XHR=True)
                    u = json.loads(u)['playlist'][0]['sources']
                    u = [i['file'] for i in u if 'file' in i]

                    for i in u:
                        try:
                            sources.append({
                                'source': 'gvideo',
                                'quality': directstream.googletag(i)[0]['quality'],
                                'provider': 'XMovies',
                                'url': i,
                                'direct': True,
                                'debridonly': False
                            })
                        except:
                            pass
                except:
                    pass

            return sources
        except:
            return sources
Example #22
    def get_sources(self, url):
        try:
            sources = []

            if url == None: return sources

            if not str(url).startswith('http'):
                try:
                    data = urlparse.parse_qs(url)
                    data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

                    title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']

                    year = re.findall('(\d{4})', data['premiered'])[0] if 'tvshowtitle' in data else data['year']

                    try: episode = data['episode']
                    except: pass

                    query = {'keyword': title}
                    query.update(self.__get_token(query))
                    search_url = urlparse.urljoin(self.base_link, '/search')
                    search_url = search_url + '?' + urllib.urlencode(query)

                    result = client.source(search_url, safe=True)

                    r = client.parseDOM(result, 'div', attrs = {'class': '[^"]*movie-list[^"]*'})[0]
                    r = client.parseDOM(r, 'div', attrs = {'class': 'item'})
                    r = [(client.parseDOM(i, 'a', ret='href'), client.parseDOM(i, 'a', attrs = {'class': 'name'})) for i in r]
                    r = [(i[0][0], i[1][0]) for i in r if len(i[0]) > 0 and  len(i[1]) > 0]
                    r = [(re.sub('http.+?//.+?/','/', i[0]), re.sub('&#\d*;','', i[1])) for i in r]

                    if 'season' in data:
                        url = [(i[0], re.findall('(.+?) (\d*)$', i[1])) for i in r]
                        url = [(i[0], i[1][0][0], i[1][0][1]) for i in url if len(i[1]) > 0]
                        url = [i for i in url if cleantitle.get(title) == cleantitle.get(i[1])]
                        url = [i for i in url if '%01d' % int(data['season']) == '%01d' % int(i[2])]
                    else:
                        url = [i for i in r if cleantitle.get(title) == cleantitle.get(i[1])]

                    url = url[0][0]
                    url = urlparse.urljoin(self.base_link, url)
                except:
                    url = self.base_link


            try: url, episode = re.compile('(.+?)\?episode=(\d*)$').findall(url)[0]
            except: pass

            referer = url

            xtoken = self.__get_xtoken()

            if xtoken == None: raise Exception()

            result = client.source(url, safe=True)


            atr = [i for i in client.parseDOM(result, 'dd') if len(re.findall('(\d{4})', i)) > 0][-1]
            if 'season' in data:
                result = result if atr == year or atr == data['year'] else None
            else:
                result = result if atr == year else None


            try: quality = client.parseDOM(result, 'span', attrs = {'class': 'quality'})[0].lower()
            except: quality = 'hd'
            if quality == 'cam' or quality == 'ts': quality = 'CAM'
            elif quality == 'hd' or 'hd ' in quality: quality = 'HD'
            else: quality = 'SD'

            result = client.parseDOM(result, 'ul', attrs = {'id': 'servers'})

            servers = []
            servers = client.parseDOM(result, 'li', attrs = {'data-type': 'direct'})
            servers = zip(client.parseDOM(servers, 'a', ret='data-id'), client.parseDOM(servers, 'a'))
            servers = [(i[0], re.findall('(\d+)', i[1])) for i in servers]
            servers = [(i[0], ''.join(i[1][:1])) for i in servers]

            try: servers = [i for i in servers if '%01d' % int(i[1]) == '%01d' % int(episode)]
            except: pass

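            # probe at most three direct servers; each hash request returns grabber params that yield gvideo files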
            for s in servers[:3]:
                try:
                    headers = {'X-Requested-With': 'XMLHttpRequest'}

                    url = urlparse.urljoin(self.base_link, self.hash_link)

                    query = {'id': s[0], 'update': '0', '_xtoken': xtoken}
                    query.update(self.__get_token(query))
                    url = url + '?' + urllib.urlencode(query)

                    result = client.source(url, headers=headers, referer=referer, safe=True)
                    result = json.loads(result)

                    query = result['params']
                    query['mobile'] = '0'
                    query.update(self.__get_token(query))
                    grabber = result['grabber'] + '?' + urllib.urlencode(query)

                    result = client.source(grabber, headers=headers, referer=url, safe=True)
                    result = json.loads(result)

                    result = result['data']
                    result = [i['file'] for i in result if 'file' in i]

                    for i in result:
                        try: sources.append({'source': 'gvideo', 'quality': directstream.googletag(i)[0]['quality'], 'provider': 'Ninemovies', 'url': i, 'direct': True, 'debridonly': False})
                        except: pass
                except:
                    pass

            if quality == 'CAM':
                for i in sources: i['quality'] = 'CAM'

            return sources
        except:
            return sources
Пример #23
0
    def get_sources(self, url, hostDict, hostprDict, locDict):
        try:
            sources = []

            if url == None: return sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])
            headers = {}
            title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']

            if 'tvshowtitle' in data:
                url = '/tv-series/%s-season-%01d/watch/' % (
                    cleantitle.geturl(title), int(data['season']))
                year = str((int(data['year']) + int(data['season'])) - 1)
                episode = '%01d' % int(data['episode'])

            else:
                url = '/movie/%s/watch/' % cleantitle.geturl(title)
                year = data['year']
                episode = None

            url = urlparse.urljoin(self.base_link, url)
            referer = url

            r = client.request(url)

            y = re.findall('Release\s*:\s*.+?\s*(\d{4})', r)[0]

            if not year == y: raise Exception()

            r = client.parseDOM(r, 'div', attrs={'class': 'les-content'})
            r = zip(client.parseDOM(r, 'a', ret='href'),
                    client.parseDOM(r, 'a'))
            r = [(i[0], ''.join(re.findall('(\d+)', i[1])[:1])) for i in r]

            if not episode == None:
                r = [i[0] for i in r if '%01d' % int(i[1]) == episode]
            else:
                r = [i[0] for i in r]

            r = [i for i in r if '/server-' in i]

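            # each server page exposes an episodeID that is exchanged for hash/token grabber parameters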
            for u in r:
                try:
                    p = client.request(u, referer=referer, timeout='10')

                    t = re.findall('player_type\s*:\s*"(.+?)"', p)[0]
                    if t == 'embed': raise Exception()

                    episodeId = client.parseDOM(p,
                                                'input',
                                                ret='value',
                                                attrs={'name': 'episodeID'})[0]
                    js = json.loads(
                        client.request(self.token_link,
                                       post=urllib.urlencode({'id':
                                                              episodeId}),
                                       referer=referer,
                                       timeout='10'))
                    hash = js['hash']
                    token = js['token']
                    _ = js['_']
                    url = self.grabber_link % (episodeId, hash, token, _)
                    u = client.request(url, referer=referer, timeout='10')
                    js = json.loads(u)

                    try:
                        u = js['playlist'][0]['sources']
                        u = [i['file'] for i in u if 'file' in i]

                        for i in u:
                            try:
                                sources.append({
                                    'source': 'gvideo',
                                    'quality': directstream.googletag(i)[0]['quality'],
                                    'provider': 'watch5s',
                                    'url': i,
                                    'direct': True,
                                    'debridonly': False
                                })
                            except:
                                pass
                    except:
                        pass

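                    # also try the backup grabber advertised in the playlist response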
                    try:
                        u = js['backup']
                        u = urlparse.parse_qs(urlparse.urlsplit(u).query)
                        u = dict([(i, u[i][0]) if u[i] else (i, '')
                                  for i in u])
                        eid = u['eid']
                        mid = u['mid']
                        p = client.request(self.backup_token_link %
                                           (eid, mid, _),
                                           XHR=True,
                                           referer=referer,
                                           timeout='10')
                        x = re.search('''_x=['"]([^"']+)''', p).group(1)
                        y = re.search('''_y=['"]([^"']+)''', p).group(1)
                        u = client.request(self.backup_link % (eid, x, y),
                                           referer=referer,
                                           XHR=True,
                                           timeout='10')
                        js = json.loads(u)
                        try:
                            u = js['playlist'][0]['sources']
                            u = [i['file'] for i in u if 'file' in i]

                            for i in u:
                                try:
                                    sources.append({
                                        'source': 'gvideo',
                                        'quality': directstream.googletag(i)[0]['quality'],
                                        'provider': 'watch5s',
                                        'url': i,
                                        'direct': True,
                                        'debridonly': False
                                    })
                                except:
                                    pass
                        except:
                            pass
                    except:
                        pass
                except:
                    pass

            return sources
        except:
            return sources
Пример #24
0
    def get_sources(self, url, hostDict, hostprDict, locDict):
        try:
            sources = []

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])

            url = data['url']
            episode = int(data['episode'])

            if url == None: return sources

            url = urlparse.urljoin(self.base_link, url)
            p = client.request(url, timeout='10')

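            # for episodes, follow the matching "Episode N" link before loading the player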
            if episode > 0:
                r = client.parseDOM(p, 'div', attrs={'class': 'ep_link.+?'})[0]
                r = zip(client.parseDOM(r, 'a', ret='href'),
                        client.parseDOM(r, 'a'))
                r = [(i[0], re.findall('Episode\s+(\d+)', i[1])) for i in r]
                r = [(i[0], i[1][0]) for i in r]
                r = [i[0] for i in r if int(i[1]) == episode][0]
                p = client.request(r, timeout='10')

            p = re.findall('load_player\((\d+)\)', p)
            p = urllib.urlencode({'id': p[0]})
            headers = {'Referer': url}
            r = urlparse.urljoin(self.base_link, '/ajax/movie/load_player_v3')
            r = client.request(r,
                               post=p,
                               headers=headers,
                               XHR=True,
                               timeout='10')
            url = json.loads(r)['value']
            url = client.request(url,
                                 headers=headers,
                                 XHR=True,
                                 output='geturl',
                                 timeout='10')

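            # openload embeds are added as indirect sources; anything else is treated as a gvideo playlist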
            if 'openload.io' in url or 'openload.co' in url or 'oload.tv' in url:
                sources.append({
                    'source': 'openload.co',
                    'quality': 'HD',
                    'provider': '1movies',
                    'url': url,
                    'direct': False,
                    'debridonly': False
                })
                raise Exception()

            r = client.request(url, headers=headers, XHR=True, timeout='10')
            try:
                src = json.loads(r)['playlist'][0]['sources']
                links = [i['file'] for i in src if 'file' in i]
                for i in links:
                    try:
                        sources.append({
                            'source': 'gvideo',
                            'quality': directstream.googletag(i)[0]['quality'],
                            'provider': '1movies',
                            'url': i,
                            'direct': True,
                            'debridonly': False
                        })
                    except:
                        pass
            except:
                pass

            return sources
        except:
            return sources
Пример #25
0
    def sources(self, url, hostDict, hostprDict):
        sources = []

        try:
            if not url:
                return sources

            ref = urlparse.urljoin(self.base_link, url)

            r = client.request(ref)

            p = re.findall('load_player\((\d+)\)', r)
            r = client.request(urlparse.urljoin(self.base_link,
                                                self.player_link),
                               post={'id': p[0]},
                               referer=ref,
                               XHR=True)
            url = json.loads(r).get('value')
            link = client.request(url, XHR=True, output='geturl', referer=ref)

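            # self-hosted player: read the file/label pairs straight from the JS config and map labels to qualities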
            if '1movies.' in link:
                r = client.request(link, XHR=True, referer=ref)
                r = [(match[1], match[0]) for match in re.findall(
                    '''['"]?file['"]?\s*:\s*['"]([^'"]+)['"][^}]*['"]?label['"]?\s*:\s*['"]([^'"]*)''',
                    r, re.DOTALL)]
                r = [(re.sub('[^\d]+', '', x[0]), x[1].replace('\/', '/'))
                     for x in r]
                r = [x for x in r if x[0]]

                links = [(x[1], '4K') for x in r if int(x[0]) >= 2160]
                links += [(x[1], '1440p') for x in r if int(x[0]) >= 1440]
                links += [(x[1], '1080p') for x in r if int(x[0]) >= 1080]
                links += [(x[1], 'HD') for x in r if 720 <= int(x[0]) < 1080]
                links += [(x[1], 'SD') for x in r if int(x[0]) < 720]

                for url, quality in links:
                    sources.append({
                        'source': 'gvideo',
                        'quality': quality,
                        'language': 'en',
                        'url': url,
                        'direct': True,
                        'debridonly': False
                    })
            else:
                valid, host = source_utils.is_host_valid(link, hostDict)
                if not valid: return sources

                urls = []
                if 'google' in link:
                    host = 'gvideo'
                    direct = True
                    urls = directstream.google(link)
                if 'google' in link and not urls and directstream.googletag(link):
                    host = 'gvideo'
                    direct = True
                    urls = [{
                        'quality': directstream.googletag(link)[0]['quality'],
                        'url': link
                    }]
                elif 'ok.ru' in link:
                    host = 'vk'
                    direct = True
                    urls = directstream.odnoklassniki(link)
                elif 'vk.com' in link:
                    host = 'vk'
                    direct = True
                    urls = directstream.vk(link)
                if not urls:
                    direct = False
                    urls = [{'quality': 'HD', 'url': link}]

                for x in urls:
                    sources.append({
                        'source': host,
                        'quality': x['quality'],
                        'language': 'en',
                        'url': x['url'],
                        'direct': direct,
                        'debridonly': False
                    })

            return sources
        except:
            return sources
Пример #26
0
    def get_sources(self, url):
        logger.debug('%s SOURCES URL %s' % (self.__class__, url))

        try:
            sources = []

            if url == None: return sources

            if not str(url).startswith('http'):

                data = urlparse.parse_qs(url)
                data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])

                title = data['title']

                imdb = data['imdb']

                match = title.replace('-', '').replace(':', '').replace('\'', '').replace(' ', '-').replace('--', '-').lower()

                url = '%s/movie/%s' % (self.base_link, match)

                result = client.request(url, limit='1')
                result = client.parseDOM(result, 'title')[0]

                if '%TITLE%' in result: raise Exception()

                result, headers, content, cookie = client.request(url, output='extended')

                if not imdb in result: raise Exception()


            else:

                result, headers, content, cookie = client.request(url, output='extended')

            print cookie
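            # the __utmx cookie value doubles as the bearer token for the embeds endpoint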
            auth = re.findall('__utmx=(.+)', cookie)[0].split(';')[0]
            auth = 'Bearer %s' % urllib.unquote_plus(auth)

            headers['Authorization'] = auth
            headers['X-Requested-With'] = 'XMLHttpRequest'
            headers['Referer'] = url

            u = 'http://www.putlocker.systems/ajax/embeds.php'

            action = 'getMovieEmb'

            elid = urllib.quote(base64.encodestring(str(int(time.time()))).strip())

            token = re.findall("var\s+tok\s*=\s*'([^']+)", result)[0]

            idEl = re.findall('elid\s*=\s*"([^"]+)', result)[0]

            post = {'action': action, 'idEl': idEl, 'token': token, 'elid': elid}
            post = urllib.urlencode(post)


            r = client.request(u, post=post, headers=headers)
            r = str(json.loads(r))
            r = client.parseDOM(r, 'iframe', ret='.+?') + client.parseDOM(r, 'IFRAME', ret='.+?')

            links = []

            for i in r:
                try: links += [{'source': 'gvideo', 'quality': directstream.googletag(i)[0]['quality'], 'url': i, 'direct': True}]
                except: pass

            links += [{'source': 'openload.co', 'quality': 'SD', 'url': i, 'direct': False} for i in r if 'openload.co' in i]

            links += [{'source': 'videomega.tv', 'quality': 'SD', 'url': i, 'direct': False} for i in r if 'videomega.tv' in i]


            for i in links: sources.append({'source': i['source'], 'quality': i['quality'], 'provider': 'Putlocker', 'url': i['url'], 'direct': i['direct'], 'debridonly': False})

            logger.debug('%s SOURCES [%s]' % (__name__,sources))
            return sources
        except:
            import traceback
            traceback.print_exc()
            return sources
Пример #27
0
    def get_sources(self, url, hosthdDict, hostDict, locDict):
        #sources.append({'source': host.split('.')[0], 'quality': 'SD', 'provider': 'Movie25', 'url': url})
        sources = []
        results = []

        try:

            if url == None: return sources

            if url.startswith('http'): self.base_link = url

            url = urlparse.urljoin(self.base_link, url)
            url = referer = url.replace('/watching.html', '')
            url = url.replace('.html', '')
            #if not url.endswith('/'): url = url + "/watching.html"
            #else : url = url + "watching.html"

            try:
                url, episode = re.findall('(.+?)\?episode=(\d*)$', url)[0]
            except:
                episode = None

            r = self.request(url + '/watching.html')[0]
            try:
                mid = re.compile('name="movie_id" value="(.+?)"').findall(r)[0]
            except:
                mid = re.compile('id: "(.+?)"').findall(r)[0]

            try:
                headers = {'Referer': url}

                time_now = int(time.time() * 10000)
                EPISODES = '/ajax/v4_movie_episodes/%s' % (mid)
                EPISODES = urlparse.urljoin(self.base_link, EPISODES)
                r = self.request(EPISODES)[0]
                r = json.loads(r)['html']
                r = client.parseDOM(r, 'div', attrs={'class': 'pas-list'})
                ids = client.parseDOM(r, 'li', ret='data-id')
                servers = client.parseDOM(r, 'li', ret='data-server')
                labels = client.parseDOM(r, 'a', ret='title')
                r = zip(ids, servers, labels)
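                # each (id, server, label) entry is de-obfuscated into the x/y params the grabber expects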
                for eid in r:
                    try:
                        try:
                            ep = re.findall('episode.*?(\d+).*?',
                                            eid[2].lower())[0]
                        except:
                            ep = 0
                        if (episode is None) or (int(ep) == int(episode)):
                            url = urlparse.urljoin(
                                self.base_link,
                                self.token_link % (eid[0], mid))
                            script = client.request(url)
                            if '$_$' in script:
                                params = self.uncensored1(script)
                            elif script.startswith('[]') and script.endswith(
                                    '()'):
                                params = self.uncensored2(script)
                            else:
                                raise Exception()
                            u = urlparse.urljoin(
                                self.base_link, self.sourcelink %
                                (eid[0], params['x'], params['y']))
                            r = client.request(u, XHR=True)
                            url = json.loads(r)['playlist'][0]['sources']
                            url = [i['file'] for i in url if 'file' in i]
                            url = [directstream.googletag(i) for i in url]
                            url = [i[0] for i in url if i]
                            for s in url:
                                sources.append({
                                    'source': 'gvideo',
                                    'quality': s['quality'],
                                    'url': s['url'],
                                    'provider': 'Solarmovies'
                                })
                    except:
                        pass
            except:
                pass

            return sources

        except Exception as e:
            control.log('ERROR SOLAR %s' % e)
            return sources
Пример #28
0
    def get_sources(self, url, hostDict, hostprDict, locDict):
        try:
            sources = []

            if url is None: return sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '')
                         for i in data])
            aliases = eval(data['aliases'])
            headers = {}

            if 'tvshowtitle' in data:
                episode = int(data['episode'])
                url = self.searchShow(data['tvshowtitle'], data['season'],
                                      aliases, headers)
            else:
                episode = 0
                url = self.searchMovie(data['title'], data['year'], aliases,
                                       headers)

            mid = re.findall('-(\d+)', url)[-1]

            try:
                headers = {'Referer': url}
                u = urlparse.urljoin(self.base_link, self.server_link % mid)
                r = client.request(u, headers=headers, XHR=True)
                r = json.loads(r)['html']
                r = client.parseDOM(r, 'div', attrs={'class': 'pas-list'})
                ids = client.parseDOM(r, 'li', ret='data-id')
                servers = client.parseDOM(r, 'li', ret='data-server')
                labels = client.parseDOM(r, 'a', ret='title')
                r = zip(ids, servers, labels)
                for eid in r:
                    try:
                        try:
                            ep = re.findall('episode.*?(\d+).*?',
                                            eid[2].lower())[0]
                        except:
                            ep = 0
                        if (episode == 0) or (int(ep) == episode):
                            url = urlparse.urljoin(
                                self.base_link,
                                self.token_link % (eid[0], mid))
                            script = client.request(url)
                            if '$_$' in script:
                                params = self.uncensored1(script)
                            elif script.startswith('[]') and script.endswith(
                                    '()'):
                                params = self.uncensored2(script)
                            elif '_x=' in script:
                                x = re.search('''_x=['"]([^"']+)''',
                                              script).group(1)
                                y = re.search('''_y=['"]([^"']+)''',
                                              script).group(1)
                                params = {'x': x, 'y': y}
                            else:
                                raise Exception()

                            u = urlparse.urljoin(
                                self.base_link, self.source_link %
                                (eid[0], params['x'], params['y']))
                            r = client.request(u, XHR=True)
                            url = json.loads(r)['playlist'][0]['sources']
                            url = [i['file'] for i in url if 'file' in i]
                            url = [directstream.googletag(i) for i in url]
                            url = [i[0] for i in url if i]

                            for s in url:
                                sources.append({
                                    'source': 'gvideo',
                                    'quality': s['quality'],
                                    'language': 'en',
                                    'url': s['url'],
                                    'direct': True,
                                    'debridonly': False
                                })
                    except:
                        pass
            except:
                pass

            return sources
        except:
            return sources
Пример #29
0
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url is None: return sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
            aliases = eval(data['aliases'])
            mozhdr = {
                'User-Agent': 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.9.0.3) Gecko/2008092417 Firefox/3.0.3'}
            headers = mozhdr
            headers['X-Requested-With'] = 'XMLHttpRequest'

            self.s = cfscrape.create_scraper()
            if 'tvshowtitle' in data:
                episode = int(data['episode'])
                url = self.searchShow(data['tvshowtitle'], data['season'], aliases, headers)
            else:
                episode = 0
                url = self.searchMovie(data['title'], data['year'], aliases, headers)

            headers['Referer'] = url
            ref_url = url
            mid = re.findall('-(\d*)\.', url)[0]
            data = {'id': mid}
            r = self.s.post(url, headers=headers)
            try:
                u = urlparse.urljoin(self.base_link, self.server_link % mid)
                r = self.s.get(u, headers=mozhdr).content
                r = json.loads(r)['html']
                rl = client.parseDOM(r, 'div', attrs={'class': 'pas-list'})
                rh = client.parseDOM(r, 'div', attrs={'class': 'pas-header'})
                ids = client.parseDOM(rl, 'li', ret='data-id')
                servers = client.parseDOM(rl, 'li', ret='data-server')
                labels = client.parseDOM(rl, 'a', ret='title')
                r = zip(ids, servers, labels)
                rrr = zip(client.parseDOM(rh, 'li', ret='data-id'), client.parseDOM(rh, 'li', ret='class'))
                types = {}
                for rr in rrr:
                    types[rr[0]] = rr[1]

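                # embed-type servers resolve to external hosters; the rest go through the token/grabber flow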
                for eid in r:
                    try:
                        try:
                            ep = re.findall('episode.*?(\d+).*?', eid[2].lower())[0]
                        except:
                            ep = 0
                        if (episode == 0) or (int(ep) == episode):
                            t = str(int(time.time() * 1000))
                            quali = source_utils.get_release_quality(eid[2])[0]
                            if 'embed' in types[eid[1]]:
                                url = urlparse.urljoin(self.base_link, self.embed_link % (eid[0]))
                                xml = self.s.get(url, headers=headers).content
                                url = json.loads(xml)['src']
                                valid, hoster = source_utils.is_host_valid(url, hostDict)
                                if not valid: continue
                                q = source_utils.check_sd_url(url)
                                q = q if q != 'SD' else quali
                                sources.append(
                                    {'source': hoster, 'quality': q, 'language': 'en', 'url': url, 'direct': False,
                                     'debridonly': False})
                                continue
                            else:
                                url = urlparse.urljoin(self.base_link, self.token_link % (eid[0], mid, t))
                            script = self.s.get(url, headers=headers).content
                            if '$_$' in script:
                                params = self.uncensored1(script)
                            elif script.startswith('[]') and script.endswith('()'):
                                params = self.uncensored2(script)
                            elif '_x=' in script:
                                x = re.search('''_x=['"]([^"']+)''', script).group(1)
                                y = re.search('''_y=['"]([^"']+)''', script).group(1)
                                params = {'x': x, 'y': y}
                            else:
                                raise Exception()
                            u = urlparse.urljoin(self.base_link, self.source_link % (eid[0], params['x'], params['y']))
                            length = 0
                            count = 0
                            while length == 0 and count < 11:
                                r = self.s.get(u, headers=headers).text
                                length = len(r)
                                if length == 0: count += 1
                            uri = None
                            uri = json.loads(r)['playlist'][0]['sources']
                            try:
                                uri = [i['file'] for i in uri if 'file' in i]
                            except:
                                try:
                                    uri = [uri['file']]
                                except:
                                    continue

                            for url in uri:
                                if 'googleapis' in url:
                                    q = source_utils.check_sd_url(url)
                                    sources.append(
                                        {'source': 'gvideo', 'quality': q, 'language': 'en', 'url': url, 'direct': True,
                                         'debridonly': False})
                                    continue

                                valid, hoster = source_utils.is_host_valid(url, hostDict)
                                # urls, host, direct = source_utils.check_directstreams(url, hoster)
                                q = quali
                                if valid:
                                    # for z in urls:
                                    if hoster == 'gvideo':
                                        direct = True
                                        try:
                                            q = directstream.googletag(url)[0]['quality']
                                        except:
                                            pass
                                        url = directstream.google(url, ref=ref_url)
                                    else:
                                        direct = False
                                    sources.append(
                                        {'source': hoster, 'quality': q, 'language': 'en', 'url': url, 'direct': direct,
                                         'debridonly': False})
                                else:
                                    sources.append(
                                        {'source': 'CDN', 'quality': q, 'language': 'en', 'url': url, 'direct': True,
                                         'debridonly': False})
                    except:
                        pass
            except:
                pass

            return sources
        except:
            return sources
Пример #30
0
    def sources(self, url, hostDict, hostprDict):
        try:
            sources = []

            if url == None: return sources

            r = urlparse.urljoin(self.base_link, url)

            result = client.request(r)

            f = client.parseDOM(result, 'div', attrs={'class': 'movieplay'})
            if not f:
                f = client.parseDOM(result, 'div', attrs={'class': 'embed2'})
                f = client.parseDOM(f, 'div')

            f = client.parseDOM(f, 'iframe', ret='data-lazy-src')

            dupes = []

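            # de-duplicate embeds by their id parameter, then resolve openload and self-hosted players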
            for u in f:
                try:
                    sid = urlparse.parse_qs(
                        urlparse.urlparse(u).query)['id'][0]

                    if sid in dupes: raise Exception()
                    dupes.append(sid)
                    if 'stream/ol.php' in u:
                        url = client.request(u,
                                             timeout='10',
                                             XHR=True,
                                             referer=u)
                        url = client.parseDOM(url, 'iframe', ret='src')[0]
                        sources.append({
                            'source': 'openload.co',
                            'quality': 'HD',
                            'language': 'en',
                            'url': url,
                            'direct': False,
                            'debridonly': False
                        })

                    if 'stream/play.php' in u:
                        url = client.request(u,
                                             timeout='10',
                                             XHR=True,
                                             referer=u)
                        url = client.parseDOM(url, 'a', ret='href')
                        url = [i for i in url if '.php' in i][0]
                        url = 'http:' + url if url.startswith('//') else url
                        url = client.request(url,
                                             timeout='10',
                                             XHR=True,
                                             referer=u)

                        url = re.findall('sources\s*:\s*\[(.+?)\]', url)[0]
                        links = json.loads('[' + url + ']')

                        for i in links:
                            try:
                                quality = re.findall('(\d+)', i['label'])[0]
                                if int(quality) >= 1080:
                                    quality = '1080p'
                                elif 720 <= int(quality) < 1080:
                                    quality = 'HD'
                                else:
                                    quality = 'SD'

                                try:
                                    quality = directstream.googletag(
                                        i['file'])[0]['quality']
                                except:
                                    pass

                                sources.append({
                                    'source': 'gvideo',
                                    'quality': quality,
                                    'language': 'en',
                                    'url': i['file'],
                                    'direct': True,
                                    'debridonly': False
                                })
                            except:
                                pass
                except:
                    pass

            return sources
        except:
            return sources
Пример #31
0
    def get_sources(self, url, hostDict, hostprDict, locDict):
        try:
            sources = []

            if url is None: return sources

            data = urlparse.parse_qs(url)
            data = dict([(i, data[i][0]) if data[i] else (i, '') for i in data])
            aliases = eval(data['aliases'])
            headers = {}

            if 'tvshowtitle' in data:
                episode = int(data['episode'])
                url = self.searchShow(data['tvshowtitle'], data['season'], aliases, headers)
            else:
                episode = 0
                url = self.searchMovie(data['title'], data['year'], aliases, headers)

            mid = re.findall('-(\d+)', url)[-1]

            try:
                headers = {'Referer': url}
                u = urlparse.urljoin(self.base_link, self.server_link % mid)
                r = client.request(u, headers=headers, XHR=True)
                r = json.loads(r)['html']
                r = client.parseDOM(r, 'div', attrs = {'class': 'pas-list'})
                ids = client.parseDOM(r, 'li', ret='data-id')
                servers = client.parseDOM(r, 'li', ret='data-server')
                labels = client.parseDOM(r, 'a', ret='title')
                r = zip(ids, servers, labels)
                for eid in r:
                    try:
                        try:
                            ep = re.findall('episode.*?(\d+).*?',eid[2].lower())[0]
                        except:
                            ep = 0
                        if (episode == 0) or (int(ep) == episode):
                            url = urlparse.urljoin(self.base_link, self.token_link % (eid[0], mid))
                            script = client.request(url)
                            if '$_$' in script:
                                params = self.uncensored1(script)
                            elif script.startswith('[]') and script.endswith('()'):
                                params = self.uncensored2(script)
                            elif '_x=' in script:
                                x = re.search('''_x=['"]([^"']+)''', script).group(1)
                                y = re.search('''_y=['"]([^"']+)''', script).group(1)
                                params = {'x': x, 'y': y}
                            else:
                                raise Exception()

                            u = urlparse.urljoin(self.base_link, self.source_link % (eid[0], params['x'], params['y']))
                            r = client.request(u, XHR=True)
                            url = json.loads(r)['playlist'][0]['sources']
                            url = [i['file'] for i in url if 'file' in i]
                            url = [directstream.googletag(i) for i in url]
                            url = [i[0] for i in url if i]

                            for s in url:
                                sources.append({'source': 'gvideo', 'quality': s['quality'], 'language': 'en',
                                                'url': s['url'], 'direct': True, 'debridonly': False})
                    except:
                        pass
            except:
                pass

            return sources
        except:
            return sources
Пример #32
0
    def get_sources(self, url):
        logger.debug('%s SOURCES URL %s' % (self.__class__, url))

        try:
            sources = []

            if url == None: return sources

            if not str(url).startswith('http'):

                data = urlparse.parse_qs(url)
                data = dict([(i, data[i][0]) if data[i] else (i, '')
                             for i in data])

                title = data['title']

                imdb = data['imdb']

                match = title.replace('-', '').replace(':', '').replace(
                    '\'', '').replace(' ', '-').replace('--', '-').lower()

                url = '%s/movie/%s' % (self.base_link, match)

                result = client.request(url, limit='1')
                result = client.parseDOM(result, 'title')[0]

                if '%TITLE%' in result: raise Exception()

                result, headers, content, cookie = client.request(
                    url, output='extended')

                if not imdb in result: raise Exception()

            else:

                result, headers, content, cookie = client.request(
                    url, output='extended')

            print cookie
            auth = re.findall('__utmx=(.+)', cookie)[0].split(';')[0]
            auth = 'Bearer %s' % urllib.unquote_plus(auth)

            headers['Authorization'] = auth
            headers['X-Requested-With'] = 'XMLHttpRequest'
            headers['Referer'] = url

            u = 'http://www.putlocker.systems/ajax/embeds.php'

            action = 'getMovieEmb'

            elid = urllib.quote(
                base64.encodestring(str(int(time.time()))).strip())

            token = re.findall("var\s+tok\s*=\s*'([^']+)", result)[0]

            idEl = re.findall('elid\s*=\s*"([^"]+)', result)[0]

            post = {
                'action': action,
                'idEl': idEl,
                'token': token,
                'elid': elid
            }
            post = urllib.urlencode(post)

            r = client.request(u, post=post, headers=headers)
            r = str(json.loads(r))
            r = client.parseDOM(r, 'iframe', ret='.+?') + client.parseDOM(
                r, 'IFRAME', ret='.+?')

            links = []

            for i in r:
                try:
                    links += [{
                        'source': 'gvideo',
                        'quality': directstream.googletag(i)[0]['quality'],
                        'url': i,
                        'direct': True
                    }]
                except:
                    pass

            links += [{
                'source': 'openload.co',
                'quality': 'SD',
                'url': i,
                'direct': False
            } for i in r if 'openload.co' in i]

            links += [{
                'source': 'videomega.tv',
                'quality': 'SD',
                'url': i,
                'direct': False
            } for i in r if 'videomega.tv' in i]

            for i in links:
                sources.append({
                    'source': i['source'],
                    'quality': i['quality'],
                    'provider': 'Putlocker',
                    'url': i['url'],
                    'direct': i['direct'],
                    'debridonly': False
                })

            logger.debug('%s SOURCES [%s]' % (__name__, sources))
            return sources
        except:
            import traceback
            traceback.print_exc()
            return sources
Пример #33
0
    def get_sources(self, url, hosthdDict, hostDict, locDict):
        try:
            sources = []

            if url == None: return sources

            url = urlparse.urljoin(self.base_link, url)

            try:
                url, episode = re.findall('(.+?)\?episode=(\d*)$', url)[0]
            except:
                episode = None

            ref = url

            for i in range(3):
                result = client.request(url)
                if not result == None: break

            if not episode == None:
                result = client.parseDOM(result,
                                         'div',
                                         attrs={'id': 'ip_episode'})[0]
                ep_url = client.parseDOM(result,
                                         'a',
                                         attrs={'data-name': str(episode)},
                                         ret='href')[0]
                for i in range(3):
                    result = client.request(ep_url)
                    if not result == None: break

            r = client.parseDOM(result,
                                'div',
                                attrs={'class': '[^"]*server_line[^"]*'})

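            # each server line posts its data-film/data-server/data-name values to the ipplugins endpoint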
            for u in r:
                try:
                    url = urlparse.urljoin(
                        self.base_link, '/ip.file/swf/plugins/ipplugins.php')
                    p1 = client.parseDOM(u, 'a', ret='data-film')[0]
                    p2 = client.parseDOM(u, 'a', ret='data-server')[0]
                    p3 = client.parseDOM(u, 'a', ret='data-name')[0]
                    post = {
                        'ipplugins': 1,
                        'ip_film': p1,
                        'ip_server': p2,
                        'ip_name': p3
                    }
                    post = urllib.urlencode(post)
                    for i in range(3):
                        result = client.request(url,
                                                post=post,
                                                XHR=True,
                                                referer=ref,
                                                timeout='10')
                        if not result == None: break

                    result = json.loads(result)
                    u = result['s']
                    s = result['v']

                    url = urlparse.urljoin(
                        self.base_link, '/ip.file/swf/ipplayer/ipplayer.php')

                    post = {'u': u, 'w': '100%', 'h': '420', 's': s, 'n': 0}
                    post = urllib.urlencode(post)

                    for i in range(3):
                        result = client.request(url,
                                                post=post,
                                                XHR=True,
                                                referer=ref)
                        if not result == None: break

                    url = json.loads(result)['data']

                    if type(url) is list:
                        url = [i['files'] for i in url]
                        for i in url:
                            try:
                                sources.append({
                                    'source': 'gvideo',
                                    'quality': directstream.googletag(i)[0]['quality'],
                                    'provider': 'TuneMovie',
                                    'url': i,
                                    'direct': True,
                                    'debridonly': False
                                })
                            except:
                                pass

                    else:
                        url = client.request(url)
                        url = client.parseDOM(url,
                                              'source',
                                              ret='src',
                                              attrs={'type': 'video.+?'})[0]
                        url += '|%s' % urllib.urlencode(
                            {'User-agent': client.randomagent()})
                        sources.append({
                            'source': 'cdn',
                            'quality': 'HD',
                            'provider': 'TuneMovie',
                            'url': url,
                            'direct': False,
                            'debridonly': False
                        })

                except:
                    pass

            return sources
        except:
            return sources
Пример #34
0
    def get_sources(self, url, hostDict, hostprDict, lowDict):
        try:
            sources = []

            if url == None: return sources

            if not str(url).startswith('http'):

                data = urlparse.parse_qs(url)
                data = dict([(i, data[i][0]) if data[i] else (i, '')
                             for i in data])

                title = data['tvshowtitle'] if 'tvshowtitle' in data else data['title']

                imdb = data['imdb']
                year = data['year']

                if 'tvshowtitle' in data:
                    url = '%s/tv-show/%s/season/%01d/episode/%01d' % (
                        self.base_link, cleantitle.geturl(title),
                        int(data['season']), int(data['episode']))
                else:
                    url = '%s/movie/%s' % (self.base_link,
                                           cleantitle.geturl(title))

                result = client.request(url, limit='5')

                if result == None and not 'tvshowtitle' in data:
                    url += '-%s' % year
                    result = client.request(url, limit='5')

                result = client.parseDOM(result, 'title')[0]

                if '%TITLE%' in result: raise Exception()

                r = client.request(url, output='extended')

                if not imdb in r[0]: raise Exception()

            else:
                url = urlparse.urljoin(self.base_link, url)

                r = client.request(url, output='extended')

            cookie = r[4]
            headers = r[3]
            result = r[0]

            try:
                auth = re.findall('__utmx=(.+)', cookie)[0].split(';')[0]
            except:
                auth = 'false'
            auth = 'Bearer %s' % urllib.unquote_plus(auth)

            headers['Authorization'] = auth
            headers['Content-Type'] = 'application/x-www-form-urlencoded; charset=UTF-8'
            headers['Accept'] = 'application/json, text/javascript, */*; q=0.01'
            headers['Cookie'] = cookie
            headers['Referer'] = url

            u = '/ajax/tnembeds.php'
            self.base_link = client.request(self.base_link, output='geturl')
            u = urlparse.urljoin(self.base_link, u)

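            # the embeds endpoint wants the page token, element id and a base64-encoded timestamp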
            action = 'getEpisodeEmb' if '/episode/' in url else 'getMovieEmb'

            elid = urllib.quote(
                base64.encodestring(str(int(time.time()))).strip())

            token = re.findall("var\s+tok\s*=\s*'([^']+)", result)[0]

            idEl = re.findall('elid\s*=\s*"([^"]+)', result)[0]

            post = {
                'action': action,
                'idEl': idEl,
                'token': token,
                'elid': elid
            }
            post = urllib.urlencode(post)

            r = client.request(u, post=post, XHR=True)
            r = str(json.loads(r))
            r = re.findall('\'(http.+?)\'', r) + re.findall('\"(http.+?)\"', r)

            for i in r:
                try:
                    sources.append({
                        'source': 'gvideo',
                        'quality': directstream.googletag(i)[0]['quality'],
                        'provider': 'PutLocker',
                        'url': i,
                        'direct': True,
                        'debridonly': False
                    })
                except:
                    pass

            return sources
        except:
            return sources
Пример #35
0
    def get_sources(self, url, hostDict, hostprDict, locDict):
        try:
            sources = []

            if url == None: return sources

            url = urlparse.urljoin(self.base_link, url)

            r = client.request(url)

            r = client.parseDOM(r, 'div', attrs={'class': 'screen fluid-width-video-wrapper'})[0]
            r = re.findall('src\s*=\s*"(.*?)"', r)[0]

            r = urlparse.urljoin(self.base_link, r)

            r = client.request(r, referer=url)

            links = []

            url = re.findall('src\s*=\s*"(.*?)"', r)
            url = [i for i in url if 'http' in i]

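            # collect direct gvideo links first, then any openload embeds found in the player frame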
            for i in url:
                try:
                    links += [{
                        'source': 'gvideo',
                        'url': i,
                        'quality': directstream.googletag(i)[0]['quality'],
                        'direct': True
                    }]
                except:
                    pass

            url = re.findall(
                '(openload\.(?:io|co)/(?:embed|f)/[0-9a-zA-Z-_]+)', r)
            url = ['http://' + i for i in url]

            for i in url:
                try:
                    links += [{
                        'source': 'openload.co',
                        'url': i,
                        'quality': 'HD',
                        'direct': False
                    }]
                except:
                    pass

            for i in links:
                sources.append({
                    'source': i['source'],
                    'quality': i['quality'],
                    'provider': 'Rainierland',
                    'url': i['url'],
                    'direct': i['direct'],
                    'debridonly': False
                })

            return sources
        except:
            return sources