def resolve(self, url):
        # Turn a vidlink.org embed page into a direct stream URL.
        # Any non-vidlink URL — and any failure along the way — results in
        # the original URL being returned unchanged.
        try:
            if 'vidlink' not in url:
                return url

            agent = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; rv:14.0) Gecko/20100101 Firefox/14.0.1'}
            page = client.request(url, headers=agent)
            post_id = re.findall("postID\s*=\s*'([^']+)", page)[0]

            # Ping the view counter; its response carries the id_view token.
            views = client.request('https://vidlink.org/embed/update_views', post=None, headers=agent, referer=url)
            view_id = re.findall('''id_view['"]\s*:\s*['"]([^'"]+)['"]''', views)[0]

            info_headers = dict(agent)
            info_headers['X-Requested-With'] = 'XMLHttpRequest'
            info_headers['Referer'] = url
            payload = {'browserName': 'Firefox',
                       'platform': 'Win32',
                       'postID': post_id,
                       'id_view': view_id}
            info_page = client.request('https://vidlink.org/streamdrive/info', post=payload, headers=info_headers)

            # The stream list is JS-packed; unpack and read window.srcs.
            unpacked = jsunpack.unpack(info_page).replace('\\', '')
            stream_list = json.loads(re.findall('window\.srcs\s*=\s*([^;]+)', unpacked, re.DOTALL)[0])
            for entry in stream_list:
                return entry['url']
        except BaseException:
            return url
Example #2
0
    def resolve(self, data):
        """Resolve a hoster page to its target URL.

        Follows the host's redirect (Location header) when present;
        otherwise tries to pull the link out of JS-packed page source.
        Enforces a minimum delay between successive resolve() calls.
        Returns the host URL, or None on failure.
        """
        try:
            hostURL = None
            DELAY_PER_REQUEST = 1000 # In milliseconds.

            startTime = datetime.now()
            session = self._createSession(data['UA'], data['cookies'], data['referer'])
            r = self._sessionGET(data['pageURL'], session, allowRedirects=False)
            if r.ok:
                if 'Location' in r.headers:
                    hostURL = r.headers['Location'] # For most hosts they redirect.
                else:
                    # On rare cases they JS-pack the host link in the page source.
                    try:
                        hostURL = re.search(r'''go\(\\['"](.*?)\\['"]\);''', jsunpack.unpack(r.text)).group(1)
                    except:
                        pass # Or sometimes their page is just broken.

            # Do a little delay, if necessary, between resolve() calls.
            # BUG FIX: the original measured `elapsed` in whole seconds but
            # compared it against a millisecond constant, then passed the
            # millisecond difference straight to time.sleep() (which takes
            # seconds) — sleeping for ~1000 seconds per call.  Convert
            # consistently: milliseconds for the comparison, seconds for sleep.
            elapsedMs = (datetime.now() - startTime).total_seconds() * 1000.0
            if elapsedMs < DELAY_PER_REQUEST:
                time.sleep(max(DELAY_PER_REQUEST - elapsedMs, 100) / 1000.0)

            return hostURL
        except:
            self._logException()
            return None
    def resolve(self, url):
        """Resolve a hoster page into a playable embed link.

        '/stream/' and '/watch/' pages expose the target in an
        'iframe_play' anchor.  Other pages hide it inside packed JS which
        must be unpacked (sometimes twice) before the 'loc' variable can
        be read out.  Returns the link, or None when nothing usable was
        found.
        """
        try:

            if '/stream/' in url or '/watch/' in url:

                r = client.request(url, referer=self.base_link)
                link = client.parseDOM(r,
                                       'a',
                                       ret='data-href',
                                       attrs={'id': 'iframe_play'})[0]
            else:
                try:

                    data = client.request(url, referer=self.base_link)
                    data = re.findall(r'\s*(eval.+?)\s*</script', data,
                                      re.DOTALL)[0]
                    link = jsunpack.unpack(data)
                    link = link.replace('\\', '')
                    # Some pages double-pack their payload.
                    if 'eval' in link:
                        link = jsunpack.unpack(link)
                    link = link.replace('\\', '')
                    host = re.findall('hosted=\'(.+?)\';var', link,
                                      re.DOTALL)[0]
                    if 'streamango' in host:
                        loc = re.findall('''loc\s*=\s*['"](.+?)['"]''', link,
                                         re.DOTALL)[0]
                        link = 'https://streamango.com/embed/{0}'.format(loc)
                    elif 'openload' in host:
                        loc = re.findall('''loc\s*=\s*['"](.+?)['"]''', link,
                                         re.DOTALL)[0]
                        link = 'https://openload.co/embed/{0}'.format(loc)
                    else:
                        # BUG FIX: the subject string `link` was missing, so
                        # re.DOTALL was being passed as the string to search
                        # and this branch always raised into the fallback.
                        link = re.findall('''loc\s*=\s*['"](.+?)['"]\;''',
                                          link, re.DOTALL)[0]
                except BaseException:
                    # Last resort: follow redirects and use the final URL.
                    link = client.request(url, output='geturl', timeout=10)
                    print(link)
                    if link == url:
                        return
                    else:
                        return link

            return link
        except Exception:
            return
Example #4
0
 def resolve(self, url):
     """Resolve a streamty.com page to its direct file URL.

     Non-streamty URLs are passed through unchanged.
     """
     if 'streamty.com' in url:
         h = {'User-Agent': client.randomagent()}
         html = self.scraper.get(url, headers=h).content
         # BUG FIX: the page text lives in `html`; the original referenced
         # an undefined name `data`, raising NameError on every call.
         packed = find_match(html, "text/javascript'>(eval.*?)\s*</script>")
         unpacked = jsunpack.unpack(packed)
         link = find_match(unpacked, 'file:"([^"]+)"')[0]
         return link
     return url
def more_vidlink(link, hostDict):
    """Collect playable sources from a vidlink.org embed page.

    Returns a list of source dicts (openload mirror when present, plus
    GVIDEO m3u8 variants).  On any error, whatever was gathered so far
    (possibly an empty list) is returned.
    """
    sources = []  # By Shellc0de
    try:
        agent = {
            'User-Agent':
            'Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:65.0) Gecko/20100101 Firefox/65.0'
        }
        post_id = link.split('/embed/')[1]
        req_headers = dict(agent)
        req_headers['X-Requested-With'] = 'XMLHttpRequest'
        req_headers['Referer'] = link
        ihtml = client.request('https://vidlink.org/embed/update_views',
                               post={'postID': post_id},
                               headers=req_headers)
        unpacked = jsunpack.unpack(ihtml).replace('\\', '')

        # Optional openload mirror, advertised via an oploadID variable.
        try:
            oload_id = re.findall(r'var oploadID="(.+?)"', unpacked)[0]
            sources.append({
                'source': 'openload.co',
                'quality': '1080p',
                'language': 'en',
                'url': 'https://openload.co/embed/' + oload_id,
                'direct': False,
                'debridonly': False
            })
        except Exception:
            pass

        # Master playlist; the stream base is everything before '/pl/'.
        playlist = re.findall(r'var file1="(.+?)"', unpacked)[0]
        base = playlist.split('/pl/')[0]
        response = client.request(playlist, headers={
            'Referer':
            'https://vidlink.org/',
            'User-Agent':
            'Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:65.0) Gecko/20100101 Firefox/65.0'
        })
        variants = re.findall(r'[A-Z]{10}=\d+x(\d+)\W[A-Z]+=\"\w+\"\s+\/(.+?)\.',
                              response)
        for qual, path in variants:
            stream = base + '/' + path + '.m3u8'
            quality, info = source_utils.get_release_quality(qual, stream)
            sources.append({
                'source': 'GVIDEO',
                'quality': quality,
                'language': 'en',
                'url': stream,
                'info': info,
                'direct': True,
                'debridonly': False
            })
        return sources
    except:
        return sources
    def sources(self, url, hostDict, hostprDict):
        """Scrape hoster links for the page at `url` into source dicts.

        Unpacks the page's second packed-JS block, reassembles a links URL
        from ten fragment variables, then walks each <tbody> row for a
        href + hoster-name pair.  Returns a list of source dicts (possibly
        empty); any failure returns whatever was gathered so far.
        """
        sources = []
        try:
            if url is None:
                return sources

            # Relative paths are resolved against the site's base link.
            url = urlparse.urljoin(self.base_link,
                                   url) if not url.startswith('http') else url

            result = client.request(url)
            # NOTE: deliberately takes the SECOND eval block on the page ([1]).
            data = re.findall(r'\s*(eval.+?)\s*</script', result, re.DOTALL)[1]
            data = jsunpack.unpack(data).replace('\\', '')

            # The links endpoint is split across ten JS vars; capture all of
            # them in order and concatenate.
            patern = '''rtv='(.+?)';var aa='(.+?)';var ba='(.+?)';var ca='(.+?)';var da='(.+?)';var ea='(.+?)';var fa='(.+?)';var ia='(.+?)';var ja='(.+?)';var ka='(.+?)';'''
            links_url = re.findall(patern, data, re.DOTALL)[0]
            # Fill in the page slug (last URL path segment) for 'slug='.
            slug = 'slug={}'.format(url.split('/')[-1])
            links_url = self.base_link + [''.join(links_url)][0].replace(
                'slug=', slug)
            links = client.request(links_url)
            links = client.parseDOM(links, 'tbody')

            for i in links:
                try:
                    # (href, hoster-name) for this table row.
                    data = [(client.parseDOM(i, 'a', ret='href')[0],
                             client.parseDOM(i,
                                             'span',
                                             attrs={'class':
                                                    'version_host'})[0])][0]
                    url = urlparse.urljoin(self.base_link, data[0])
                    url = client.replaceHTMLCodes(url)
                    url = url.encode('utf-8')

                    host = data[1]
                    valid, host = source_utils.is_host_valid(host, hostDict)
                    if not valid:
                        # Skip unknown hosters via the row-level except.
                        raise Exception()

                    # The quality is encoded in the span's class attribute.
                    quality = client.parseDOM(i, 'span', ret='class')[0]
                    quality, info = source_utils.get_release_quality(
                        quality, url)

                    sources.append({
                        'source': host,
                        'quality': quality,
                        'language': 'en',
                        'url': url,
                        'direct': False,
                        'debridonly': False
                    })
                except BaseException:
                    pass

            return sources
        except Exception:
            return sources
def TEST_RUN():
    """Smoke-test helper: fetch a page, unpack its packed JS and log it."""
    from openscrapers.modules import jsunpack
    from openscrapers.modules import log_utils
    log_utils.log('#####################################')
    page = get('https://site.com', Type='cfscrape')
    packed_js = find_match(page, "text/javascript'>(eval.*?)\s*</script>")
    result = jsunpack.unpack(packed_js)
    log_utils.log('---getSum TEST_RUN - unpacked: \n' + str(result))
    log_utils.log('#####################################')
    return result
 def resolve(self, url):
     """Return the playable link behind `url`.

     Stream/watch pages expose it in the 'iframe_play' anchor; other
     pages carry it as go('...') inside packed JS.
     """
     # Both branches fetch the same page the same way.
     page = self.scraper.get(url, referer=self.base_link).content
     if '/stream/' in url or '/watch/' in url:
         return client.parseDOM(page,
                                'a',
                                ret='data-href',
                                attrs={'id': 'iframe_play'})[0]
     packed = re.findall(r'\s*(eval.+?)\s*</script', page, re.DOTALL)[0]
     unpacked = jsunpack.unpack(packed).replace('\\', '')
     return re.findall(r'''go\(['"](.+?)['"]\)''', unpacked)[0]
def unpacked(url):
    """Fetch `url` and return its unpacked JS.

    Returns '' when the page is not JS-packed, None on any error.
    """
    try:
        from openscrapers.modules import client
        from openscrapers.modules import jsunpack
        from openscrapers.modules import log_utils
        html = client.request(url)
        if not jsunpack.detect(html):
            log_utils.log('getSum - unpacked - Failed.')
            return ''
        return jsunpack.unpack(html)
    except:
        return
    def sources(self, url, hostDict, hostprDict):
        """Build source dicts for StreamLord.

        Optionally logs in, searches by title (or follows `url` directly
        when it is already absolute), then extracts the CDN stream URL
        from obfuscated inline JS.  Returns a list with at most one
        'cdn' source; on any exception the error is logged and whatever
        was gathered (usually []) is returned.
        """
        try:
            sources = []

            if url == None: return sources

            # With credentials configured, log in and capture UA + cookie
            # for all subsequent requests.
            if (self.user != '' and self.password != ''):  # raise Exception()

                login = urlparse.urljoin(self.base_link, '/login.html')

                post = urllib.urlencode({
                    'username': self.user,
                    'password': self.password,
                    'submit': 'Login'
                })

                cookie = client.request(login,
                                        post=post,
                                        output='cookie',
                                        close=False)

                r = client.request(login,
                                   post=post,
                                   cookie=cookie,
                                   output='extended')

                headers = {'User-Agent': r[3]['User-Agent'], 'Cookie': r[4]}
            else:
                headers = {}

            # Non-absolute `url` is a querystring of metadata: search the
            # site for the title and navigate to the matching page.
            if not str(url).startswith('http'):

                data = urlparse.parse_qs(url)
                data = dict([(i, data[i][0]) if data[i] else (i, '')
                             for i in data])

                title = data['tvshowtitle'] if 'tvshowtitle' in data else data[
                    'title']

                # NOTE(review): `year` is extracted but never used below.
                year = data['year']

                query = urlparse.urljoin(self.base_link, self.search_link)

                post = urllib.urlencode({'searchapi2': title})

                r = client.request(query, post=post, headers=headers)

                # Search results are slugged watch-tvshow-/watch-movie- links.
                if 'tvshowtitle' in data:
                    r = re.findall('(watch-tvshow-.+?-\d+\.html)', r)
                    r = [(i, re.findall('watch-tvshow-(.+?)-\d+\.html', i))
                         for i in r]
                else:
                    r = re.findall('(watch-movie-.+?-\d+\.html)', r)
                    r = [(i, re.findall('watch-movie-(.+?)-\d+\.html', i))
                         for i in r]

                # Keep the first result whose cleaned slug matches the title.
                r = [(i[0], i[1][0]) for i in r if len(i[1]) > 0]
                r = [
                    i for i in r
                    if cleantitle.get(title) == cleantitle.get(i[1])
                ]
                r = [i[0] for i in r][0]

                u = urlparse.urljoin(self.base_link, r)
                # Retry up to 3 times while the page reports 'failed'.
                for i in range(3):
                    r = client.request(u, headers=headers)
                    if not 'failed' in r: break

                # For episodes, descend from the show page to the
                # -sXXeYY- episode page.
                if 'season' in data and 'episode' in data:
                    r = re.findall('(episode-.+?-.+?\d+.+?\d+-\d+.html)', r)
                    r = [
                        i for i in r if '-s%02de%02d-' %
                        (int(data['season']),
                         int(data['episode'])) in i.lower()
                    ][0]

                    r = urlparse.urljoin(self.base_link, r)

                    r = client.request(r, headers=headers)

            else:
                r = urlparse.urljoin(self.base_link, url)

                # NOTE(review): `post` is only defined above when login
                # credentials are set or the search path ran; with an
                # absolute `url` and no login this raises NameError, which
                # is swallowed by the outer except — confirm intent.
                r = client.request(r, post=post, headers=headers)

            quality = 'HD' if '-movie-' in r else 'SD'

            # Primary extraction: the player builds the URL from a JS
            # function plus an array lookup plus a hidden <span>'s text.
            try:
                f = re.findall('''["']sources['"]\s*:\s*\[(.*?)\]''', r)[0]
                f = re.findall('''['"]*file['"]*\s*:\s*([^\(]+)''', f)[0]

                u = re.findall('function\s+%s[^{]+{\s*([^}]+)' % f, r)[0]
                u = re.findall(
                    '\[([^\]]+)[^+]+\+\s*([^.]+).*?getElementById\("([^"]+)',
                    u)[0]

                a = re.findall('var\s+%s\s*=\s*\[([^\]]+)' % u[1], r)[0]
                b = client.parseDOM(r, 'span', {'id': u[2]})[0]

                url = u[0] + a + b
                url = url.replace('"', '').replace(',', '').replace('\/', '/')
                url += '|' + urllib.urlencode(headers)
            except:
                # Fallback 1: whole page is JS-packed.  Fallback 2: the
                # file URL is returned by a named helper function.
                try:
                    url = r = jsunpack.unpack(r)
                    url = url.replace('"', '')
                except:
                    url = re.findall(
                        r'sources[\'"]\s*:\s*\[.*?file[\'"]\s*:\s*(\w+)\(\).*function\s+\1\(\)\s*\{\s*return\([\'"]([^\'"]+)',
                        r, re.DOTALL)[0][1]

            sources.append({
                'source': 'cdn',
                'quality': quality,
                'language': 'en',
                'url': url,
                'direct': True,
                'debridonly': False,
                'autoplay': True
            })

            return sources
        except:
            failure = traceback.format_exc()
            log_utils.log('StreamLord - Exception: \n' + str(failure))
            return sources
Example #11
0
 def sources(self, url, hostDict, hostprDict):
     """Collect German-language sources for the page at `url`.

     Walks the page's player tabs, keeps only the German ('de') ones,
     harvests file/iframe/source links from each tab, then classifies
     every link as either an on-site gvideo stream or an external
     hoster.  Returns a list of source dicts; any top-level failure
     returns whatever was gathered so far.
     """
     sources = []
     try:
         if not url:
             return sources
         url = urlparse.urljoin(self.base_link, url)
         # 'extended' output: body plus response headers/cookies, reused
         # for the follow-up requests below.
         r = client.request(url, output='extended')
         headers = r[3]
         headers.update({
             'Cookie': r[2].get('Set-Cookie'),
             'Referer': self.base_link
         })
         r = r[0]
         # Player tab list: each <li> pairs an anchor (tab id in href)
         # with a language flag image.
         rels = dom_parser.parse_dom(r, 'nav', attrs={'class': 'player'})
         rels = dom_parser.parse_dom(rels, 'ul', attrs={'class': 'idTabs'})
         rels = dom_parser.parse_dom(rels, 'li')
         rels = [(dom_parser.parse_dom(i,
                                       'a',
                                       attrs={'class': 'options'},
                                       req='href'),
                  dom_parser.parse_dom(i, 'img', req='src')) for i in rels]
         rels = [(i[0][0].attrs['href'][1:],
                  re.findall('/flags/(\w+)\.png$', i[1][0].attrs['src']))
                 for i in rels if i[0] and i[1]]
         # Keep only tabs flagged as German.
         rels = [
             i[0] for i in rels if len(i[1]) > 0 and i[1][0].lower() == 'de'
         ]
         r = [dom_parser.parse_dom(r, 'div', attrs={'id': i}) for i in rels]
         # Gather candidate links: inline link/file JS values, metaframe
         # iframes, and <source> elements.
         links = re.findall('''(?:link|file)["']?\s*:\s*["'](.+?)["']''',
                            ''.join([i[0].content for i in r]))
         links += [
             l.attrs['src'] for i in r for l in dom_parser.parse_dom(
                 i, 'iframe', attrs={'class': 'metaframe'}, req='src')
         ]
         links += [
             l.attrs['src'] for i in r
             for l in dom_parser.parse_dom(i, 'source', req='src')
         ]
         for i in links:
             try:
                 # Strip BB-code style tags and HTML entities.
                 i = re.sub('\[.+?\]|\[/.+?\]', '', i)
                 i = client.replaceHTMLCodes(i)
                 if '/play/' in i: i = urlparse.urljoin(self.base_link, i)
                 if self.domains[0] in i:
                     # On-site link: fetch it and unpack any base64- or
                     # JS-packed payloads appended to the page text.
                     i = client.request(i, headers=headers, referer=url)
                     for x in re.findall('''\(["']?(.*)["']?\)''', i):
                         try:
                             i += jsunpack.unpack(
                                 base64.decodestring(
                                     re.sub('"\s*\+\s*"', '',
                                            x))).replace('\\', '')
                         except:
                             pass
                     for x in re.findall('(eval\s*\(function.*?)</script>',
                                         i, re.DOTALL):
                         try:
                             i += jsunpack.unpack(x).replace('\\', '')
                         except:
                             pass
                     # file/label pairs; drop placeholder 'no-video' files.
                     links = [(match[0], match[1]) for match in re.findall(
                         '''['"]?file['"]?\s*:\s*['"]([^'"]+)['"][^}]*['"]?label['"]?\s*:\s*['"]([^'"]*)''',
                         i, re.DOTALL)]
                     links = [(x[0].replace('\/', '/'),
                               source_utils.label_to_quality(x[1]))
                              for x in links if '/no-video.mp4' not in x[0]]
                     # youtu.be ids here actually reference Google Drive
                     # documents; resolve them via directstream.
                     doc_links = [
                         directstream.google(
                             'https://drive.google.com/file/d/%s/view' %
                             match)
                         for match in re.findall(
                             '''file:\s*["'](?:[^"']+youtu.be/([^"']+))''',
                             i, re.DOTALL)
                     ]
                     doc_links = [(u['url'], u['quality'])
                                  for x in doc_links if x for u in x]
                     links += doc_links
                     for url, quality in links:
                         if self.base_link in url:
                             url = url + '|Referer=' + self.base_link
                         sources.append({
                             'source': 'gvideo',
                             'quality': quality,
                             'language': 'de',
                             'url': url,
                             'direct': True,
                             'debridonly': False
                         })
                 else:
                     try:
                         # as long as URLResolver get no Update for this URL (So just a Temp-Solution)
                         did = re.findall(
                             'youtube.googleapis.com.*?docid=(\w+)', i)
                         if did:
                             i = 'https://drive.google.com/file/d/%s/view' % did[
                                 0]
                         valid, host = source_utils.is_host_valid(
                             i, hostDict)
                         if not valid: continue
                         urls, host, direct = source_utils.check_directstreams(
                             i, host)
                         for x in urls:
                             sources.append({
                                 'source': host,
                                 'quality': x['quality'],
                                 'language': 'de',
                                 'url': x['url'],
                                 'direct': direct,
                                 'debridonly': False
                             })
                     except:
                         pass
             except:
                 pass
         return sources
     except:
         return sources