Code example #1
    def resolve(self, url):
        try:
            # url = 'https://primewire.ink/go.php?title=Black-Panther&url=efdf73e70e3dc1c530e983a5bb2414365d813086==&id=384919&loggedin=0'
            if '/stream/' in url or '/watch/' in url:
                # r = self.scraper.get(url, self.headers).text
                r = client.request(url, referer=self.base_link)
                link = client.parseDOM(r,
                                       'a',
                                       ret='data-href',
                                       attrs={'id': 'iframe_play'})[0]
            else:
                try:
                    # data = self.scraper.get(url, self.headers).text
                    data = client.request(url, referer=self.base_link)
                    data = re.findall(r'\s*(eval.+?)\s*</script', data,
                                      re.DOTALL)[0]
                    link = jsunpack.unpack(data)
                    link = link.replace('\\', '')
                    if 'eval' in link:
                        link = jsunpack.unpack(link)
                    link = link.replace('\\', '')
                    host = re.findall('hosted=\'(.+?)\';var', link,
                                      re.DOTALL)[0]
                    if 'streamango' in host:
                        loc = re.findall('''loc\s*=\s*['"](.+?)['"]''', link,
                                         re.DOTALL)[0]
                        link = 'https://streamango.com/embed/{0}'.format(loc)
                    elif 'openload' in host:
                        loc = re.findall('''loc\s*=\s*['"](.+?)['"]''', link,
                                         re.DOTALL)[0]
                        link = 'https://openload.co/embed/{0}'.format(loc)
                    else:
                        link = re.findall('''loc\s*=\s*['"](.+?)['"]\;''',
                                          link, re.DOTALL)[0]
                except BaseException:
                    link = client.request(url, output='geturl', timeout=10)
                    print link
                    if link == url:
                        return
                    else:
                        return link

            return link
        except BaseException:
            return
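Every example on this page hinges on the same step: locate the packed eval(function(p,a,c,k,e,d)...) block in the page source and run it through jsunpack. A minimal standalone sketch of that step, assuming the universalscrapers jsunpack module imported in code examples #3 and #4 and an html string as returned by client.request:

import re

from universalscrapers.modules import jsunpack


def unpack_page_scripts(html):
    # Collect every packed "eval(...)</script>" block on the page.
    packed_blocks = re.findall(r'\s*(eval.+?)\s*</script', html, re.DOTALL)
    unpacked = []
    for block in packed_blocks:
        code = jsunpack.unpack(block).replace('\\', '')
        # Some hosts pack the payload twice (see code example #1), so unpack again.
        if 'eval' in code:
            code = jsunpack.unpack(code).replace('\\', '')
        unpacked.append(code)
    return unpacked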
Code example #2
    def get_sources(self, url, title, year, season, episode, start_time):
        try:
            count = 0
            url = urlparse.urljoin(self.base_link,
                                   url) if url.startswith('/') else url

            r = client.request(url)
            data = re.findall(r'\s*(eval.+?)\s*</script', r, re.DOTALL)[1]
            data = jsunpack.unpack(data).replace('\\', '')

            # https://www.primewire.ink/ajax-78583.php?slug=watch-2809620-Black-Panther&cp=7TYP4N
            # var rtv=\'aja\';var aa=\'x-7\';var ba=\'85\';var ca=\'83\';var da=\'.ph\';var ea=\'p?sl\';var fa=\'ug=\';var ia=\'&cp=7T\';var ja=\'YP\';var ka=\'4N\';var code=ia+ja+ka;var page=rtv+aa+ba+ca+da+ea+fa;function goml(loc){$(\'#div1\').load(domain+page+loc+code)}
            pattern = '''rtv='(.+?)';var aa='(.+?)';var ba='(.+?)';var ca='(.+?)';var da='(.+?)';var ea='(.+?)';var fa='(.+?)';var ia='(.+?)';var ja='(.+?)';var ka='(.+?)';'''
            links_url = re.findall(pattern, data, re.DOTALL)[0]
            slug = 'slug={}'.format(url.split('/')[-1])
            links_url = self.base_link + ''.join(links_url).replace('slug=', slug)
            links = client.request(links_url)
            links = client.parseDOM(links, 'tbody')

            #xbmc.log('@#@LINKSSSS: %s' % links, xbmc.LOGNOTICE)
            for link in links:
                try:
                    data = [(client.parseDOM(link, 'a', ret='href')[0],
                             client.parseDOM(link,
                                             'span',
                                             attrs={'class':
                                                    'version_host'})[0])][0]
                    link = urlparse.urljoin(self.base_link, data[0])

                    host = data[1]

                    if not filter_host(host): continue

                    count += 1
                    self.sources.append({
                        'source': host,
                        'quality': 'SD',
                        'scraper': self.name,
                        'url': link,
                        'direct': False
                    })
                except:
                    pass
            if dev_log == 'true':
                end_time = time.time() - start_time
                send_log(self.name,
                         end_time,
                         count,
                         title,
                         year,
                         season=season,
                         episode=episode)

        except Exception, argument:
            if dev_log == 'true':
                error_log(self.name, argument)
            return self.sources
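The regex in code example #2 only captures the string fragments that the page's own JavaScript concatenates into its AJAX path (see the commented sample above). Reassembling those fragments reproduces the commented URL; the values below are copied from that comment and are illustrative only:

# Fragment values taken from the commented sample in code example #2.
rtv, aa, ba, ca, da, ea, fa = 'aja', 'x-7', '85', '83', '.ph', 'p?sl', 'ug='
ia, ja, ka = '&cp=7T', 'YP', '4N'

page = rtv + aa + ba + ca + da + ea + fa  # 'ajax-78583.php?slug='
code = ia + ja + ka                       # '&cp=7TYP4N'
slug = 'watch-2809620-Black-Panther'

# base_link + page + slug + code gives:
# https://www.primewire.ink/ajax-78583.php?slug=watch-2809620-Black-Panther&cp=7TYP4N
print(page + slug + code)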
Code example #3
File: watchtvseries.py  Project: user135711/scrapers
    def resolve(self, link):
        try:
            from universalscrapers.modules import jsunpack
            data = client.request(link, referer=self.base_link)
            data = re.findall('\s*(eval.+?)\s*</script', data, re.DOTALL)[0]
            url = jsunpack.unpack(data)
            url = url.replace('\\', '')
            url = client.parseDOM(url, 'iframe', ret='src')[0]
            url = 'http:' + url if url.startswith('//') else url
            url = url.split('-"+')[0] if '+window' in url else url
            return url
        except:
            return link
Code example #4
    def resolve(self, url):
        try:
            try:
                from universalscrapers.modules import jsunpack
                data = client.request(url, referer=self.base_link)
                data = re.findall('\s*(eval.+?)\s*</script', data,
                                  re.DOTALL)[0]
                link = jsunpack.unpack(data)
                link = link.replace('\\', '')
                link = re.findall('''go\(['"](.+?)['"]\)''', link)[0]
            except:
                link = client.request(url, output='geturl', timeout=10)
                if link == url:
                    return
                else:
                    return link

            return link
        except:
            return
Code example #5
    def scrape_movie(self, title, year, imdb, debrid=False):
        links = []
        try:
            start_time = time.time()
            search_id = urllib.quote_plus('%s %s' %
                                          (clean_search(title), year))
            start_url = urlparse.urljoin(self.base_link,
                                         self.search_link % search_id)
            html = client.request(start_url)
            posts = client.parseDOM(html,
                                    'div',
                                    attrs={'class': 'cell_container'})
            posts = [i for i in posts if year in i]
            posts = [dom.parse_dom(i, 'a', req='href')[1] for i in posts if i]
            post = [
                i.attrs['href'] for i in posts
                if clean_title(title) == clean_title(i.content)
            ][0]

            mov_link = urlparse.urljoin(self.base_link, post)
            r = client.request(mov_link)
            res_chk = client.parseDOM(r, 'h1')[0]

            url = re.findall('''frame_url\s*=\s*["']([^']+)['"]\;''', r,
                             re.DOTALL)[0]
            furl = url if url.startswith('http') else urlparse.urljoin(
                'https://', url)

            try:
                r = client.request(furl)
                ua = {
                    'User-Agent':
                    'Mozilla/5.0 (Windows NT 6.1; rv:14.0) Gecko/20100101 Firefox/14.0.1'
                }
                id_view = client.request(
                    'https://vidlink.org/embed/update_views', headers=ua)
                id_view = json.loads(id_view)['id_view'].replace('\/', '/')
                postID = re.findall('''postID\s*=\s*['"]([^'"]+)['"]\;''',
                                    r)[0]
                try:
                    plink = 'https://vidlink.org/streamdrive/info'
                    data = {
                        'browserName': 'Firefox',
                        'platform': 'Win32',
                        'postID': postID,
                        'id_view': id_view
                    }
                    headers = ua
                    headers['X-Requested-With'] = 'XMLHttpRequest'
                    headers['Referer'] = url
                    ihtml = client.request(plink, post=data, headers=headers)
                    linkcode = jsunpack.unpack(ihtml).replace('\\', '')
                    linkcode = re.findall('window\.srcs\s*=\s*\[(.+?)\]\;',
                                          linkcode, re.DOTALL)[0]
                    frames = json.loads(linkcode)
                    link = frames['url']
                    links.append(link)
                except:
                    pass
                try:
                    plink = 'https://vidlink.org/opl/info'
                    post = 'postID=%s' % postID
                    headers = ua
                    headers['X-Requested-With'] = 'XMLHttpRequest'
                    headers['Referer'] = url
                    ihtml = client.request(plink, post=post, headers=headers)
                    ihtml = json.loads(ihtml)['id']
                    link = 'https://oload.icu/embed/%s' % ihtml
                    links.append(link)
                except:
                    pass
            except:
                pass

            count = 0
            #xbmc.log('@#@-LINKS:%s' % links, xbmc.LOGNOTICE)
            for link in links:
                if '1080' in res_chk:
                    res = '1080p'
                elif '720' in res_chk:
                    res = '720p'
                else:
                    res = 'DVD'

                count += 1
                if 'google' in link:
                    self.sources.append({
                        'source': 'Googlelink',
                        'quality': res,
                        'scraper': self.name,
                        'url': link,
                        'direct': True
                    })
                else:
                    self.sources.append({
                        'source': 'Openload',
                        'quality': res,
                        'scraper': self.name,
                        'url': link,
                        'direct': False
                    })

                if dev_log == 'true':
                    end_time = time.time() - start_time
                    send_log(self.name, end_time, count, title, year)
            return self.sources
        except Exception, argument:
            if dev_log == 'true':
                error_log(self.name, argument)


#putlockerhd().scrape_movie('Black Panther', '2018','')
Code example #6
File: watch32.py  Project: varunrai/scrapers
    def scrape_movie(self, title, year, imdb, debrid=False):
        try:
            start_time = time.time()
            search_id = clean_search(title.lower())
            start_url = urlparse.urljoin(self.base_link, self.search_link % urllib.quote_plus(search_id))
            headers = {'User-Agent': client.agent()}
            html = client.request(start_url, headers=headers)
            results = client.parseDOM(html, 'div', attrs={'class': 'video_title'})

            items = []
            for item in results:
                try:
                    data = dom_parser.parse_dom(item, 'a', req=['href', 'title'])[0]
                    t = data.content
                    y = re.findall('\((\d{4})\)', data.attrs['title'])[0]
                    qual = data.attrs['title'].split('-')[1]
                    link = data.attrs['href']

                    if not clean_title(t) == clean_title(title): continue
                    if not y == year: continue

                    items += [(link, qual)]

                except:
                    pass
            for item in items:
                count = 0
                try:
                    url = item[0] if item[0].startswith('http') else urlparse.urljoin(self.base_link, item[0])
                    r = client.request(url)

                    qual = client.parseDOM(r, 'h1')[0]
                    res = quality_tags.get_release_quality(item[1], qual)[0]

                    url = re.findall('''frame_url\s*=\s*["']([^']+)['"]\;''', r, re.DOTALL)[0]
                    url = url if url.startswith('http') else urlparse.urljoin('https://', url)
                    if 'vidlink' in url:
                        html = client.request(url, headers=headers)
                        action = re.findall("action'\s*:\s*'([^']+)", html)[0]
                        postID = re.findall("postID\s*=\s*'([^']+)", html)[0]
                        url = 'https://vidlink.org' + re.findall("var\s*url\s*=\s*'([^']+)", html)[0]
                        data = {'browserName': 'Firefox',
                                'platform': 'Win32',
                                'postID': postID,
                                'action': action}

                        headers['X-Requested-With'] = 'XMLHttpRequest'
                        headers['Referer'] = url
                        html = client.request(url, post=data, headers=headers)
                        html = jsunpack.unpack(html).replace('\\', '')
                        sources = json.loads(re.findall('window\.srcs\s*=\s*([^;]+)', html, re.DOTALL)[0])
                        for src in sources:
                            r = requests.head(src['url'], headers={'User-Agent': client.agent()})
                            if r.status_code < 400:
                                movie_link = src['url']
                                count += 1
                                self.sources.append({'source': 'Googlelink', 'quality': res,
                                                    'scraper': self.name, 'url': movie_link, 'direct': True})
                            else:
                                continue

                except:
                    pass
                if dev_log == 'true':
                    end_time = time.time() - start_time
                    send_log(self.name, end_time, count, title, year)
            #print self.sources
            return self.sources
        except Exception, argument:
            print argument
            if dev_log == 'true':
                error_log(self.name,argument)
            return self.sources

#watch32().scrape_movie('Black Panther', '2018', 'tt1825683', False)
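Code example #6 probes each candidate URL with a HEAD request and only keeps links that answer with a non-error status. That check in isolation, as a minimal sketch using the requests library already relied on above (the timeout and redirect handling are additions here, not part of the original scraper):

import requests


def link_is_alive(url, user_agent):
    # Any 2xx/3xx response counts as a working link; errors and dead hosts do not.
    try:
        r = requests.head(url, headers={'User-Agent': user_agent},
                          timeout=10, allow_redirects=True)
        return r.status_code < 400
    except requests.RequestException:
        return False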