def __get_gk_links(self, link, iframe_url):
    """Resolve a GK player token into playable stream sources.

    :param link: encoded link token POSTed to GK_URL
    :param iframe_url: player iframe url, sent as the Referer header
    :return: dict of {stream_url: {'quality': ..., 'direct': bool}}
    """
    sources = {}
    data = {'link': link}
    # FIX: copy XHR before updating — the original assigned the shared
    # module-level XHR dict and then mutated it, leaking this request's
    # Referer/User-Agent into every later user of XHR.
    headers = dict(XHR)
    headers.update({'Referer': iframe_url, 'User-Agent': USER_AGENT})
    html = self._http_get(GK_URL, data=data, headers=headers, cache_limit=.25)
    js_data = scraper_utils.parse_json(html, GK_URL)
    if 'link' in js_data:
        if isinstance(js_data['link'], basestring):
            # single-url response: either a gvideo page or a plain hosted url
            stream_url = js_data['link']
            if scraper_utils.get_direct_hostname(self, stream_url) == 'gvideo':
                for source in scraper_utils.parse_google(self, stream_url):
                    sources[source] = {'quality': scraper_utils.gv_get_quality(source), 'direct': True}
            else:
                sources[stream_url] = {'quality': QUALITIES.HIGH, 'direct': False}
        else:
            # list-of-dicts response; loop var renamed so it no longer
            # shadows the `link` parameter
            for link_item in js_data['link']:
                stream_url = link_item['link']
                if scraper_utils.get_direct_hostname(self, stream_url) == 'gvideo':
                    quality = scraper_utils.gv_get_quality(stream_url)
                elif 'label' in link_item:
                    quality = scraper_utils.height_get_quality(link_item['label'])
                else:
                    quality = QUALITIES.HIGH
                sources[stream_url] = {'quality': quality, 'direct': True}
    return sources
    def get_sources(self, video):
        """Parse the page's pic/emb/vb/dir embed blob into hoster dicts.

        The page packs its sources into a query-string-like blob
        (pic=...&emb=...&vb=...&dir=...); each prefix selects a resolver.

        :param video: video object describing the requested title
        :return: list of hoster dicts
        """
        source_url = self.get_url(video)
        hosters = []
        if not source_url or source_url == FORCE_NO_MATCH: return hosters
        url = scraper_utils.urljoin(self.base_url, source_url)
        html = self._http_get(url, cache_limit=.5)

        match = re.search('((?:pic|emb|vb|dir|emb2)=[^<]+)', html)
        if match:
            embeds = match.group(1)
            for stream_url in embeds.split('&'):
                if stream_url.startswith('dir='):
                    headers = {'Referer': url}
                    # FIX: 'dir=' is 4 characters; the original used [3:] and
                    # sent a leading '=' in the v parameter (cf. 'vb='→[3:],
                    # 'pic='→[4:] below, which strip their full prefixes)
                    html = self._http_get(DIR_URL, params={'v': stream_url[4:]}, headers=headers, auth=False, allow_redirect=False, cache_limit=.5)
                    if html.startswith('http'):
                        stream_url = html + scraper_utils.append_headers({'User-Agent': scraper_utils.get_ua(), 'Referer': url})
                        host = scraper_utils.get_direct_hostname(self, stream_url)
                        direct = True
                        quality = QUALITIES.HD720
                    else:
                        continue
                elif stream_url.startswith('vb='):
                    stream_url = 'http://www.vidbux.com/%s' % (stream_url[3:])
                    host = 'vidbux.com'
                    direct = False
                    quality = scraper_utils.get_quality(video, host, QUALITIES.HD1080)
                elif stream_url.startswith('pic='):
                    data = {'url': stream_url[4:]}
                    html = self._http_get(PHP_URL, data=data, auth=False, cache_limit=1)
                    js_data = scraper_utils.parse_json(html, PHP_URL)
                    # NOTE(review): host is derived from the raw 'pic=...'
                    # token rather than the resolved item url — confirm this
                    # is intentional
                    host = scraper_utils.get_direct_hostname(self, stream_url)
                    direct = True
                    # take the first 'video' item; skip the embed when none exists
                    for item in js_data:
                        if item.get('medium') == 'video':
                            stream_url = item['url']
                            quality = scraper_utils.width_get_quality(item['width'])
                            break
                    else:
                        continue
                elif stream_url.startswith(('emb=', 'emb2=')):
                    stream_url = re.sub('emb\d*=', '', stream_url)
                    host = urlparse.urlparse(stream_url).hostname
                    direct = False
                    quality = scraper_utils.get_quality(video, host, QUALITIES.HD720)
                else:
                    continue

                hoster = {'multi-part': False, 'host': host, 'class': self, 'quality': quality, 'views': None, 'rating': None, 'url': stream_url, 'direct': direct}
                hosters.append(hoster)

        return hosters
# Beispiel #3 (0) — scraped example separator, not code
    def get_sources(self, video):
        """Collect debrid hosters from the rows of the page's links-table."""
        hosters = []
        source_url = self.get_url(video)
        if not source_url or source_url == FORCE_NO_MATCH:
            return hosters

        page_url = scraper_utils.urljoin(self.base_url, source_url)
        html = self._http_get(page_url, require_debrid=True, cache_limit=.5)
        fragment = dom_parser2.parse_dom(html, 'table', {'class': 'links-table'})
        if not fragment:
            return hosters

        row_pattern = re.compile("playVideo\.bind\(.*?'([^']+)(?:[^>]*>){2}(.*?)</td>", re.DOTALL)
        for _attrs, row in dom_parser2.parse_dom(fragment[0].content, 'tr'):
            match = row_pattern.search(row)
            if not match:
                continue

            stream_url, release = match.groups()
            # gvideo pages expand into several concrete stream urls
            if scraper_utils.get_direct_hostname(self, stream_url) == 'gvideo':
                candidates = scraper_utils.parse_google(self, stream_url)
            else:
                candidates = [stream_url]

            for candidate in candidates:
                host = scraper_utils.get_direct_hostname(self, candidate)
                direct = host == 'gvideo'
                if direct:
                    quality = scraper_utils.gv_get_quality(candidate)
                else:
                    host = urlparse.urlparse(candidate).hostname
                    # derive a baseline quality from the release name
                    if video.video_type == VIDEO_TYPES.MOVIE:
                        meta = scraper_utils.parse_movie_link(release)
                    else:
                        meta = scraper_utils.parse_episode_link(release)
                    base_quality = scraper_utils.height_get_quality(meta['height'])
                    quality = scraper_utils.get_quality(video, host, base_quality)

                hosters.append({'multi-part': False, 'host': host, 'class': self,
                                'quality': quality, 'views': None, 'rating': None,
                                'url': candidate, 'direct': direct})

        return hosters
# Beispiel #4 (0) — scraped example separator, not code
    def get_sources(self, video):
        """Build direct hosters for a movie link, or one per matched episode."""
        hosters = []
        source_url = self.get_url(video)
        if not source_url or source_url == FORCE_NO_MATCH:
            return hosters

        headers = {
            'User-Agent': scraper_utils.get_ua(),
            'Referer': self.base_url + source_url
        }
        if video.video_type == VIDEO_TYPES.MOVIE:
            meta = scraper_utils.parse_movie_link(source_url)
            stream_url = source_url + scraper_utils.append_headers(headers)
            hoster = {'multi-part': False,
                      'host': scraper_utils.get_direct_hostname(self, stream_url),
                      'class': self,
                      'quality': scraper_utils.height_get_quality(meta['height']),
                      'views': None, 'rating': None,
                      'url': stream_url, 'direct': True}
            if 'format' in meta:
                hoster['format'] = meta['format']
            hosters.append(hoster)
        else:
            for episode in self.__match_episode(source_url, video):
                meta = scraper_utils.parse_episode_link(episode['title'])
                # headers ride along on the url; strip the site prefix
                stream_url = episode['url'] + scraper_utils.append_headers(headers)
                stream_url = stream_url.replace(self.base_url, '')
                hoster = {'multi-part': False,
                          'host': scraper_utils.get_direct_hostname(self, stream_url),
                          'class': self,
                          'quality': scraper_utils.height_get_quality(meta['height']),
                          'views': None, 'rating': None,
                          'url': stream_url, 'direct': True}
                if 'format' in meta:
                    hoster['format'] = meta['format']
                if 'size' in episode:
                    hoster['size'] = scraper_utils.format_size(int(episode['size']))
                hosters.append(hoster)

        return hosters
 def __get_links_from_json2(self, url, page_url, video_type):
     """Pull direct stream sources out of a jwplayer-style JSON playlist."""
     sources = {}
     headers = {'Referer': page_url}
     headers.update(XHR)
     html = self._http_get(url, headers=headers, cache_limit=0)
     js_data = scraper_utils.parse_json(html, url)
     try:
         playlist = js_data.get('playlist', [])
         for entry in playlist[0].get('sources', []):
             stream_url = entry['file']
             if scraper_utils.get_direct_hostname(self, stream_url) == 'gvideo':
                 quality = scraper_utils.gv_get_quality(stream_url)
             elif 'label' in entry:
                 quality = scraper_utils.height_get_quality(entry['label'])
             else:
                 # no label: infer the height from the filename instead
                 if video_type == VIDEO_TYPES.MOVIE:
                     meta = scraper_utils.parse_movie_link(stream_url)
                 else:
                     meta = scraper_utils.parse_episode_link(stream_url)
                 quality = scraper_utils.height_get_quality(meta['height'])
             sources[stream_url] = {'quality': quality, 'direct': True}
             logger.log('Adding stream: %s Quality: %s' % (stream_url, quality), log_utils.LOGDEBUG)
     except Exception as e:
         # empty playlist (IndexError) or malformed entries end extraction
         logger.log('Exception during yesmovies extract: %s' % (e), log_utils.LOGDEBUG)
     return sources
 def get_sources(self, video):
     """Gather hosters from the main page plus any alternate player pages.

     Fetches the primary page, discovers alternate pages via
     __get_alt_pages(), merges all discovered sources, then classifies
     each stream as direct (gvideo or host-served) or hosted.
     """
     source_url = self.get_url(video)
     hosters = []
     if not source_url or source_url == FORCE_NO_MATCH: return hosters
     page_url = scraper_utils.urljoin(self.base_url, source_url)
     html = self._http_get(page_url, cache_limit=1)
     pages = self.__get_alt_pages(html, page_url)
     # default True when the main page is absent from its own alt-page map
     sources = self.__get_sources(html, page_url, pages.get(page_url, True))
     for page in pages:
         if page == page_url: continue
         # NOTE(review): urljoin is called with three args here — presumably
         # a project-local variant of urljoin; confirm the pages[page] value
         # is meant as a join component
         page_url = scraper_utils.urljoin(self.base_url, page, pages[page])
         html = self._http_get(page_url, cache_limit=1)
         sources.update(self.__get_sources(html, page, pages[page]))

     for stream_url, values in sources.iteritems():
         host = scraper_utils.get_direct_hostname(self, stream_url)
         if host == 'gvideo':
             quality = scraper_utils.gv_get_quality(stream_url)
             direct = True
         elif values['direct']:
             quality = values['quality']
             direct = True
         else:
             # hosted link: quality as reported, host from the url itself
             quality = values['quality']
             direct = False
             host = urlparse.urlparse(stream_url).hostname

         hoster = {'multi-part': False, 'host': host, 'class': self, 'quality': quality, 'views': None, 'rating': None, 'url': stream_url, 'direct': direct}
         if values['subs']: hoster['subs'] = 'Turkish Subtitles'
         hosters.append(hoster)

     return hosters
    def get_sources(self, video):
        """Scrape player-data anchors and resolve their embeds into hosters."""
        hosters = []
        sources = {}
        source_url = self.get_url(video)
        if not source_url or source_url == FORCE_NO_MATCH:
            return hosters

        page_url = scraper_utils.urljoin(self.base_url, source_url)
        html = self._http_get(page_url, cache_limit=.5)
        for match in re.finditer('player-data="([^"]+)[^>]+episode-data="([^"]+)[^>]*>(.*?)</a>', html, re.DOTALL):
            player_url, ep_id, label = match.groups()
            # episodes must match the requested episode id
            if video.video_type == VIDEO_TYPES.EPISODE and not self.__episode_match(video, ep_id):
                continue

            label = label.strip()
            if re.match('https?://embed', player_url):
                src_html = self._http_get(player_url, headers={'Referer': page_url}, cache_limit=.5)
                sources.update(scraper_utils.parse_sources_list(self, src_html))
                sources.update(self.__get_sources(src_html, label))
            else:
                sources[player_url] = {'direct': False, 'quality': Q_MAP.get(label.upper(), QUALITIES.HIGH)}

        for source, value in sources.iteritems():
            direct = value['direct']
            quality = value['quality']
            if direct:
                host = scraper_utils.get_direct_hostname(self, source)
                stream_url = source + scraper_utils.append_headers({'User-Agent': scraper_utils.get_ua()})
            else:
                host = urlparse.urlparse(source).hostname
                stream_url = source

            hosters.append({'multi-part': False, 'host': host, 'class': self,
                            'quality': quality, 'views': None, 'rating': None,
                            'url': stream_url, 'direct': direct})

        return hosters
# Beispiel #8 (0) — scraped example separator, not code
    def get_sources(self, video):
        """Parse the 'playex' player block and return its direct streams."""
        hosters = []
        source_url = self.get_url(video)
        if not source_url or source_url == FORCE_NO_MATCH:
            return hosters

        url = scraper_utils.urljoin(self.base_url, source_url)
        html = self._http_get(url, cache_limit=8)
        fragment = dom_parser2.parse_dom(html, 'div', {'class': 'playex'})
        if fragment:
            html = fragment[0].content

        links = scraper_utils.parse_sources_list(self, html)
        for link in links:
            stream_url = link
            if self.base_url in link:
                # follow one redirect hop manually so the final url is kept
                redir_url = self._http_get(link, headers={'Referer': url}, allow_redirect=False, method='HEAD')
                if redir_url.startswith('http'):
                    stream_url = redir_url

            host = scraper_utils.get_direct_hostname(self, stream_url)
            if host == 'gvideo':
                quality = scraper_utils.gv_get_quality(stream_url)
            else:
                quality = links[link]['quality']
                stream_url += scraper_utils.append_headers({'User-Agent': scraper_utils.get_ua(), 'Referer': url})

            hosters.append({'multi-part': False, 'url': stream_url, 'host': host,
                            'class': self, 'quality': quality, 'views': None,
                            'rating': None, 'direct': True})

        return hosters
 def __get_king_links(self, iframe_url):
     """Query the King player's XHR endpoint for its video sources.

     :param iframe_url: iframe url of the form <player>?v=<ID>
     :return: list of hoster dicts (all direct, tagged 'Turkish Subtitles')
     """
     hosters = []
     match = re.search('v=(.*)', iframe_url)
     if match:
         data = {'ID': match.group(1)}
         headers = {'Referer': iframe_url}
         headers.update(XHR)
         xhr_url = iframe_url.split('?')[0]
         html = self._http_get(xhr_url, params={'p': 'GetVideoSources'}, data=data, headers=headers, cache_limit=.5)
         js_data = scraper_utils.parse_json(html, xhr_url)
         try:
             for source in js_data['VideoSources']:
                 stream_url = source['file'] + scraper_utils.append_headers({'User-Agent': scraper_utils.get_ua()})
                 host = scraper_utils.get_direct_hostname(self, source['file'])
                 label = source.get('label', '')
                 if host == 'gvideo':
                     quality = scraper_utils.gv_get_quality(source['file'])
                 elif re.search('\d+p?', label):
                     quality = scraper_utils.height_get_quality(label)
                 else:
                     quality = QUALITY_MAP.get(label, QUALITIES.HIGH)
                 hoster = {'multi-part': False, 'host': host, 'class': self, 'quality': quality, 'views': None, 'rating': None, 'url': stream_url, 'direct': True, 'subs': 'Turkish Subtitles'}
                 hosters.append(hoster)
         except Exception:
             # FIX: was a bare `except:`; keep the best-effort behavior on
             # malformed JSON but stop swallowing SystemExit/KeyboardInterrupt
             pass

     return hosters
# Beispiel #10 (0) — scraped example separator, not code
    def get_sources(self, video):
        """Return direct hosters parsed from the page's 'playex' section."""
        hosters = []
        source_url = self.get_url(video)
        if not source_url or source_url == FORCE_NO_MATCH:
            return hosters

        url = scraper_utils.urljoin(self.base_url, source_url)
        html = self._http_get(url, cache_limit=8)
        fragment = dom_parser2.parse_dom(html, 'div', {'class': 'playex'})
        if fragment:
            html = fragment[0].content

        links = scraper_utils.parse_sources_list(self, html)
        for link in links:
            final_url = link
            # internal links are one redirect away from the real stream
            if self.base_url in link:
                location = self._http_get(link, headers={'Referer': url}, allow_redirect=False, method='HEAD')
                if location.startswith('http'):
                    final_url = location

            host = scraper_utils.get_direct_hostname(self, final_url)
            if host == 'gvideo':
                quality = scraper_utils.gv_get_quality(final_url)
            else:
                quality = links[link]['quality']
                final_url += scraper_utils.append_headers({'User-Agent': scraper_utils.get_ua(), 'Referer': url})

            source = {'multi-part': False, 'url': final_url, 'host': host, 'class': self,
                      'quality': quality, 'views': None, 'rating': None, 'direct': True}
            hosters.append(source)

        return hosters
# Beispiel #11 (0) — scraped example separator, not code
    def __get_links_from_xml(self, url, video, page_url, cookies):
        """Parse a jwplayer RSS/XML feed into {stream_url: {quality, direct}}.

        :param url: xml feed url
        :param video: video object used for blog-style quality guessing
        :param page_url: referring page, sent as the Referer header
        :param cookies: cookies forwarded with the request
        """
        sources = {}
        try:
            headers = {'Referer': page_url}
            xml = self._http_get(url,
                                 cookies=cookies,
                                 headers=headers,
                                 cache_limit=.5)
            root = ET.fromstring(xml)
            for item in root.findall('.//item'):
                title = item.find('title').text
                # skip items titled 'OOPS!' — treated as non-streams
                if title and title.upper() == 'OOPS!': continue
                for source in item.findall('{http://rss.jwpcdn.com/}source'):
                    stream_url = source.get('file')
                    label = source.get('label')
                    # quality preference: gvideo url > explicit label >
                    # guess from the item title > guess from the url
                    if scraper_utils.get_direct_hostname(
                            self, stream_url) == 'gvideo':
                        quality = scraper_utils.gv_get_quality(stream_url)
                    elif label:
                        quality = scraper_utils.height_get_quality(label)
                    elif title:
                        quality = scraper_utils.blog_get_quality(
                            video, title, '')
                    else:
                        quality = scraper_utils.blog_get_quality(
                            video, stream_url, '')
                    sources[stream_url] = {'quality': quality, 'direct': True}
                    logger.log(
                        'Adding stream: %s Quality: %s' %
                        (stream_url, quality), log_utils.LOGDEBUG)
        except Exception as e:
            # malformed xml or missing elements abort extraction quietly
            logger.log('Exception during YesMovies XML Parse: %s' % (e),
                       log_utils.LOGWARNING)

        return sources
# Beispiel #12 (0) — scraped example separator, not code
    def get_sources(self, video):
        """Resolve the playex iframe player into direct stream hosters."""
        hosters = []
        source_url = self.get_url(video)
        if not source_url or source_url == FORCE_NO_MATCH:
            return hosters

        page_url = scraper_utils.urljoin(self.base_url, source_url)
        html = self._http_get(page_url, cache_limit=8)
        fragment = dom_parser2.parse_dom(html, 'div', {'class': 'playex'})
        if fragment:
            html = fragment[0].content

        iframe_url = dom_parser2.parse_dom(html, 'iframe', req='src')
        if not iframe_url:
            return hosters

        iframe_url = iframe_url[0].attrs['src']
        if iframe_url.startswith('/'):
            iframe_url = scraper_utils.urljoin(self.base_url, iframe_url)

        html = self._http_get(iframe_url, headers={'Referer': page_url}, cache_limit=.5)
        obj = dom_parser2.parse_dom(html, 'object', req='data')
        if obj:
            # an <object data=...> element points at a google-video page
            streams = {}
            for stream_url in scraper_utils.parse_google(self, obj[0].attrs['data']):
                streams[stream_url] = {'quality': scraper_utils.gv_get_quality(stream_url), 'direct': True}
        else:
            streams = scraper_utils.parse_sources_list(self, html)

        for stream_url, values in streams.iteritems():
            host = scraper_utils.get_direct_hostname(self, stream_url)
            if host == 'gvideo':
                quality = scraper_utils.gv_get_quality(stream_url)
            else:
                quality = values['quality']
                stream_url += scraper_utils.append_headers({'User-Agent': scraper_utils.get_ua(), 'Referer': page_url})

            hosters.append({'multi-part': False, 'url': stream_url, 'host': host,
                            'class': self, 'quality': quality, 'views': None,
                            'rating': None, 'direct': True})

        return hosters
    def __get_episode_sources(self, source_url, video):
        """Turn finished hashes for matched episode links into hosters."""
        hosters = []
        links = self.__find_episode(source_url, video)
        if not links:
            return hosters

        hash_data = self.__get_hash_data([link[0] for link in links])
        for link in links:
            hash_id, title = link[0], link[1]
            try:
                status = hash_data['hashes'][hash_id]['status']
            except KeyError:
                status = ''
            # only hashes the backend reports as finished are playable
            if status.lower() != 'finished':
                continue

            stream_url = 'hash_id=%s' % (hash_id)
            hoster = {'multi-part': False,
                      'class': self,
                      'views': None,
                      'url': stream_url,
                      'rating': None,
                      'host': scraper_utils.get_direct_hostname(self, stream_url),
                      'quality': scraper_utils.blog_get_quality(video, title, ''),
                      'direct': True,
                      'extra': title}
            hosters.append(hoster)

        return hosters
 def get_sources(self, video):
     """Scrape the film-container iframe and rank sources by bitrate tag."""
     source_url = self.get_url(video)
     hosters = []
     if not source_url or source_url == FORCE_NO_MATCH:
         return hosters

     page_url = scraper_utils.urljoin(self.base_url, source_url)
     html = self._http_get(page_url, cache_limit=.5)
     fragment = dom_parser2.parse_dom(html, 'div', {'class': 'film-container'})
     if fragment:
         iframe_url = dom_parser2.parse_dom(fragment[0].content, 'iframe', req='src')
         if iframe_url:
             iframe_url = scraper_utils.urljoin(self.base_url, iframe_url[0].attrs['src'])
             headers = {'Referer': page_url}
             html = self._http_get(iframe_url, headers=headers, cache_limit=.5)
             sources = scraper_utils.parse_sources_list(self, html)
             for source in sources:
                 stream_url = source + scraper_utils.append_headers({'User-Agent': scraper_utils.get_ua(), 'Referer': iframe_url})
                 hoster = {'multi-part': False,
                           'host': scraper_utils.get_direct_hostname(self, source),
                           'class': self,
                           'quality': sources[source]['quality'],
                           'views': None, 'rating': None,
                           'url': stream_url, 'direct': True}
                 bitrate = re.search('(\d+[a-z]bps)', source)
                 if bitrate:
                     hoster['extra'] = bitrate.group(1)
                 hosters.append(hoster)

     # lexicographic sort on the bitrate tag; untagged sources sort last
     hosters.sort(key=lambda x: x.get('extra', ''), reverse=True)
     return hosters
    def __get_links_from_xml(self, url, video, page_url, cookies):
        """Parse a jwplayer RSS/XML feed into {stream_url: {quality, direct}}.

        :param url: xml feed url
        :param video: video object used for blog-style quality guessing
        :param page_url: referring page, sent as the Referer header
        :param cookies: cookies forwarded with the request
        """
        sources = {}
        try:
            headers = {'Referer': page_url}
            xml = self._http_get(url, cookies=cookies, headers=headers, cache_limit=.5)
            root = ET.fromstring(xml)
            for item in root.findall('.//item'):
                title = item.find('title').text
                # skip items titled 'OOPS!' — treated as non-streams
                if title and title.upper() == 'OOPS!': continue
                for source in item.findall('{http://rss.jwpcdn.com/}source'):
                    stream_url = source.get('file')
                    label = source.get('label')
                    # quality preference: gvideo url > explicit label >
                    # guess from the item title > guess from the url
                    if scraper_utils.get_direct_hostname(self, stream_url) == 'gvideo':
                        quality = scraper_utils.gv_get_quality(stream_url)
                    elif label:
                        quality = scraper_utils.height_get_quality(label)
                    elif title:
                        quality = scraper_utils.blog_get_quality(video, title, '')
                    else:
                        quality = scraper_utils.blog_get_quality(video, stream_url, '')
                    sources[stream_url] = {'quality': quality, 'direct': True}
                    logger.log('Adding stream: %s Quality: %s' % (stream_url, quality), log_utils.LOGDEBUG)
        except Exception as e:
            # malformed xml or missing elements abort extraction quietly
            logger.log('Exception during YesMovies XML Parse: %s' % (e), log_utils.LOGWARNING)

        return sources
# Beispiel #16 (0) — scraped example separator, not code
 def get_sources(self, video):
     """Query the site's JSON API (/api/v2/...) for a single direct stream.

     NOTE(review): js_data is used like parsed JSON ('url' in js_data), so
     _http_get presumably returns a dict for this scraper — confirm against
     the base class before relying on it.
     """
     hosters = []
     source_url = self.get_url(video)
     if not source_url or source_url == FORCE_NO_MATCH: return hosters
     query = scraper_utils.parse_query(source_url)
     if 'id' in query:
         vid_type = 'movies' if video.video_type == VIDEO_TYPES.MOVIE else 'episodes'
         url = scraper_utils.urljoin(
             self.base_url, '/api/v2/%s/%s' % (vid_type, query['id']))
         js_data = self._http_get(url, cache_limit=.5)
         if 'url' in js_data:
             stream_url = js_data['url']
             # the API exposes no quality info; hard-coded to 720p
             quality = QUALITIES.HD720
             hoster = {
                 'multi-part': False,
                 'host':
                 scraper_utils.get_direct_hostname(self, stream_url),
                 'class': self,
                 'url': stream_url,
                 'quality': quality,
                 'views': None,
                 'rating': None,
                 'direct': True
             }
             hosters.append(hoster)
     return hosters
# Beispiel #17 (0) — scraped example separator, not code
    def get_sources(self, video):
        """Walk every 'movieplay' div and resolve each embedded iframe."""
        hosters = []
        source_url = self.get_url(video)
        if not source_url or source_url == FORCE_NO_MATCH:
            return hosters

        url = scraper_utils.urljoin(self.base_url, source_url)
        html = self._http_get(url, cache_limit=8)
        for _attrs, fragment in dom_parser2.parse_dom(html, 'div', {'class': 'movieplay'}):
            iframe_src = dom_parser2.parse_dom(fragment, 'iframe', req='src')
            if not iframe_src:
                continue

            iframe_src = iframe_src[0].attrs['src']
            if re.search('o(pen)?load', iframe_src, re.I):
                # openload embeds carry quality info in the url itself
                meta = scraper_utils.parse_movie_link(iframe_src)
                quality = scraper_utils.height_get_quality(meta['height'])
                links = {iframe_src: {'quality': quality, 'direct': False}}
            else:
                links = self.__get_links(iframe_src, url)

            for link in links:
                direct = links[link]['direct']
                quality = links[link]['quality']
                if direct:
                    host = scraper_utils.get_direct_hostname(self, link)
                    if host == 'gvideo':
                        quality = scraper_utils.gv_get_quality(link)
                    stream_url = link + scraper_utils.append_headers({'User-Agent': scraper_utils.get_ua(), 'Referer': url})
                else:
                    host = urlparse.urlparse(link).hostname
                    stream_url = link

                hosters.append({'multi-part': False, 'url': stream_url, 'host': host,
                                'class': self, 'quality': quality, 'views': None,
                                'rating': None, 'direct': direct})

        return hosters
# Beispiel #18 (0) — scraped example separator, not code
    def get_sources(self, video):
        """Crawl nested iframes until a docs.google.com embed is found.

        Non-google iframes are fetched and their own iframes appended to
        the work list. Extending `iframes` while iterating it is deliberate:
        the for-loop continues into the appended entries, giving a
        breadth-first crawl that stops at the first google-docs embed.
        """
        source_url = self.get_url(video)
        hosters = []
        sources = []
        if not source_url or source_url == FORCE_NO_MATCH: return hosters
        page_url = scraper_utils.urljoin(self.base_url, source_url)
        html = self._http_get(page_url, cache_limit=1)
        iframes = dom_parser2.parse_dom(html, 'iframe', req='src')
        for attrs, _content in iframes:
            iframe_url = attrs['src']
            if 'docs.google.com' in iframe_url:
                sources = scraper_utils.parse_google(self, iframe_url)
                break
            else:
                # not google: fetch it and queue any nested iframes
                iframe_url = scraper_utils.urljoin(self.base_url, iframe_url)
                html = self._http_get(iframe_url, cache_limit=1)
                iframes += dom_parser2.parse_dom(html, 'iframe', req='src')

        for source in sources:
            host = scraper_utils.get_direct_hostname(self, source)
            hoster = {
                'multi-part': False,
                'host': host,
                'class': self,
                # every entry came from parse_google, so gv quality applies
                'quality': scraper_utils.gv_get_quality(source),
                'views': None,
                'rating': None,
                'url': source,
                'direct': True
            }
            hosters.append(hoster)

        return hosters
# Beispiel #19 (0) — scraped example separator, not code
    def get_sources(self, video):
        """Find direct movie-file anchors on the page and wrap them as hosters."""
        hosters = []
        source_url = self.get_url(video)
        if not source_url or source_url == FORCE_NO_MATCH:
            return hosters

        page_url = scraper_utils.urljoin(self.base_url, source_url)
        html = self._http_get(page_url, cache_limit=8)
        for attrs, _content in dom_parser2.parse_dom(html, 'a', req='href'):
            href = attrs['href']
            if MOVIE_URL not in href:
                continue

            meta = scraper_utils.parse_movie_link(href)
            stream_url = scraper_utils.pathify_url(href) + scraper_utils.append_headers({'User-Agent': scraper_utils.get_ua()})
            hoster = {'multi-part': False,
                      'host': scraper_utils.get_direct_hostname(self, stream_url),
                      'class': self,
                      'quality': scraper_utils.height_get_quality(meta['height']),
                      'views': None, 'rating': None,
                      'url': stream_url, 'direct': True}
            if 'format' in meta:
                hoster['format'] = meta['format']
            hosters.append(hoster)

        return hosters
# Beispiel #20 (0) — scraped example separator, not code
    def get_sources(self, video):
        """Merge post/ajax/embedded sources and emit them as direct hosters."""
        source_url = self.get_url(video)
        hosters = []
        if not source_url or source_url == FORCE_NO_MATCH:
            return hosters

        url = scraper_utils.urljoin(self.base_url, source_url)
        html = self._http_get(url, cache_limit=.5)
        # three independent extractors feed one source->quality map
        sources = self.__get_posts(html)
        sources.update(self.__get_ajax(html, url))
        sources.update(self.__get_embedded(html, url))
        for source in sources:
            stream_url = source + scraper_utils.append_headers({'User-Agent': scraper_utils.get_ua()})
            hosters.append({'multi-part': False,
                            'host': scraper_utils.get_direct_hostname(self, source),
                            'class': self,
                            'quality': sources[source],
                            'views': None,
                            'rating': None,
                            'url': stream_url,
                            'direct': True,
                            'subs': 'Turkish subtitles'})

        return hosters
 def __get_gk_links(self, html):
     """Resolve the GK player's {link: "..."} blob into stream sources."""
     sources = {}
     match = re.search('{link\s*:\s*"([^"]+)', html)
     if not match:
         return sources

     iframe_url = match.group(1)
     data = {'link': iframe_url}
     headers = {'Referer': iframe_url}
     html = self._http_get(self.gk_url, data=data, headers=headers, cache_limit=.5)
     js_data = scraper_utils.parse_json(html, self.gk_url)
     links = js_data.get('link', [])
     if isinstance(links, basestring):
         # single-url response: normalize to the list-of-dicts shape
         links = [{'link': links}]

     for item in links:
         stream_url = item['link']
         if scraper_utils.get_direct_hostname(self, stream_url) == 'gvideo':
             quality = scraper_utils.gv_get_quality(stream_url)
             direct = True
         elif 'label' in item:
             quality = scraper_utils.height_get_quality(item['label'])
             direct = True
         else:
             quality = QUALITIES.HIGH
             direct = False
         sources[stream_url] = {'quality': quality, 'direct': direct}

     return sources
 def __get_sources(self, html, page_url):
     """Collect sources from iframes inside the page's video-content div.

     On-site iframes are fetched and scanned for a direct sources list
     (nested iframes are queued and followed when none is found); off-site
     iframes are returned as non-direct HIGH-quality sources.
     """
     sources = []
     fragment = dom_parser2.parse_dom(html, 'div', {'class': 'video-content'})
     if fragment:
         referer = page_url
         iframes = dom_parser2.parse_dom(fragment[0].content, 'iframe', req='src')
         for attrs, _content in iframes:
             iframe_url = attrs['src']
             if self.base_url in iframe_url:
                 headers = {'Referer': referer}
                 html = self._http_get(iframe_url, headers=headers, cache_limit=.5)
                 # subsequent requests use the previous iframe as referer
                 referer = iframe_url
                 links = scraper_utils.parse_sources_list(self, html)
                 if links:
                     for link, values in links.iteritems():
                         host = scraper_utils.get_direct_hostname(self, link)
                         if host == 'gvideo':
                             quality = scraper_utils.gv_get_quality(link)
                         else:
                             quality = values['quality']
                         source = {'multi-part': False, 'url': link, 'host': host, 'class': self, 'quality': quality, 'views': None, 'rating': None, 'direct': True}
                         sources.append(source)
                 else:
                     # intentionally extends the list being iterated so nested
                     # iframes are visited in later loop iterations
                     iframes += dom_parser2.parse_dom(html, 'iframe', req='src')
             else:
                 host = urlparse.urlparse(iframe_url).hostname
                 source = {'multi-part': False, 'url': iframe_url, 'host': host, 'class': self, 'quality': QUALITIES.HIGH, 'views': None, 'rating': None, 'direct': False}
                 sources.append(source)
     return sources
Beispiel #23
0
    def get_sources(self, video):
        """Gather GK player links (plus __get_ht_links) for |video|.

        For episodes, only anchors labelled with the episode number or a
        'Server N' caption are fed to the GK link extractor; movies use the
        whole page.
        """
        source_url = self.get_url(video)
        hosters = []
        if not source_url or source_url == FORCE_NO_MATCH: return hosters
        
        page_url = scraper_utils.urljoin(self.base_url, source_url)
        html = self._http_get(page_url, cache_limit=.5)
        if video.video_type == VIDEO_TYPES.EPISODE:
            gk_html = ''.join(match.group(0) for match in re.finditer('<a[^>]*>(%s|Server \d+)</a>' % (video.episode), html, re.I))
        else:
            gk_html = html
        link_url = scraper_utils.urljoin(self.base_url, LINK_URL)
        player_url = scraper_utils.urljoin(self.base_url, PLAYER_URL)
        sources = scraper_utils.get_gk_links(self, gk_html, page_url, QUALITIES.HIGH, link_url, player_url)
        sources.update(self.__get_ht_links(html, page_url))
        
        for stream_url, quality in sources.iteritems():
            host = scraper_utils.get_direct_hostname(self, stream_url)
            if host == 'gvideo':
                direct = True
            else:
                host = urlparse.urlparse(stream_url).hostname
                direct = False
            
            # hostname can be None for malformed urls; skip those entries
            if host is None: continue
            stream_url += scraper_utils.append_headers({'User-Agent': scraper_utils.get_ua()})
            hoster = {'multi-part': False, 'host': host, 'class': self, 'quality': quality, 'views': None, 'rating': None, 'url': stream_url, 'direct': direct}
            hosters.append(hoster)

        return hosters
Beispiel #24
0
 def get_sources(self, video):
     """Return gvideo hosters scraped from the page's embedded player iframe."""
     hosters = []
     source_url = self.get_url(video)
     if not source_url or source_url == FORCE_NO_MATCH: return hosters
     page_url = scraper_utils.urljoin(self.base_url, source_url)
     html = self._http_get(page_url, cache_limit=1)
     player = dom_parser2.parse_dom(html, 'div', {'class': 'player'})
     if not player: return hosters
 
     iframes = dom_parser2.parse_dom(player[0].content, 'iframe', req='src')
     if not iframes: return hosters
 
     html = self._http_get(iframes[0].attrs['src'], cache_limit=.25)
     groups = [self.__get_embedded_sources(html), self.__get_linked_sources(html)]
     for group in groups:
         for stream_url in group['sources']:
             host = scraper_utils.get_direct_hostname(self, stream_url)
             if host != 'gvideo': continue
             # quality is derived after the UA header is appended (original quirk)
             stream_url += scraper_utils.append_headers({'User-Agent': scraper_utils.get_ua()})
             quality = scraper_utils.gv_get_quality(stream_url)
             hoster = {'multi-part': False, 'host': host, 'class': self,
                       'quality': quality, 'views': None, 'rating': None,
                       'url': stream_url, 'direct': True,
                       'subs': group.get('subs', True)}
             hosters.append(hoster)
 
     return hosters
Beispiel #25
0
    def get_sources(self, video):
        """Resolve data-click tokens / movie sources into hosters.

        Episode pages expose data-click attributes; movie pages go through
        __get_movie_sources. Tokens that are not absolute http urls are
        resolved to direct links via __get_linked_source.
        """
        source_url = self.get_url(video)
        hosters = []
        if not source_url or source_url == FORCE_NO_MATCH: return hosters
        page_url = scraper_utils.urljoin(self.base_url, source_url)
        if video.video_type == VIDEO_TYPES.EPISODE:
            html = self.__episode_match(video, source_url)
            sources = [r.attrs['data-click'] for r in dom_parser2.parse_dom(html, 'div', req='data-click') + dom_parser2.parse_dom(html, 'li', req='data-click')]
        else:
            sources = self.__get_movie_sources(page_url)
        sources = [source.strip() for source in sources if source]

        headers = {'Referer': page_url}
        for source in sources:
            if source.startswith('http'):
                direct = False
                quality = QUALITIES.HD720
                host = urlparse.urlparse(source).hostname
            else:
                source = self.__get_linked_source(source, headers)
                if source is None: continue
                direct = True
                host = scraper_utils.get_direct_hostname(self, source)
                if host == 'gvideo':
                    quality = scraper_utils.gv_get_quality(source)
                else:
                    # FIX: this branch previously fell through with `pass`,
                    # leaving |quality| unbound (NameError) on the first such
                    # source, or stale from a prior loop iteration otherwise.
                    quality = QUALITIES.HIGH
            
            hoster = {'multi-part': False, 'host': host, 'class': self, 'quality': quality, 'views': None, 'rating': None, 'url': source, 'direct': direct}
            hosters.append(hoster)
            
        return hosters
Beispiel #26
0
    def get_sources(self, video):
        """Fetch view sources for the page's view_id in two language variants.

        Tries 'or' (original) then 'tr' (Turkish-subtitled) views, stopping
        at the first language that yields any sources. Direct sources with
        neither a gvideo host nor a quality label are skipped.
        """
        hosters = []
        sources = []
        source_url = self.get_url(video)
        if not source_url or source_url == FORCE_NO_MATCH: return hosters
        page_url = scraper_utils.urljoin(self.base_url, source_url)
        html = self._http_get(page_url, cache_limit=.25)
        match = re.search('var\s+view_id\s*=\s*"([^"]+)', html)
        if not match: return hosters
        view_id = match.group(1)
        
        for lang in ['or', 'tr']:
            subs = True if lang == 'tr' else False
            view_data = {'id': view_id, 'tip': 'view', 'dil': lang}
            html = self._http_get(self.ajax_url, data=view_data, headers=XHR, cache_limit=.25)
            html = html.strip()
            # strip literal escape sequences the endpoint embeds in the body
            html = re.sub(r'\\n|\\t', '', html)
            match = re.search('var\s+sources\s*=\s*(\[.*?\])', html)
            if match:
                raw_data = match.group(1)
                raw_data = raw_data.replace('\\', '')
            else:
                raw_data = html
             
            js_data = scraper_utils.parse_json(raw_data, self.ajax_url)
            if 'data' not in js_data: continue
            
            src = dom_parser2.parse_dom(js_data['data'], 'iframe', req='src')
            if not src: continue
            
            html = self._http_get(src[0].attrs['src'], cache_limit=.25)
            for attrs, _content in dom_parser2.parse_dom(html, 'iframe', req='src'):
                src = attrs['src']
                if not src.startswith('http'): continue
                # nested external iframes are assumed 720p, non-direct
                sources.append({'label': '720p', 'file': src, 'direct': False, 'subs': subs})
            
            sources += [{'file': url, 'subs': subs} for url in scraper_utils.parse_sources_list(self, html).iterkeys()]
            
            if sources: break

        for source in sources:
            direct = source.get('direct', True)
            stream_url = source['file'] + scraper_utils.append_headers({'User-Agent': scraper_utils.get_ua()})
            if direct:
                host = scraper_utils.get_direct_hostname(self, stream_url)
                if host == 'gvideo':
                    quality = scraper_utils.gv_get_quality(stream_url)
                elif 'label' in source:
                    quality = scraper_utils.height_get_quality(source['label'])
                else:
                    # direct, unlabelled, non-gvideo: quality unknown — skip
                    continue
            else:
                host = urlparse.urlparse(stream_url).hostname
                quality = scraper_utils.height_get_quality(source['label'])
        
            hoster = {'multi-part': False, 'host': host, 'class': self, 'quality': quality, 'views': None, 'rating': None, 'url': stream_url, 'direct': direct}
            if source.get('subs'): hoster['subs'] = 'Turkish Subtitles'
            hosters.append(hoster)
    
        return hosters
Beispiel #27
0
    def get_sources(self, video):
        """Build per-mirror stream urls from the authenticated title API.

        A stream url is generated from TEMPLATE for every quality key and
        every known mirror; quality comes from Q_MAP keyed by the link name.
        """
        sources = []
        source_url = self.get_url(video)
        if not source_url or source_url == FORCE_NO_MATCH: return sources
        object_id = self.__extract_id(source_url)
        if object_id is None: return sources
        source_url = TITLE_URL.format(id=object_id)
        page_url = scraper_utils.urljoin(self.base_url, source_url)
        html = self._authed_http_get(page_url, cache_limit=.5)
        js_data = scraper_utils.parse_json(html, page_url)
        if video.video_type == VIDEO_TYPES.MOVIE:
            links = js_data.get('links', {})
        else:
            links = self.__episode_match(js_data, video)

        prefix = js_data.get('domain', {}).get('prefix')
        suffix = js_data.get('domain', {}).get('suffix')
        for key, path in links.get('links', {}).iteritems():
            # dedupe and sort mirrors for a stable ordering
            for mirror in sorted(list(set(links.get('mirrors', [])))):
                stream_url = TEMPLATE.format(prefix=prefix, mirror=mirror, suffix=suffix, path=path)
                host = scraper_utils.get_direct_hostname(self, stream_url)
                quality = Q_MAP.get(key, QUALITIES.HIGH)
                source = {'multi-part': False, 'url': stream_url, 'host': host, 'class': self, 'quality': quality, 'views': None, 'rating': None, 'direct': True}
                source['version'] = '(Mirror %d)' % (mirror)
                sources.append(source)

        return sources
 def __get_links_from_json2(self, url, page_url, video_type):
     """Fetch a jwplayer-style JSON playlist and map stream urls to quality.

     Quality falls back from gvideo detection, to the source label, to the
     height parsed from the stream filename.
     """
     sources = {}
     headers = dict(XHR)
     headers['Referer'] = page_url
     html = self._http_get(url, headers=headers, cache_limit=0)
     js_data = scraper_utils.parse_json(html, url)
     try:
         # an empty playlist raises IndexError, handled below
         for source in js_data.get('playlist', [])[0].get('sources', []):
             stream_url = source['file']
             if scraper_utils.get_direct_hostname(self, stream_url) == 'gvideo':
                 quality = scraper_utils.gv_get_quality(stream_url)
             elif 'label' in source:
                 quality = scraper_utils.height_get_quality(source['label'])
             else:
                 if video_type == VIDEO_TYPES.MOVIE:
                     meta = scraper_utils.parse_movie_link(stream_url)
                 else:
                     meta = scraper_utils.parse_episode_link(stream_url)
                 quality = scraper_utils.height_get_quality(meta['height'])
             sources[stream_url] = {'quality': quality, 'direct': True}
             logger.log('Adding stream: %s Quality: %s' % (stream_url, quality), log_utils.LOGDEBUG)
     except Exception as e:
         logger.log('Exception during yesmovies extract: %s' % (e), log_utils.LOGDEBUG)
     return sources
Beispiel #29
0
    def get_sources(self, video):
        """Build hosters from __get_videos, filtering episode name mismatches."""
        hosters = []
        source_url = self.get_url(video)
        if not source_url or source_url == FORCE_NO_MATCH: return hosters
        is_episode = video.video_type == VIDEO_TYPES.EPISODE
        for stream in self.__get_videos(source_url, video):
            # for episodes, drop streams whose name fails the release check
            if is_episode and not scraper_utils.release_check(video, stream['name']):
                continue

            hoster = {'multi-part': False,
                      'class': self,
                      'views': None,
                      'url': stream['url'],
                      'rating': None,
                      'host': scraper_utils.get_direct_hostname(self, stream['url']),
                      'quality': stream['quality'],
                      'direct': True}
            if 'size' in stream:
                hoster['size'] = scraper_utils.format_size(stream['size'])
            if 'name' in stream:
                hoster['extra'] = stream['name']
            hosters.append(hoster)

        return hosters
 def __get_links_from_playlist(self, grab_url, headers):
     """Resolve a jwplayer playlist url into {stream_url: quality info}.

     Relative file entries are resolved against base_url and followed
     through one HEAD redirect. Only the first stream is kept unless the
     'scraper_url' setting is configured.
     """
     sources = {}
     grab_url = grab_url.replace('\\', '')
     grab_html = self._http_get(grab_url, headers=headers, cache_limit=.5)
     js_data = scraper_utils.parse_json(grab_html, grab_url)
     # missing/empty playlist structures yield no sources
     try: playlist = js_data['playlist'][0]['sources']
     except: playlist = []
     for item in playlist:
         stream_url = item.get('file')
         if stream_url:
             if stream_url.startswith('/'):
                 stream_url = scraper_utils.urljoin(self.base_url, stream_url)
                 redir_url = self._http_get(stream_url, headers=headers, allow_redirect=False, method='HEAD')
                 if redir_url.startswith('http'):
                     stream_url = redir_url
             
             if scraper_utils.get_direct_hostname(self, stream_url) == 'gvideo':
                 quality = scraper_utils.gv_get_quality(stream_url)
             elif 'label' in item:
                 quality = scraper_utils.height_get_quality(item['label'])
             else:
                 quality = QUALITIES.HIGH
             
             logger.log('Adding stream: %s Quality: %s' % (stream_url, quality), log_utils.LOGDEBUG)
             sources[stream_url] = {'quality': quality, 'direct': True}
             if not kodi.get_setting('scraper_url'): break
     return sources
Beispiel #31
0
    def get_sources(self, video):
        """Resolve streams via the page's video_id and the VIDEO_URL endpoint.

        NOTE(review): the initial POST to /av appears to prime a server-side
        session before the video lookup — confirm against the site.
        """
        hosters = []
        source_url = self.get_url(video)
        if not source_url or source_url == FORCE_NO_MATCH: return hosters
        page_url = scraper_utils.urljoin(self.base_url, source_url)
        html = self._http_get(page_url, cache_limit=0)
        match = re.search('var\s*video_id\s*=\s*"([^"]+)', html)
        if not match: return hosters
        
        video_id = match.group(1)
        headers = {'Referer': page_url}
        headers.update(XHR)
        # response intentionally unused; the request itself seems required
        _html = self._http_get(scraper_utils.urljoin(self.base_url, 'av'), headers=headers, method='POST', cache_limit=0)
        
        vid_url = scraper_utils.urljoin(self.base_url, VIDEO_URL)
        html = self._http_get(vid_url, data={'v': video_id}, headers=headers, cache_limit=0)
        for source, value in scraper_utils.parse_json(html, vid_url).iteritems():
            match = re.search('url=(.*)', value)
            if not match: continue
            stream_url = urllib.unquote(match.group(1))

            host = scraper_utils.get_direct_hostname(self, stream_url)
            if host == 'gvideo':
                quality = scraper_utils.gv_get_quality(stream_url)
            else:
                # presumably the JSON key is a height label — verify
                quality = scraper_utils.height_get_quality(source)
            stream_url += scraper_utils.append_headers({'User-Agent': scraper_utils.get_ua()})
            hoster = {'multi-part': False, 'host': host, 'class': self, 'quality': quality, 'views': None, 'rating': None, 'url': stream_url, 'direct': True}
            hosters.append(hoster)
        return hosters
 def __get_links(self, html):
     """Decode the caesar-obfuscated sources blob and build direct hosters."""
     hosters = []
     blob = re.search('tlas\("([^"]+)', html)
     if not blob:
         return hosters
     # decode: caesar(13) -> __get_f -> caesar(13)
     plaintext = self.__caesar(self.__get_f(self.__caesar(blob.group(1), 13)), 13)
     sources = scraper_utils.parse_sources_list(self, plaintext)
     for source in sources:
         stream_url = source + scraper_utils.append_headers({
             'User-Agent': scraper_utils.get_ua(),
             'Cookie': self._get_stream_cookies()})
         hoster = {'multi-part': False,
                   'url': stream_url,
                   'host': scraper_utils.get_direct_hostname(self, stream_url),
                   'class': self,
                   'quality': sources[source]['quality'],
                   'rating': None,
                   'views': None,
                   'direct': True}
         hosters.append(hoster)
     return hosters
    def __get_json_links(self, html, sub):
        """Parse a JSON sources blob into direct hosters, tagged with |sub|."""
        hosters = []
        js_data = scraper_utils.parse_json(html)
        if 'sources' not in js_data:
            return hosters

        for source in js_data.get('sources', []):
            stream_url = source.get('file')
            if stream_url is None:
                continue

            host = scraper_utils.get_direct_hostname(self, stream_url)
            if host == 'gvideo':
                quality = scraper_utils.gv_get_quality(stream_url)
            elif 'label' in source:
                quality = scraper_utils.height_get_quality(source['label'])
            else:
                quality = QUALITIES.HIGH
            hosters.append({'multi-part': False,
                            'host': host,
                            'class': self,
                            'quality': quality,
                            'views': None,
                            'rating': None,
                            'url': stream_url,
                            'direct': True,
                            'subs': sub})
        return hosters
 def get_sources(self, video):
     """Find this title's play() url inside the site's movies.js.

     Google drive/docs links are expanded via parse_google; youtube links
     become non-direct 720p hosters; everything else is direct HIGH.
     """
     source_url = self.get_url(video)
     hosters = []
     if not source_url or source_url == FORCE_NO_MATCH: return hosters
     js_url = scraper_utils.urljoin(self.base_url, '/javascript/movies.js')
     html = self._http_get(js_url, cache_limit=48)
     if source_url.startswith('/'):
         source_url = source_url[1:]
     pattern = '''getElementById\(\s*"%s".*?play\(\s*'([^']+)''' % (source_url)
     match = re.search(pattern, html, re.I)
     if match:
         stream_url = match.group(1)
         if 'drive.google' in stream_url or 'docs.google' in stream_url:
             sources = scraper_utils.parse_google(self, stream_url)
         else:
             sources = [stream_url]
         
         for source in sources:
             stream_url = source + scraper_utils.append_headers({'User-Agent': scraper_utils.get_ua()})
             host = scraper_utils.get_direct_hostname(self, source)
             if host == 'gvideo':
                 quality = scraper_utils.gv_get_quality(source)
                 direct = True
             elif 'youtube' in stream_url:
                 quality = QUALITIES.HD720
                 direct = False
                 host = 'youtube.com'
             else:
                 quality = QUALITIES.HIGH
                 direct = True
             hoster = {'multi-part': False, 'host': host, 'class': self, 'quality': quality, 'views': None, 'rating': None, 'url': stream_url, 'direct': direct}
             hosters.append(hoster)
     return hosters
 def __get_links_from_playlist(self, grab_url, headers):
     """Turn a jwplayer playlist grab url into a {stream_url: info} mapping.

     Root-relative entries are joined to base_url and one HEAD redirect is
     honored. Stops after the first stream when no 'scraper_url' is set.
     """
     sources = {}
     grab_url = grab_url.replace('\\', '')
     grab_html = self._http_get(grab_url, headers=headers, cache_limit=.5)
     js_data = scraper_utils.parse_json(grab_html, grab_url)
     # any malformed/empty playlist structure falls back to no sources
     try: playlist = js_data['playlist'][0]['sources']
     except: playlist = []
     for item in playlist:
         stream_url = item.get('file')
         if stream_url:
             if stream_url.startswith('/'):
                 stream_url = scraper_utils.urljoin(self.base_url, stream_url)
                 redir_url = self._http_get(stream_url, headers=headers, allow_redirect=False, method='HEAD')
                 if redir_url.startswith('http'):
                     stream_url = redir_url
             
             if scraper_utils.get_direct_hostname(self, stream_url) == 'gvideo':
                 quality = scraper_utils.gv_get_quality(stream_url)
             elif 'label' in item:
                 quality = scraper_utils.height_get_quality(item['label'])
             else:
                 quality = QUALITIES.HIGH
             
             logger.log('Adding stream: %s Quality: %s' % (stream_url, quality), log_utils.LOGDEBUG)
             sources[stream_url] = {'quality': quality, 'direct': True}
             if not kodi.get_setting('scraper_url'): break
     return sources
    def get_sources(self, video):
        """Scrape downloadUrl streams out of each videoPlayer iframe."""
        source_url = self.get_url(video)
        hosters = []
        if not source_url or source_url == FORCE_NO_MATCH: return hosters
        page_url = scraper_utils.urljoin(self.base_url, source_url)
        html = self._http_get(page_url, cache_limit=.5)
        players = dom_parser2.parse_dom(html, 'div', {'class': 'videoPlayer'})
        for _attrs, fragment in players:
            for attrs, _content in dom_parser2.parse_dom(fragment, 'iframe', req='src'):
                iframe_html = self._http_get(attrs['src'], headers={'Referer': page_url}, cache_limit=.5)
                match = re.search('downloadUrl\s*=\s*"([^"]+)', iframe_html)
                if not match:
                    continue
                stream_url = match.group(1)
                host = scraper_utils.get_direct_hostname(self, stream_url)
                if host == 'gvideo':
                    quality = scraper_utils.gv_get_quality(stream_url)
                else:
                    quality = QUALITIES.HIGH
                hosters.append({'multi-part': False,
                                'host': host,
                                'class': self,
                                'quality': quality,
                                'views': None,
                                'rating': None,
                                'url': stream_url,
                                'direct': True})

        return hosters
    def get_sources(self, video):
        """Look up streams by the page's video_id via the VIDEO_URL endpoint.

        NOTE(review): the unused POST to /av looks like a session-priming
        request required before the lookup — confirm against the site.
        """
        hosters = []
        source_url = self.get_url(video)
        if not source_url or source_url == FORCE_NO_MATCH: return hosters
        page_url = scraper_utils.urljoin(self.base_url, source_url)
        html = self._http_get(page_url, cache_limit=0)
        match = re.search('var\s*video_id\s*=\s*"([^"]+)', html)
        if not match: return hosters
        
        video_id = match.group(1)
        headers = {'Referer': page_url}
        headers.update(XHR)
        # response intentionally discarded
        _html = self._http_get(scraper_utils.urljoin(self.base_url, 'av'), headers=headers, method='POST', cache_limit=0)
        
        vid_url = scraper_utils.urljoin(self.base_url, VIDEO_URL)
        html = self._http_get(vid_url, data={'v': video_id}, headers=headers, cache_limit=0)
        for source, value in scraper_utils.parse_json(html, vid_url).iteritems():
            match = re.search('url=(.*)', value)
            if not match: continue
            stream_url = urllib.unquote(match.group(1))

            host = scraper_utils.get_direct_hostname(self, stream_url)
            if host == 'gvideo':
                quality = scraper_utils.gv_get_quality(stream_url)
            else:
                # presumably the JSON key is a height label — verify
                quality = scraper_utils.height_get_quality(source)
            stream_url += scraper_utils.append_headers({'User-Agent': scraper_utils.get_ua()})
            hoster = {'multi-part': False, 'host': host, 'class': self, 'quality': quality, 'views': None, 'rating': None, 'url': stream_url, 'direct': True}
            hosters.append(hoster)
        return hosters
 def __get_movie_sources(self, source_url):
     """Build hosters from a movie's torrent list via the details API.

     Only torrents whose hash reports a 'finished' cache status are kept;
     stream urls are 'hash_id=<hash>' tokens resolved elsewhere.
     """
     hosters = []
     query = kodi.parse_query(urlparse.urlparse(source_url).query)
     movie_id = query.get('movie_id') or self.__get_movie_id(source_url)
     if not movie_id: return hosters
     
     details_url = scraper_utils.urljoin(self.movie_base_url, MOVIE_DETAILS_URL)
     detail_data = self._json_get(details_url, params={'movie_id': movie_id}, cache_limit=24)
     try: torrents = detail_data['data']['movie']['torrents']
     except KeyError: torrents = []
     try: hashes = [torrent['hash'].lower() for torrent in torrents]
     except KeyError: hashes = []
     # one batched status lookup for all torrent hashes
     hash_data = self.__get_hash_data(hashes)
     for torrent in torrents:
         hash_id = torrent['hash'].lower()
         try: status = hash_data['hashes'][hash_id]['status']
         except KeyError: status = ''
         if status.lower() != 'finished': continue
         stream_url = 'hash_id=%s' % (hash_id)
         host = scraper_utils.get_direct_hostname(self, stream_url)
         quality = QUALITY_MAP.get(torrent['quality'], QUALITIES.HD720)
         hoster = {'multi-part': False, 'class': self, 'views': None, 'url': stream_url, 'rating': None, 'host': host, 'quality': quality, 'direct': True}
         if 'size_bytes' in torrent: hoster['size'] = scraper_utils.format_size(torrent['size_bytes'], 'B')
         if torrent['quality'] == '3D': hoster['3D'] = True
         hosters.append(hoster)
     return hosters
Beispiel #39
0
    def get_sources(self, video):
        """Collect sources from each embed-responsive iframe on the page.

        Iframes on DIRECT_HOSTS are parsed into direct streams; all others
        become a single non-direct hoster at the page-level quality.
        """
        hosters = []
        source_url = self.get_url(video)
        if not source_url or source_url == FORCE_NO_MATCH: return hosters
        url = scraper_utils.urljoin(self.base_url, source_url)
        html = self._http_get(url, cache_limit=.5)
        page_quality = QUALITIES.HD720 if video.video_type == VIDEO_TYPES.MOVIE else QUALITIES.HIGH
        for _attrs, fragment in dom_parser2.parse_dom(html, 'div', {'class': 'embed-responsive'}):
            # FIX: reset per fragment — previously |sources| was unbound
            # (NameError) when the first div had no iframe, and stale (causing
            # duplicate hosters) when a later div had none.
            sources = {}
            iframe_url = dom_parser2.parse_dom(fragment, 'iframe', req='data-src')
            if iframe_url:
                iframe_url = iframe_url[0].attrs['data-src']
                iframe_host = urlparse.urlparse(iframe_url).hostname
                if iframe_host in DIRECT_HOSTS:
                    sources = self.__parse_streams(iframe_url, url)
                else:
                    sources = {iframe_url: {'quality': scraper_utils.get_quality(video, iframe_host, page_quality), 'direct': False}}
            
            for source in sources:
                quality = sources[source]['quality']
                direct = sources[source]['direct']
                if direct:
                    host = scraper_utils.get_direct_hostname(self, source)
                    stream_url = source + scraper_utils.append_headers({'User-Agent': scraper_utils.get_ua()})
                else:
                    host = urlparse.urlparse(source).hostname
                    stream_url = source
                
                hoster = {'multi-part': False, 'url': stream_url, 'host': host, 'class': self, 'quality': quality, 'views': None, 'rating': None, 'direct': direct}
                hosters.append(hoster)

        return hosters
Beispiel #40
0
 def get_sources(self, video):
     """Look up the page's video_id and resolve its streams via INFO_URL."""
     hosters = []
     source_url = self.get_url(video)
     if not source_url or source_url == FORCE_NO_MATCH: return hosters
     page_url = scraper_utils.urljoin(self.base_url, source_url)
     html = self._http_get(page_url, cache_limit=.5)
     vid_match = re.search('var\s*video_id="([^"]+)', html)
     if not vid_match: return hosters
     
     headers = {'Referer': page_url}
     headers.update(XHR)
     html = self._http_get(INFO_URL, data={'v': vid_match.group(1)}, headers=headers, cache_limit=.5)
     info = scraper_utils.parse_json(html, INFO_URL)
     for label in info:
         url_match = re.search('url=(.*)', info[label])
         if not url_match: continue
         
         stream_url = urllib.unquote(url_match.group(1))
         host = scraper_utils.get_direct_hostname(self, stream_url)
         if host == 'gvideo':
             quality = scraper_utils.gv_get_quality(stream_url)
         else:
             quality = scraper_utils.height_get_quality(label)
         stream_url += scraper_utils.append_headers({'User-Agent': scraper_utils.get_ua()})
         hosters.append({'multi-part': False, 'host': host, 'class': self,
                         'quality': quality, 'views': None, 'rating': None,
                         'url': stream_url, 'direct': True})
     return hosters
Beispiel #41
0
    def get_sources(self, video):
        """Scrape iframe/source streams, following the best-episode page for movies.

        Movie pages redirect through a poster link to an episode list, from
        which the best page is chosen. Streams behind video.php/moviexk.php
        redirectors are resolved via a HEAD request.
        """
        source_url = self.get_url(video)
        hosters = []
        if not source_url or source_url == FORCE_NO_MATCH: return hosters
        page_url = scraper_utils.urljoin(self.base_url, source_url)
        headers = {'Referer': page_url}
        html = self._http_get(page_url, headers=headers, cache_limit=.5)
        if video.video_type == VIDEO_TYPES.MOVIE:
            fragment = dom_parser2.parse_dom(html, 'div', {'class': 'poster'})
            if fragment:
                movie_url = dom_parser2.parse_dom(fragment[0].content, 'a', req='href')
                if movie_url:
                    page_url = scraper_utils.urljoin(self.base_url, movie_url[0].attrs['href'])
                    html = self._http_get(page_url, cache_limit=.5)
                    episodes = self.__get_episodes(html)
                    page_url = self.__get_best_page(episodes)
                    if not page_url:
                        return hosters
                    else:
                        page_url = scraper_utils.urljoin(self.base_url, page_url)
                        html = self._http_get(page_url, cache_limit=.5)
        
        # iframes are treated as non-direct 480p; <source> tags as direct
        streams = dom_parser2.parse_dom(html, 'iframe', req='src')
        if streams:
            streams = [(attrs['src'], 480) for attrs, _content in streams]
            direct = False
        else:
            streams = [(attrs['src'], attrs.get('data-res', 480)) for attrs, _content in dom_parser2.parse_dom(html, 'source', req=['src'])]
            direct = True
            
        headers = {'User-Agent': scraper_utils.get_ua(), 'Referer': page_url}
        for stream_url, height in streams:
            if 'video.php' in stream_url or 'moviexk.php' in stream_url:
                if 'title=' in stream_url:
                    # url-encode the title query value before following
                    title = stream_url.split('title=')[-1]
                    stream_url = stream_url.replace(title, urllib.quote(title))
                redir_url = self._http_get(stream_url, headers=headers, allow_redirect=False, method='HEAD', cache_limit=0)
                if redir_url.startswith('http'):
                    redir_url = redir_url.replace(' ', '').split(';codec')[0]
                    stream_url = redir_url
                else:
                    continue
            
            if direct:
                host = scraper_utils.get_direct_hostname(self, stream_url)
                if host == 'gvideo':
                    quality = scraper_utils.gv_get_quality(stream_url)
                else:
                    quality = scraper_utils.height_get_quality(height)
                stream_url += scraper_utils.append_headers(headers)
            else:
                host = urlparse.urlparse(stream_url).hostname
                quality = scraper_utils.height_get_quality(height)
            
            source = {'multi-part': False, 'url': stream_url, 'host': host, 'class': self, 'quality': quality, 'views': None, 'rating': None, 'direct': direct}
            hosters.append(source)

        return hosters
Beispiel #42
0
 def __get_sources(self, html, page_url, subs):
     """Extract stream sources from a dizist player/embed page.

     Returns a dict mapping stream URL -> {'direct', 'subs', 'quality'}.
     Recurses once into same-site ('dizist') iframes.
     """
     sources = {}
     player_div = dom_parser2.parse_dom(html, 'div', {'class': 'dzst-player'}, req='data-dzst-player')
     if player_div:
         # Player config is HTML-escaped JSON stored in a data attribute.
         raw_js = player_div[0].attrs['data-dzst-player'].replace('&#x3D;', '=')
         js_data = scraper_utils.parse_json(scraper_utils.cleanse_title(raw_js), page_url)
         for height, stream_url in js_data.get('tr', {}).iteritems():
             if scraper_utils.get_direct_hostname(self, stream_url) == 'gvideo':
                 quality = scraper_utils.gv_get_quality(stream_url)
             else:
                 quality = scraper_utils.height_get_quality(height)
             sources[stream_url] = {'direct': True, 'subs': subs, 'quality': quality}
         return sources

     fragment = dom_parser2.parse_dom(html, 'div', {'class': 'video-player'})
     if not fragment:
         return sources
     fragment = fragment[0].content
     # Strip ad-player divs so their iframes are not picked up below.
     for _attrs, ad_div in dom_parser2.parse_dom(fragment, 'div', {'class': 'ad-player'}):
         fragment = fragment.replace(ad_div, '')

     iframes = dom_parser2.parse_dom(fragment, 'iframe', req='src')
     if not iframes:
         sources = scraper_utils.parse_sources_list(self, fragment, var='sources')
         for value in sources.itervalues(): value['subs'] = subs
         return sources

     iframe_url = iframes[0].attrs['src']
     if 'dizist' in iframe_url:
         # Same-site embed: fetch it and recurse.
         html = self._http_get(iframe_url, headers={'Referer': page_url}, cache_limit=1)
         return self.__get_sources(html, page_url, subs)

     if urlparse.urlparse(iframe_url).hostname:
         # Absolute third-party iframe: treat gvideo as direct, others as hosted.
         direct = scraper_utils.get_direct_hostname(self, iframe_url) == 'gvideo'
         sources[iframe_url] = {'direct': direct, 'subs': subs, 'quality': QUALITIES.HD720}
     else:
         # Relative iframe URL: resolve against the base and scrape its sources list.
         iframe_url = scraper_utils.urljoin(self.base_url, iframe_url)
         html = self._http_get(iframe_url, headers={'Referer': page_url}, cache_limit=1)
         sources = scraper_utils.parse_sources_list(self, html, var='sources')
         for value in sources.itervalues(): value['subs'] = subs
     return sources
    def __create_source(self, stream_url, height, page_url, subs=False, direct=True):
        """Build a single hoster dict for a stream.

        For direct links served by this site itself, resolve the redirect
        target with a HEAD request so the final URL is handed to the player.

        :param stream_url: raw stream link (may contain escaped slashes)
        :param height: pixel height used for quality when not a gvideo link
        :param page_url: referring page, used for Referer headers
        :param subs: when True, tag the hoster with Turkish subtitles
        :param direct: whether the link is a direct video URL
        """
        if direct:
            stream_url = stream_url.replace('\\/', '/')
            if self.get_name().lower() in stream_url:
                redir_url = self._http_get(stream_url,
                                           headers={'Referer': page_url},
                                           method='HEAD',
                                           allow_redirect=False,
                                           cache_limit=.25)
                if redir_url.startswith('http'):
                    # Redirect target only needs a UA header.
                    stream_url = redir_url + scraper_utils.append_headers(
                        {'User-Agent': scraper_utils.get_ua()})
                else:
                    # No redirect: keep the original URL but attach full headers.
                    stream_url += scraper_utils.append_headers(
                        {'User-Agent': scraper_utils.get_ua(),
                         'Referer': page_url,
                         'Cookie': self._get_stream_cookies()})
            else:
                stream_url += scraper_utils.append_headers(
                    {'User-Agent': scraper_utils.get_ua(),
                     'Referer': page_url})
            host = scraper_utils.get_direct_hostname(self, stream_url)
        else:
            host = urlparse.urlparse(stream_url).hostname

        if host == 'gvideo':
            quality = scraper_utils.gv_get_quality(stream_url)
        else:
            quality = scraper_utils.height_get_quality(height)

        hoster = {'multi-part': False, 'host': host, 'class': self,
                  'quality': quality, 'views': None, 'rating': None,
                  'url': stream_url, 'direct': direct}
        if subs: hoster['subs'] = 'Turkish Subtitles'
        return hoster
 def __get_embed_links(self, html):
     """Collect direct hosters from the sources list embedded in |html|.

     Fix: ``source`` iterates the *keys* (URL strings) of the dict returned
     by parse_sources_list, so the quality must be read from the mapped
     value — ``sources[source]['quality']``.  The original
     ``source['quality']`` indexed a string with a string key and raised
     TypeError on every call.
     """
     hosters = []
     sources = scraper_utils.parse_sources_list(self, html)
     for source in sources:
         quality = sources[source]['quality']  # was: source['quality'] -> TypeError on str
         stream_url = source + scraper_utils.append_headers({'User-Agent': scraper_utils.get_ua()})
         hoster = {'multi-part': False, 'host': scraper_utils.get_direct_hostname(self, source), 'class': self, 'quality': quality, 'views': None, 'rating': None, 'url': stream_url, 'direct': True, 'subs': 'Turkish Subtitles'}
         hosters.append(hoster)
     return hosters
    def get_sources(self, video):
        """Gather hosters for |video| across all of its server/episode pages."""
        hosters = []
        source_url = self.get_url(video)
        if not source_url or source_url == FORCE_NO_MATCH: return hosters
        sources = {}
        headers = {'Accept-Language': 'en-US,en;q=0.5'}
        page_url = scraper_utils.urljoin(self.base_url, source_url)
        html = self._http_get(page_url, headers=headers, cache_limit=2)
        if video.video_type == VIDEO_TYPES.MOVIE:
            sources.update(self.__scrape_sources(html, page_url))
            # Visit every episode/server tab that is not already the active one.
            pages = {r.attrs['href'] for r in dom_parser2.parse_dom(
                html, 'a', {'class': 'btn-eps'}, req='href')}
            active = {r.attrs['href'] for r in dom_parser2.parse_dom(
                html, 'a', {'class': 'active'}, req='href')}
            extra_pages = pages - active
        else:
            extra_pages = self.__match_episode(video, html)

        for page in extra_pages:
            page_url = scraper_utils.urljoin(self.base_url, page)
            html = self._http_get(page_url, headers=headers, cache_limit=2)
            sources.update(self.__scrape_sources(html, page_url))

        for source, values in sources.iteritems():
            if not source.lower().startswith('http'): continue
            direct = values['direct']
            if direct:
                host = scraper_utils.get_direct_hostname(self, source)
                if host == 'gvideo':
                    stream_url = source
                else:
                    # NOTE: page_url is whichever page was fetched last above.
                    stream_url = source + scraper_utils.append_headers(
                        {'User-Agent': scraper_utils.get_ua(),
                         'Referer': page_url})
            else:
                host = urlparse.urlparse(source).hostname
                stream_url = source
            hosters.append({'multi-part': False,
                            'host': host,
                            'class': self,
                            'quality': values['quality'],
                            'views': None,
                            'rating': None,
                            'url': stream_url,
                            'direct': direct})

        return hosters
 def __get_js_sources(self, js_data, page_url):
     """Turn the 'videolink' entries of a JS data dict into gvideo hosters."""
     hosters = []
     for key in js_data:
         if 'videolink' not in key:
             continue
         stream_url = js_data[key]
         # Only gvideo-hosted links are kept; everything else is ignored.
         if scraper_utils.get_direct_hostname(self, stream_url) == 'gvideo':
             hosters.append(self.__create_source(stream_url, 480, page_url))
     return hosters
 def __get_sources(self, html, label):
     """Map each <source src> in |html| to quality info.

     gvideo links derive quality from the URL; otherwise |label| is looked
     up in Q_MAP (defaulting to HIGH).
     """
     sources = {}
     for attrs, _label in dom_parser2.parse_dom(html, 'source', req='src'):
         stream_url = attrs['src']
         if scraper_utils.get_direct_hostname(self, stream_url) == 'gvideo':
             quality = scraper_utils.gv_get_quality(stream_url)
         else:
             quality = Q_MAP.get(label.upper(), QUALITIES.HIGH)
         sources[stream_url] = {'direct': True, 'quality': quality}
     return sources
Beispiel #48
0
 def __get_sources(self, html, page_url):
     """Crawl the video-content iframes of a page and collect sources.

     Same-site iframes are fetched and parsed for a sources list; when a
     fetched page has no sources list, its own iframes are appended to the
     work list (see NOTE below).  Third-party iframes are returned as
     non-direct hosters with HIGH quality.
     """
     sources = []
     fragment = dom_parser2.parse_dom(html, 'div',
                                      {'class': 'video-content'})
     if fragment:
         referer = page_url
         iframes = dom_parser2.parse_dom(fragment[0].content,
                                         'iframe',
                                         req='src')
         # NOTE: iframes is deliberately extended while being iterated
         # (the += below) so nested iframes found in fetched pages are
         # also visited — do not convert this loop to an iterator copy.
         for attrs, _content in iframes:
             iframe_url = attrs['src']
             if self.base_url in iframe_url:
                 headers = {'Referer': referer}
                 html = self._http_get(iframe_url,
                                       headers=headers,
                                       cache_limit=.5)
                 # Each fetched page becomes the referer for the next fetch.
                 referer = iframe_url
                 links = scraper_utils.parse_sources_list(self, html)
                 if links:
                     for link, values in links.iteritems():
                         host = scraper_utils.get_direct_hostname(
                             self, link)
                         if host == 'gvideo':
                             quality = scraper_utils.gv_get_quality(link)
                         else:
                             quality = values['quality']
                         source = {
                             'multi-part': False,
                             'url': link,
                             'host': host,
                             'class': self,
                             'quality': quality,
                             'views': None,
                             'rating': None,
                             'direct': True
                         }
                         sources.append(source)
                 else:
                     # No sources list: queue this page's iframes for the
                     # ongoing loop (extend-while-iterating, see NOTE).
                     iframes += dom_parser2.parse_dom(html,
                                                      'iframe',
                                                      req='src')
             else:
                 # Off-site iframe: report as a hosted (non-direct) link.
                 host = urlparse.urlparse(iframe_url).hostname
                 source = {
                     'multi-part': False,
                     'url': iframe_url,
                     'host': host,
                     'class': self,
                     'quality': QUALITIES.HIGH,
                     'views': None,
                     'rating': None,
                     'direct': False
                 }
                 sources.append(source)
     return sources
    def __get_sources(self, html, label):
        """Return {src: {'direct', 'quality'}} for every <source> tag in |html|."""
        sources = {}
        for attrs, _label in dom_parser2.parse_dom(html, 'source', req='src'):
            src = attrs['src']
            is_gvideo = scraper_utils.get_direct_hostname(self, src) == 'gvideo'
            # gvideo quality comes from the URL itself; otherwise the page
            # label is mapped through Q_MAP (HIGH when unknown).
            quality = (scraper_utils.gv_get_quality(src) if is_gvideo
                       else Q_MAP.get(label.upper(), QUALITIES.HIGH))
            sources[src] = {'direct': True, 'quality': quality}
        return sources
 def __get_cloud_links(self, html, page_url, sub):
     """Scrape cloud-hosted stream variants for an episode.

     Finds the episode id and an access token in the site's scripts,
     fetches the variant list, and returns one direct hoster per variant
     (random host pick).

     Fix: guard against an empty ``hosts`` list — ``random.choice([])``
     raises IndexError, and the original's ``if stream_host`` check came
     too late to prevent it.  Regexes are also raw strings now.
     """
     hosters = []
     html = html.replace('\\"', '"').replace('\\/', '/')
     match = re.search(r"dizi_kapak_getir\('([^']+)", html)
     if not match:
         return hosters
     ep_id = match.group(1)
     for attrs, _content in dom_parser2.parse_dom(
             html, 'script', {'data-cfasync': 'false'}, req='src'):
         script_url = attrs['src']
         html = self._http_get(script_url, cache_limit=24)
         match1 = re.search(r"var\s+kapak_url\s*=\s*'([^']+)", html)
         match2 = re.search(r"var\s+aCtkp\s*=\s*'([^']+)", html)
         if not (match1 and match2):
             continue
         link_url = '%s?fileid=%s&access_token=%s' % (
             match1.group(1), ep_id, match2.group(1))
         headers = {'Referer': page_url}
         html = self._http_get(link_url, headers=headers, cache_limit=.5)
         js_data = scraper_utils.parse_json(html, link_url)
         for variant in js_data.get('variants', {}):
             hosts = variant.get('hosts', [])
             # random.choice raises IndexError on an empty sequence.
             stream_host = random.choice(hosts) if hosts else None
             if not stream_host:
                 continue
             stream_url = stream_host + variant[
                 'path'] + scraper_utils.append_headers(
                     {'User-Agent': scraper_utils.get_ua(),
                      'Referer': page_url})
             if not stream_url.startswith('http'):
                 stream_url = 'http://' + stream_url
             host = scraper_utils.get_direct_hostname(self, stream_url)
             if 'width' in variant:
                 quality = scraper_utils.width_get_quality(variant['width'])
             elif 'height' in variant:
                 quality = scraper_utils.height_get_quality(variant['height'])
             else:
                 quality = QUALITIES.HIGH
             hoster = {'multi-part': False, 'host': host, 'class': self,
                       'quality': quality, 'views': None, 'rating': None,
                       'url': stream_url, 'direct': True}
             hoster['subs'] = sub
             hosters.append(hoster)
     return hosters
Beispiel #51
0
 def __get_links(self, html):
     """Decode the obfuscated source list (caesar/scramble/caesar) into hosters."""
     hosters = []
     match = re.search('tlas\("([^"]+)', html)
     if match is None:
         return hosters
     # Double caesar-13 around the site's own scramble function.
     plaintext = self.__caesar(self.__get_f(self.__caesar(match.group(1), 13)), 13)
     parsed = scraper_utils.parse_sources_list(self, plaintext)
     for source, values in parsed.items():
         stream_url = source + scraper_utils.append_headers({'User-Agent': scraper_utils.get_ua(), 'Cookie': self._get_stream_cookies()})
         hoster = {'multi-part': False,
                   'url': stream_url,
                   'host': scraper_utils.get_direct_hostname(self, stream_url),
                   'class': self,
                   'quality': values['quality'],
                   'rating': None,
                   'views': None,
                   'direct': True}
         hosters.append(hoster)
     return hosters
    def __get_links(self, url, video):
        """Query the EasyNews search API and build direct hosters.

        Each result is filtered through a battery of exclusion checks
        (release match, language, duration, password, virus flag, type);
        survivors become authorized download URLs with quality derived
        from width metadata or the parsed title.
        """
        hosters = []
        search_url, params = self.__translate_search(url)
        html = self._http_get(search_url, params=params, cache_limit=.5)
        js_result = scraper_utils.parse_json(html, search_url)
        down_url = js_result.get('downURL')
        dl_farm = js_result.get('dlFarm')
        dl_port = js_result.get('dlPort')
        for item in js_result.get('data', []):
            # Numeric string keys index fixed columns of the API response.
            post_hash, size, post_title, ext, duration = item['0'], item['4'], item['10'], item['11'], item['14']
            # checks[i] True means "exclude this post" for reason i.
            checks = [False] * 6
            if not scraper_utils.release_check(video, post_title): checks[0] = True
            if 'alangs' in item and item['alangs'] and 'eng' not in item['alangs']: checks[1] = True
            # Exclude clips shorter than ~6 minutes (seconds, or 0-5 minutes).
            if re.match('^\d+s', duration) or re.match('^[0-5]m', duration): checks[2] = True
            if 'passwd' in item and item['passwd']: checks[3] = True
            if 'virus' in item and item['virus']: checks[4] = True
            if 'type' in item and item['type'].upper() != 'VIDEO': checks[5] = True
            if any(checks):
                logger.log('EasyNews Post excluded: %s - |%s|' % (checks, item), log_utils.LOGDEBUG)
                continue
            
            # Direct download URL with the account's auth baked into headers.
            stream_url = down_url + urllib.quote('/%s/%s/%s%s/%s%s' % (dl_farm, dl_port, post_hash, ext, post_title, ext))
            stream_url = stream_url + '|Authorization=%s' % (urllib.quote(self.auth))
            host = scraper_utils.get_direct_hostname(self, stream_url)
            quality = None
            if 'width' in item:
                try: width = int(item['width'])
                except: width = 0
                if width:
                    quality = scraper_utils.width_get_quality(width)
            
            # Fall back to parsing the release title when no usable width.
            if quality is None:
                if video.video_type == VIDEO_TYPES.MOVIE:
                    meta = scraper_utils.parse_movie_link(post_title)
                else:
                    meta = scraper_utils.parse_episode_link(post_title)
                quality = scraper_utils.height_get_quality(meta['height'])
                
            # Optional user-configured size cap.
            if self.max_bytes:
                match = re.search('([\d.]+)\s+(.*)', size)
                if match:
                    size_bytes = scraper_utils.to_bytes(*match.groups())
                    if size_bytes > self.max_bytes:
                        logger.log('Result skipped, Too big: |%s| - %s (%s) > %s (%s GB)' % (post_title, size_bytes, size, self.max_bytes, self.max_gb))
                        continue

            hoster = {'multi-part': False, 'class': self, 'views': None, 'url': stream_url, 'rating': None, 'host': host, 'quality': quality, 'direct': True}
            if any(i for i in ['X265', 'HEVC'] if i in post_title.upper()): hoster['format'] = 'x265'
            if size: hoster['size'] = size
            if post_title: hoster['extra'] = post_title
            hosters.append(hoster)
        return hosters
 def __get_sources(self, html):
     """Merge the parsed sources list with <source>/<iframe> tags, then post-process."""
     sources = scraper_utils.parse_sources_list(self, html)
     tags = dom_parser2.parse_dom(html, 'source', {'type': 'video/mp4'}, req='src')
     tags += dom_parser2.parse_dom(html, 'iframe', req='src')
     for tag in tags:
         stream_url = tag.attrs['src']
         if scraper_utils.get_direct_hostname(self, stream_url) == 'gvideo':
             quality, direct = scraper_utils.gv_get_quality(stream_url), True
         else:
             quality, direct = QUALITIES.HD720, False
         sources[stream_url] = {'quality': quality, 'direct': direct}
     return self.__proc_sources(sources)
    def get_sources(self, video):
        """Walk every film/server/episode AJAX endpoint and collect sources.

        For each film image on the page, POSTs to the server list endpoint,
        then to each episode endpoint, and parses the returned JSON for
        embed iframes or raw link/quality lists.
        """
        source_url = self.get_url(video)
        sources = []
        if not source_url or source_url == FORCE_NO_MATCH: return sources
        page_url = scraper_utils.urljoin(self.base_url, source_url)
        html = self._http_get(page_url, cache_limit=8)
        for attrs, _content in dom_parser2.parse_dom(html, 'img', req=['data-id', 'data-name']):
            film_id, data_name = attrs['data-id'], attrs['data-name']
            data = {'id': film_id, 'n': data_name}
            server_url = scraper_utils.urljoin(self.base_url, SERVER_URL)
            server_url = server_url % (film_id)
            headers = {'Referer': page_url}
            headers.update(XHR)
            html = self._http_get(server_url, data=data, headers=headers, cache_limit=.5)
            for attrs, _content in dom_parser2.parse_dom(html, 'a', req='data-id'):
                data = {'epid': attrs['data-id']}
                ep_url = scraper_utils.urljoin(self.base_url, EP_URL)
                ep_url = ep_url % (attrs['data-id'])
                headers = {'Referer': page_url}
                headers.update(XHR)
                html = self._http_get(ep_url, data=data, headers=headers, cache_limit=.5)
                js_data = scraper_utils.parse_json(html, ep_url)
                # Prefer iframe embeds; fall back to the raw link list 'l'.
                try:
                    links = [r.attrs['src'] for r in dom_parser2.parse_dom(js_data['link']['embed'], 'iframe', req='src')]
                except:
                    try: links = js_data['link']['l']
                    except: links = []
                try: heights = js_data['link']['q']
                except: heights = []
                # Python 2 idiom: map(None, a, b) == itertools.izip_longest(a, b)
                # — pads the shorter list with None.
                for stream_url, height in map(None, links, heights):
                    match = re.search('movie_url=(.*)', stream_url)
                    if match:
                        stream_url = match.group(1)
                        
                    host = scraper_utils.get_direct_hostname(self, stream_url)
                    if host == 'gvideo':
                        quality = scraper_utils.gv_get_quality(stream_url)
                        stream_url += scraper_utils.append_headers({'User-Agent': scraper_utils.get_ua(), 'Referer': page_url})
                        direct = True
                    else:
                        host = urlparse.urlparse(stream_url).hostname
                        # height may be None when 'q' was shorter than the links list.
                        if height:
                            quality = scraper_utils.height_get_quality(height)
                        else:
                            quality = QUALITIES.HD720
                        direct = False
                    source = {'multi-part': False, 'url': stream_url, 'host': host, 'class': self, 'quality': quality, 'views': None, 'rating': None, 'direct': direct}
                    sources.append(source)

        return sources
 def get_sources(self, video):
     """Fetch the single direct stream URL from the site's JSON API (v2)."""
     hosters = []
     source_url = self.get_url(video)
     if not source_url or source_url == FORCE_NO_MATCH: return hosters
     query = scraper_utils.parse_query(source_url)
     if 'id' not in query:
         return hosters
     vid_type = 'movies' if video.video_type == VIDEO_TYPES.MOVIE else 'episodes'
     url = scraper_utils.urljoin(self.base_url, '/api/v2/%s/%s' % (vid_type, query['id']))
     js_data = self._http_get(url, cache_limit=.5)
     if 'url' in js_data:
         stream_url = js_data['url']
         hosters.append({'multi-part': False,
                         'host': scraper_utils.get_direct_hostname(self, stream_url),
                         'class': self,
                         'url': stream_url,
                         'quality': QUALITIES.HD720,
                         'views': None,
                         'rating': None,
                         'direct': True})
     return hosters