def get_sources(self, video):
     hosters = []
     source_url = self.get_url(video)
     if not source_url or source_url == FORCE_NO_MATCH: return hosters
     url = scraper_utils.urljoin(self.base_url, source_url)
     html = self._http_get(url, require_debrid=True, cache_limit=.5)
     for source, values in self.__get_post_links(html).iteritems():
         if scraper_utils.excluded_link(source): continue
         host = urlparse.urlparse(source).hostname
         release = values['release']
         quality = scraper_utils.blog_get_quality(video, release, host)
         hoster = {
             'multi-part': False,
             'host': host,
             'class': self,
             'views': None,
             'url': source,
             'rating': None,
             'quality': quality,
             'direct': False
         }
         if 'X265' in release or 'HEVC' in release:
             hoster['format'] = 'x265'
         hosters.append(hoster)
     return hosters
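# The first two examples iterate over a private __get_post_links(html) helper
# that is not included in this listing and expect a mapping from each hoster
# link to a dict holding its release name. A minimal, hypothetical sketch of
# that shape (the <h2>/href markup below is an assumption, not the scrapers'
# actual parsing; it relies on the same module-level `re` import the examples
# already use):
def __get_post_links(self, html):
    sources = {}
    # assume each post is an <h2> release title followed by its mirror links
    for post in re.finditer(r'<h2[^>]*>(?P<release>[^<]+)</h2>(?P<links>.*?)(?=<h2|\Z)', html, re.DOTALL):
        release = post.group('release').strip()
        for link in re.finditer(r'href="(https?://[^"]+)"', post.group('links')):
            sources[link.group(1)] = {'release': release}
    return sources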
 def get_sources(self, video):
     source_url = self.get_url(video)
     hosters = []
     if not source_url or source_url == FORCE_NO_MATCH: return hosters
     url = scraper_utils.urljoin(self.base_url, source_url)
     html = self._http_get(url, require_debrid=True, cache_limit=.5)
     sources = self.__get_post_links(html)
     for source, value in sources.iteritems():
         if scraper_utils.excluded_link(source): continue
         host = urlparse.urlparse(source).hostname
         if video.video_type == VIDEO_TYPES.MOVIE:
             meta = scraper_utils.parse_movie_link(value['release'])
         else:
             meta = scraper_utils.parse_episode_link(value['release'])
         quality = scraper_utils.height_get_quality(meta['height'])
         hoster = {
             'multi-part': False,
             'host': host,
             'class': self,
             'views': None,
             'url': source,
             'rating': None,
             'quality': quality,
             'direct': False
         }
         if 'format' in meta: hoster['format'] = meta['format']
         hosters.append(hoster)
     return hosters
Example #3
 def get_sources(self, video):
     hosters = []
     source_url = self.get_url(video)
     if not source_url or source_url == FORCE_NO_MATCH: return hosters
     url = scraper_utils.urljoin(self.base_url, source_url)
     html = self._http_get(url, require_debrid=True, cache_limit=.5)
     fragment = dom_parser2.parse_dom(html, 'div', {'class': 'post-cont'})
     if not fragment: return hosters
     
      match = re.search(r'<p>\s*<strong>(.*?)<script', fragment[0].content, re.DOTALL)
     if not match: return hosters
     
     for attrs, _content in dom_parser2.parse_dom(match.group(1), 'a', req='href'):
         stream_url = attrs['href']
         if scraper_utils.excluded_link(stream_url): continue
         if video.video_type == VIDEO_TYPES.MOVIE:
             meta = scraper_utils.parse_movie_link(stream_url)
         else:
             meta = scraper_utils.parse_episode_link(stream_url)
         
         host = urlparse.urlparse(stream_url).hostname
         quality = scraper_utils.get_quality(video, host, scraper_utils.height_get_quality(meta['height']))
         hoster = {'multi-part': False, 'host': host, 'class': self, 'views': None, 'url': stream_url, 'rating': None, 'quality': quality, 'direct': False}
         hosters.append(hoster)
             
     return hosters
 def get_sources(self, video):
     source_url = self.get_url(video)
     hosters = []
     if not source_url or source_url == FORCE_NO_MATCH: return hosters
     url = scraper_utils.urljoin(self.base_url, source_url)
     html = self._http_get(url, require_debrid=False, cache_limit=.5)
     title = dom_parser2.parse_dom(html,
                                   'meta', {'property': 'og:title'},
                                   req='content')
     meta = scraper_utils.parse_movie_link(
         title[0].attrs['content']) if title else {}
     fragment = dom_parser2.parse_dom(html, 'p',
                                      {'class': 'download_message'})
     if fragment:
         for attrs, _content in dom_parser2.parse_dom(fragment[0].content,
                                                      'a',
                                                      req='href'):
             source = attrs['href']
             if scraper_utils.excluded_link(source): continue
             host = urlparse.urlparse(source).hostname
             quality = scraper_utils.height_get_quality(
                 meta.get('height', 480))
             hoster = {
                 'multi-part': False,
                 'host': host,
                 'class': self,
                 'views': None,
                 'url': source,
                 'rating': None,
                 'quality': quality,
                 'direct': True
             }
             if 'format' in meta: hoster['format'] = meta['format']
             hosters.append(hoster)
     return hosters
 def get_sources(self, video):
     source_url = self.get_url(video)
     hosters = []
     if not source_url or source_url == FORCE_NO_MATCH: return hosters
     url = scraper_utils.urljoin(self.base_url, source_url)
     html = self._http_get(url,
                           headers=self.headers,
                           require_debrid=True,
                           cache_limit=.5)
     sources = self.__get_post_links(html, video)
     for source in sources:
         if scraper_utils.excluded_link(source): continue
         host = urlparse.urlparse(source).hostname
         hoster = {
             'multi-part': False,
             'host': host,
             'class': self,
             'views': None,
             'url': source,
             'rating': None,
             'quality': sources[source],
             'direct': False
         }
         hosters.append(hoster)
     return hosters
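# Several of the variants above and below call __get_post_links(html, video)
# instead and expect a flat {url: quality} mapping back. A hypothetical sketch
# of that variant, reusing only helpers that already appear in this listing
# (the plain href scan itself is an assumption):
def __get_post_links(self, html, video):
    sources = {}
    for match in re.finditer(r'href="(https?://[^"]+)"', html):
        stream_url = match.group(1)
        if video.video_type == VIDEO_TYPES.MOVIE:
            meta = scraper_utils.parse_movie_link(stream_url)
        else:
            meta = scraper_utils.parse_episode_link(stream_url)
        sources[stream_url] = scraper_utils.height_get_quality(meta['height'])
    return sources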
Example #6
    def get_sources(self, video):
        source_url = self.get_url(video)
        hosters = []
        sources = {}
        if not source_url or source_url == FORCE_NO_MATCH: return hosters
        url = scraper_utils.urljoin(self.base_url, source_url)
        html = self._http_get(url, require_debrid=False, cache_limit=.5)
        if not html:
            url = scraper_utils.urljoin(self.old_base_url, source_url)
            html = self._http_get(url, require_debrid=False, cache_limit=.5)

        sources.update(self.__get_post_links(html, video))

        if kodi.get_setting('%s-include_comments' %
                            (self.get_name())) == 'true':
            for _attrs, comment in dom_parser2.parse_dom(
                    html, 'div', {'id': re.compile(r'commentbody-\d+')}):
                sources.update(self.__get_comment_links(comment, video))

        for source in sources:
            if scraper_utils.excluded_link(source): continue
            host = urlparse.urlparse(source).hostname
            hoster = {
                'multi-part': False,
                'host': host,
                'class': self,
                'views': None,
                'url': source,
                'rating': None,
                'quality': sources[source],
                'direct': False
            }
            hosters.append(hoster)
        return hosters
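# __get_comment_links(comment, video) is likewise private and not shown here.
# Since the comment fragment is just more HTML, a plausible sketch (a pure
# assumption) is to run the same link extraction over it:
def __get_comment_links(self, comment, video):
    return self.__get_post_links(comment, video)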
    def get_sources(self, video):
        hosters = []
        sources = {}
        source_url = self.get_url(video)
        if not source_url or source_url == FORCE_NO_MATCH: return hosters
        url = scraper_utils.urljoin(self.base_url, source_url)
        html = self._http_get(url, require_debrid=True, cache_limit=.5)
        fragment = dom_parser2.parse_dom(html, 'div',
                                         {'class': 'entry-content'})
        if fragment:
            for _attrs, td in dom_parser2.parse_dom(fragment[0].content, 'td'):
                for attrs, _content in dom_parser2.parse_dom(td,
                                                             'a',
                                                             req='href'):
                    meta = scraper_utils.parse_episode_link(attrs['href'])
                    sources[attrs['href']] = scraper_utils.height_get_quality(
                        meta['height'])

        for source, values in sources.iteritems():
            if scraper_utils.excluded_link(source): continue
            host = urlparse.urlparse(source).hostname
            hoster = {
                'multi-part': False,
                'host': host,
                'class': self,
                'views': None,
                'url': source,
                'rating': None,
                'quality': values,
                'direct': False
            }
            hosters.append(hoster)
        return hosters
 def get_sources(self, video):
     source_url = self.get_url(video)
     hosters = []
     if not source_url or source_url == FORCE_NO_MATCH: return hosters
     url = scraper_utils.urljoin(self.base_url, source_url)
     html = self._http_get(url, headers=self.headers, require_debrid=True, cache_limit=.5)
     sources = self.__get_post_links(html, video)
     for source in sources:
         if scraper_utils.excluded_link(source): continue
         host = urlparse.urlparse(source).hostname
         hoster = {'multi-part': False, 'host': host, 'class': self, 'views': None, 'url': source, 'rating': None, 'quality': sources[source], 'direct': False}
         hosters.append(hoster)
     return hosters
 def get_sources(self, video):
     source_url = self.get_url(video)
     hosters = []
     if not source_url or source_url == FORCE_NO_MATCH: return hosters
     url = scraper_utils.urljoin(self.base_url, source_url)
     html = self._http_get(url, require_debrid=True, cache_limit=.5)
     sources = self.__get_post_links(html)
     for source, value in sources.iteritems():
         if scraper_utils.excluded_link(source): continue
         host = urlparse.urlparse(source).hostname
         if video.video_type == VIDEO_TYPES.MOVIE:
             meta = scraper_utils.parse_movie_link(value['release'])
         else:
             meta = scraper_utils.parse_episode_link(value['release'])
         quality = scraper_utils.height_get_quality(meta['height'])
         hoster = {'multi-part': False, 'host': host, 'class': self, 'views': None, 'url': source, 'rating': None, 'quality': quality, 'direct': False}
         if 'format' in meta: hoster['format'] = meta['format']
         hosters.append(hoster)
     return hosters
 def get_sources(self, video):
     source_url = self.get_url(video)
     hosters = []
     if not source_url or source_url == FORCE_NO_MATCH: return hosters
     url = scraper_utils.urljoin(self.base_url, source_url)
     html = self._http_get(url, require_debrid=True, cache_limit=.5)
     title = dom_parser2.parse_dom(html, 'meta', {'property': 'og:title'}, req='content')
     meta = scraper_utils.parse_movie_link(title[0].attrs['content']) if title else {}
     fragment = dom_parser2.parse_dom(html, 'p', {'class': 'download_message'})
     if fragment:
         for attrs, _content in dom_parser2.parse_dom(fragment[0].content, 'a', req='href'):
             source = attrs['href']
             if scraper_utils.excluded_link(source): continue
             host = urlparse.urlparse(source).hostname
             quality = scraper_utils.height_get_quality(meta.get('height', 480))
             hoster = {'multi-part': False, 'host': host, 'class': self, 'views': None, 'url': source, 'rating': None, 'quality': quality, 'direct': False}
             if 'format' in meta: hoster['format'] = meta['format']
             hosters.append(hoster)
     return hosters
    def get_sources(self, video):
        hosters = []
        sources = {}
        source_url = self.get_url(video)
        if not source_url or source_url == FORCE_NO_MATCH: return hosters
        url = scraper_utils.urljoin(self.base_url, source_url)
        html = self._http_get(url, require_debrid=True, cache_limit=.5)
        fragment = dom_parser2.parse_dom(html, 'div', {'class': 'entry-content'})
        if fragment:
            for _attrs, td in dom_parser2.parse_dom(fragment[0].content, 'td'):
                for attrs, _content in dom_parser2.parse_dom(td, 'a', req='href'):
                    meta = scraper_utils.parse_episode_link(attrs['href'])
                    sources[attrs['href']] = scraper_utils.height_get_quality(meta['height'])

        for source, values in sources.iteritems():
            if scraper_utils.excluded_link(source): continue
            host = urlparse.urlparse(source).hostname
            hoster = {'multi-part': False, 'host': host, 'class': self, 'views': None, 'url': source, 'rating': None, 'quality': values, 'direct': False}
            hosters.append(hoster)
        return hosters
 def get_sources(self, video):
     source_url = self.get_url(video)
     hosters = []
     if not source_url or source_url == FORCE_NO_MATCH: return hosters
     url = scraper_utils.urljoin(self.base_url, source_url)
     html = self._http_get(url, require_debrid=True, cache_limit=.5)
     post = dom_parser2.parse_dom(html, 'div', {'class': 'entry-content'})
     if not post: return hosters
     for match in re.finditer('(?:href="|>)(https?://[^"<]+)', post[0].content):
         stream_url = match.group(1)
         if scraper_utils.excluded_link(stream_url) or 'imdb.com' in stream_url: continue
         host = urlparse.urlparse(stream_url).hostname
         if video.video_type == VIDEO_TYPES.MOVIE:
             meta = scraper_utils.parse_movie_link(stream_url)
         else:
             meta = scraper_utils.parse_episode_link(stream_url)
         quality = scraper_utils.height_get_quality(meta['height'])
         hoster = {'multi-part': False, 'host': host, 'class': self, 'views': None, 'url': stream_url, 'rating': None, 'quality': quality, 'direct': False}
         hosters.append(hoster)
     return hosters
Example #14
    def get_sources(self, video):
        source_url = self.get_url(video)
        hosters = []
        sources = {}
        if not source_url or source_url == FORCE_NO_MATCH: return hosters
        url = scraper_utils.urljoin(self.base_url, source_url)
        html = self._http_get(url, require_debrid=True, cache_limit=.5)
        if not html:
            url = scraper_utils.urljoin(self.old_base_url, source_url)
            html = self._http_get(url, require_debrid=True, cache_limit=.5)
            
        sources.update(self.__get_post_links(html, video))
        
        if kodi.get_setting('%s-include_comments' % (self.get_name())) == 'true':
            for _attrs, comment in dom_parser2.parse_dom(html, 'div', {'id': re.compile(r'commentbody-\d+')}):
                sources.update(self.__get_comment_links(comment, video))

        for source in sources:
            if scraper_utils.excluded_link(source): continue
            host = urlparse.urlparse(source).hostname
            hoster = {'multi-part': False, 'host': host, 'class': self, 'views': None, 'url': source, 'rating': None, 'quality': sources[source], 'direct': False}
            hosters.append(hoster)
        return hosters
 def get_sources(self, video):
     hosters = []
     source_url = self.get_url(video)
     if not source_url or source_url == FORCE_NO_MATCH: return hosters
     page_url = scraper_utils.urljoin(self.base_url, source_url)
     html = self._http_get(page_url, require_debrid=False, cache_limit=.5)
     if video.video_type == VIDEO_TYPES.MOVIE:
         page_url = self.__get_release(html, video)
         if page_url is None: return hosters
         
         page_url = scraper_utils.urljoin(self.base_url, page_url)
         html = self._http_get(page_url, require_debrid=False, cache_limit=.5)
         
     hevc = False
     for _attrs, content in dom_parser2.parse_dom(html, 'span', {'class': 'releaselabel'}):
         if re.search('(hevc|x265)', content, re.I):
             hevc = 'x265'
             
          match = re.search(r'(\d+)x(\d+)', content)
         if match:
             _width, height = match.groups()
             quality = scraper_utils.height_get_quality(height)
             break
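      # for/else: the else branch runs only if the loop finished without break,
      # i.e. no WxH resolution label was found, so fall back to a default quality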
     else:
         quality = QUALITIES.HIGH
     
     streams = [attrs['href'] for attrs, _content in dom_parser2.parse_dom(html, 'a', {'class': 'links'}, req='href')]
     streams += [content for _attrs, content in dom_parser2.parse_dom(html, 'pre', {'class': 'links'})]
     for stream_url in streams:
         if scraper_utils.excluded_link(stream_url): continue
         host = urlparse.urlparse(stream_url).hostname
         hoster = {'multi-part': False, 'host': host, 'class': self, 'views': None, 'url': stream_url, 'rating': None, 'quality': quality, 'direct': False}
         if hevc: hoster['format'] = hevc
         hosters.append(hoster)
             
     return hosters
Example #16
 def get_sources(self, video):
     hosters = []
     source_url = self.get_url(video)
     if not source_url or source_url == FORCE_NO_MATCH: return hosters
     page_url = scraper_utils.urljoin(self.base_url, source_url)
     html = self._http_get(page_url, require_debrid=True, cache_limit=.5)
     if video.video_type == VIDEO_TYPES.MOVIE:
         page_url = self.__get_release(html, video)
         if page_url is None: return hosters
         
         page_url = scraper_utils.urljoin(self.base_url, page_url)
         html = self._http_get(page_url, require_debrid=True, cache_limit=.5)
         
     hevc = False
     for _attrs, content in dom_parser2.parse_dom(html, 'span', {'class': 'releaselabel'}):
         if re.search('(hevc|x265)', content, re.I):
             hevc = 'x265'
             
          match = re.search(r'(\d+)x(\d+)', content)
         if match:
             _width, height = match.groups()
             quality = scraper_utils.height_get_quality(height)
             break
     else:
         quality = QUALITIES.HIGH
     
     streams = [attrs['href'] for attrs, _content in dom_parser2.parse_dom(html, 'a', {'class': 'links'}, req='href')]
     streams += [content for _attrs, content in dom_parser2.parse_dom(html, 'pre', {'class': 'links'})]
     for stream_url in streams:
         if scraper_utils.excluded_link(stream_url): continue
         host = urlparse.urlparse(stream_url).hostname
         hoster = {'multi-part': False, 'host': host, 'class': self, 'views': None, 'url': stream_url, 'rating': None, 'quality': quality, 'direct': False}
         if hevc: hoster['format'] = hevc
         hosters.append(hoster)
             
     return hosters
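# Every variant above returns the same hoster record shape. A tiny,
# hypothetical consumer -- not part of the scrapers themselves -- could
# de-duplicate the results by URL and group them per host for display:
def group_by_host(hosters):
    grouped = {}
    seen = set()
    for hoster in hosters:
        if hoster['url'] in seen: continue
        seen.add(hoster['url'])
        grouped.setdefault(hoster['host'], []).append(hoster)
    return grouped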