Example #1
    def get_sources(self, video):
        hosters = []
        source_url = self.get_url(video)
        if not source_url or source_url == FORCE_NO_MATCH: return hosters
        for stream in self.__get_videos(source_url, video):
            if video.video_type == VIDEO_TYPES.EPISODE and not scraper_utils.release_check(
                    video, stream['name']):
                continue

            host = scraper_utils.get_direct_hostname(self, stream['url'])
            hoster = {
                'multi-part': False,
                'class': self,
                'views': None,
                'url': stream['url'],
                'rating': None,
                'host': host,
                'quality': stream['quality'],
                'direct': True
            }
            if 'size' in stream:
                hoster['size'] = scraper_utils.format_size(stream['size'])
            if 'name' in stream: hoster['extra'] = stream['name']
            hosters.append(hoster)

        return hosters
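Every example on this page funnels candidate release names through scraper_utils.release_check. The stub below is only a sketch of the contract these call sites imply (the signature and boolean return are inferred; the body, and the video fields it reads, are guesses rather than the real SALTS implementation):

import re

def release_check(video, release, require_title=True):
    # Sketch only: match a scene-release string such as
    # 'Show.Name.S03E02.720p.x264-GRP' against the requested video.
    norm = release.upper()
    if video.video_type == VIDEO_TYPES.EPISODE:  # VIDEO_TYPES comes from the add-on
        # hypothetical check: require the SxxEyy tag of the requested episode
        if not re.search(r'S%02d\s*E%02d' % (int(video.season), int(video.episode)), norm):
            return False
    if require_title:
        # hypothetical check: every word of the title must appear in order
        pattern = r'\W+'.join(re.escape(word) for word in video.title.upper().split())
        if not re.search(pattern, norm):
            return False
    return True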
Example #2
    def get_sources(self, video):
        source_url = self.get_url(video)
        hosters = []
        if source_url:
            url = urlparse.urljoin(self.base_url, source_url)
            html = self._http_get(url, cache_limit=.5)
            js_result = scraper_utils.parse_json(html, url)
            if 'error' in js_result:
                log_utils.log('DD.tv API error: "%s" @ %s' % (js_result['error'], url), log_utils.LOGWARNING)
                return hosters

            for result in js_result:
                if not scraper_utils.release_check(video, result['release'], require_title=False): continue
                if result['quality'] in self.q_order:
                    for key in result['links']:
                        url = result['links'][key][0]
                        if re.search(r'\.rar(\.|$)', url):
                            continue
                        
                        hostname = urlparse.urlparse(url).hostname
                        hoster = {'multi-part': False, 'class': self, 'views': None, 'url': url, 'rating': None, 'host': hostname, 'quality': QUALITY_MAP[result['quality']], 'direct': False}
                        hoster['format'] = result['quality']
                        if 'x265' in result['release'] and result['quality'] != '1080P-X265': hoster['format'] += '-x265'
                        hosters.append(hoster)

        return hosters
Example #3
 def _get_episode_url(self, show_url, video):
     force_title = scraper_utils.force_title(video)
     title_fallback = kodi.get_setting('title-fallback') == 'true'
     norm_title = scraper_utils.normalize_title(video.ep_title)
     page_url = [show_url]
     too_old = False
     while page_url and not too_old:
         html = self._http_get(page_url[0], require_debrid=True, cache_limit=1)
         for _attr, post in dom_parser2.parse_dom(html, 'div', {'id': re.compile(r'post-\d+')}):
             if self.__too_old(post):
                 too_old = True
                 break
             if CATEGORIES[VIDEO_TYPES.TVSHOW] in post and show_url in post:
                 match = dom_parser2.parse_dom(post, 'a', req='href')
                 if match:
                     url, title = match[0].attrs['href'], match[0].content
                     if not force_title:
                         if scraper_utils.release_check(video, title, require_title=False):
                             return scraper_utils.pathify_url(url)
                     else:
                         if title_fallback and norm_title:
                             match = re.search('</strong>(.*?)</p>', post)
                             if match and norm_title == scraper_utils.normalize_title(match.group(1)):
                                 return scraper_utils.pathify_url(url)
             
         page_url = dom_parser2.parse_dom(html, 'a', {'class': 'nextpostslink'}, req='href')
         if page_url: page_url = [page_url[0].attrs['href']]
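The pagination above leans on dom_parser2, whose parse_dom returns match objects exposing .attrs and .content (visible from the call sites on this page). A toy call, assuming the library is importable:

import dom_parser2

html = '<a class="nextpostslink" href="/page/2/">Next</a>'
matches = dom_parser2.parse_dom(html, 'a', {'class': 'nextpostslink'}, req='href')
if matches:
    # each match carries the parsed attributes and the tag's inner content
    print(matches[0].attrs['href'], matches[0].content)  # /page/2/ Next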
Example #4
    def __get_links(self, url, video):
        hosters = []
        seen_urls = set()
        for search_type in SEARCH_TYPES:
            search_url, params = self.__translate_search(url, search_type)
            if not search_url: continue
            html = self._http_get(search_url, params=params, cache_limit=.5)
            js_result = scraper_utils.parse_json(html, search_url)
            if js_result.get('status') != 'success':
                logger.log('Alluc API Error: |%s|%s|: %s' % (search_url, params, js_result.get('message', 'Unknown Error')), log_utils.LOGWARNING)
                continue
            
            for result in js_result['result']:
                if len(result['hosterurls']) != 1: continue  # skip multi-host posts and empty lists
                if result['extension'] == 'rar': continue
                stream_url = result['hosterurls'][0]['url']
                if stream_url in seen_urls: continue

                if scraper_utils.release_check(video, result['title']):
                    host = urlparse.urlsplit(stream_url).hostname
                    quality = scraper_utils.get_quality(video, host, self._get_title_quality(result['title']))
                    hoster = {'multi-part': False, 'class': self, 'views': None, 'url': stream_url, 'rating': None, 'host': host, 'quality': quality, 'direct': False}
                    hoster['extra'] = scraper_utils.cleanse_title(result['title'])
                    if video.video_type == VIDEO_TYPES.MOVIE:
                        meta = scraper_utils.parse_movie_link(hoster['extra'])
                    else:
                        meta = scraper_utils.parse_episode_link(hoster['extra'])
                    if 'format' in meta: hoster['format'] = meta['format']
                    
                    hosters.append(hoster)
                    seen_urls.add(stream_url)

        return hosters
Example #5
    def __get_links(self, url, video):
        hosters = []
        seen_urls = set()
        for search_type in SEARCH_TYPES:
            search_url, params = self.__translate_search(url, search_type)
            if search_url:
                html = self._http_get(search_url, params=params, cache_limit=.5)
                js_result = scraper_utils.parse_json(html, search_url)
                if js_result.get('status') == 'success':
                    for result in js_result['result']:
                        if len(result['hosterurls']) != 1: continue  # also guards the [0] index below
                        if result['extension'] == 'rar': continue
                        
                        stream_url = result['hosterurls'][0]['url']
                        if stream_url not in seen_urls:
                            if scraper_utils.release_check(video, result['title']):
                                host = urlparse.urlsplit(stream_url).hostname
                                quality = scraper_utils.get_quality(video, host, self._get_title_quality(result['title']))
                                hoster = {'multi-part': False, 'class': self, 'views': None, 'url': stream_url, 'rating': None, 'host': host, 'quality': quality, 'direct': False}
                                hoster['extra'] = result['title']
                                hosters.append(hoster)
                                seen_urls.add(stream_url)
                else:
                    log_utils.log('Alluc API Error: |%s|%s|: %s' % (search_url, params, js_result.get('message', 'Unknown Error')), log_utils.LOGWARNING)

        return hosters
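For reference, the JSON shape both Alluc variants expect can be reconstructed from the field accesses above; the values here are illustrative only:

js_result = {
    'status': 'success',  # anything else triggers the error log
    'result': [{
        'title': 'Show.Name.S01E01.720p.HDTV.x264-GRP',
        'extension': 'mkv',  # 'rar' results are skipped
        'hosterurls': [{'url': 'http://somehost.example/abc123'}],  # exactly one expected
    }],
}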
Example #6
    def get_sources(self, video):
        source_url = self.get_url(video)
        hosters = []
        if not source_url or source_url == FORCE_NO_MATCH: return hosters
        url = scraper_utils.urljoin(self.base_url, source_url)
        html = self._http_get(url, cache_limit=.5)
        js_result = scraper_utils.parse_json(html, url)
        if 'error' in js_result:
            logger.log('DD.tv API error: "%s" @ %s' % (js_result['error'], url), log_utils.LOGWARNING)
            return hosters

        for result in js_result:
            if not scraper_utils.release_check(video, result['release'], require_title=False): continue
            if result['quality'] in self.q_order:
                for key in result['links']:
                    url = result['links'][key][0]
                    if re.search(r'\.rar(\.|$)', url):
                        continue
                    
                    hostname = urlparse.urlparse(url).hostname
                    hoster = {'multi-part': False, 'class': self, 'views': None, 'url': url, 'rating': None, 'host': hostname, 'quality': QUALITY_MAP[result['quality']], 'direct': False}
                    hoster['format'] = result['quality']
                    if 'x265' in result['release'] and result['quality'] != '1080P-X265': hoster['format'] += '-x265'
                    hosters.append(hoster)

        return hosters
Example #7
 def _get_episode_url(self, show_url, video):
     force_title = scraper_utils.force_title(video)
     title_fallback = kodi.get_setting('title-fallback') == 'true'
     norm_title = scraper_utils.normalize_title(video.ep_title)
     page_url = [show_url]
     too_old = False
     while page_url and not too_old:
         url = scraper_utils.urljoin(self.base_url, page_url[0])
         html = self._http_get(url, require_debrid=True, cache_limit=1)
         headings = re.findall(r'<h2>\s*<a\s+href="([^"]+)[^>]+>(.*?)</a>', html)
         posts = [r.content for r in dom_parser2.parse_dom(html, 'div', {'id': re.compile(r'post-\d+')})]
         for heading, post in zip(headings, posts):
             if self.__too_old(post):
                 too_old = True
                 break
             if CATEGORIES[VIDEO_TYPES.TVSHOW] in post and show_url in post:
                 url, title = heading
                 if not force_title:
                     if scraper_utils.release_check(video, title, require_title=False):
                         return scraper_utils.pathify_url(url)
                 else:
                     if title_fallback and norm_title:
                         match = re.search('<strong>(.*?)</strong>', post)
                         if match and norm_title == scraper_utils.normalize_title(match.group(1)):
                             return scraper_utils.pathify_url(url)
             
         page_url = dom_parser2.parse_dom(html, 'a', {'class': 'nextpostslink'}, req='href')
         if page_url: page_url = [page_url[0].attrs['href']]
Example #8
 def _get_episode_url(self, show_url, video):
     force_title = scraper_utils.force_title(video)
     title_fallback = kodi.get_setting('title-fallback') == 'true'
     norm_title = scraper_utils.normalize_title(video.ep_title)
     page_url = [show_url]
     too_old = False
     while page_url and not too_old:
         url = urlparse.urljoin(self.base_url, page_url[0])
         html = self._http_get(url, require_debrid=True, cache_limit=1)
         posts = dom_parser.parse_dom(html, 'div', {'id': r'post-\d+'})
         for post in posts:
             if self.__too_old(post):
                 too_old = True
                 break
             if CATEGORIES[VIDEO_TYPES.TVSHOW] in post and show_url in post:
                 match = re.search(r'<a\s+href="([^"]+)[^>]+>(.*?)</a>', post)
                 if match:
                     url, title = match.groups()
                     if not force_title:
                         if scraper_utils.release_check(video, title, require_title=False):
                             return scraper_utils.pathify_url(url)
                     else:
                         if title_fallback and norm_title:
                             match = re.search('</strong>(.*?)</p>', post)
                             if match and norm_title == scraper_utils.normalize_title(match.group(1)):
                                 return scraper_utils.pathify_url(url)
             
         page_url = dom_parser.parse_dom(html, 'a', {'class': 'nextpostslink'}, ret='href')
Example #9
 def _get_episode_url(self, show_url, video):
     force_title = scraper_utils.force_title(video)
     title_fallback = kodi.get_setting('title-fallback') == 'true'
     norm_title = scraper_utils.normalize_title(video.ep_title)
     page_url = [show_url]
     too_old = False
     while page_url and not too_old:
         html = self._http_get(page_url[0], require_debrid=False, cache_limit=1)
         for _attr, post in dom_parser2.parse_dom(html, 'div', {'id': re.compile(r'post-\d+')}):
             if self.__too_old(post):
                 too_old = True
                 break
             if CATEGORIES[VIDEO_TYPES.TVSHOW] in post and show_url in post:
                 match = dom_parser2.parse_dom(post, 'a', req='href')
                 if match:
                     url, title = match[0].attrs['href'], match[0].content
                     if not force_title:
                         if scraper_utils.release_check(video, title, require_title=False):
                             return scraper_utils.pathify_url(url)
                     else:
                         if title_fallback and norm_title:
                             match = re.search('</strong>(.*?)</p>', post)
                             if match and norm_title == scraper_utils.normalize_title(match.group(1)):
                                 return scraper_utils.pathify_url(url)
             
         page_url = dom_parser2.parse_dom(html, 'a', {'class': 'nextpostslink'}, req='href')
         if page_url: page_url = [page_url[0].attrs['href']]
Example #10
 def __get_sources(self, video, html):
     sources = {}
     for match in re.finditer(r'<center>\s*<b>\s*(.*?)\s*</b>.*?<tr>(.*?)</tr>', html, re.DOTALL):
         release, links = match.groups()
         release = re.sub('</?[^>]*>', '', release)
         if scraper_utils.release_check(video, release):
             meta = scraper_utils.parse_episode_link(release)
             for match in re.finditer('href="([^"]+)', links):
                 sources[match.group(1)] = scraper_utils.height_get_quality(meta['height'])
     return sources
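To make the screen-scrape concrete, here is the release-block regex from above run against a hypothetical HTML fragment shaped to match it:

import re

html = '''<center> <b>Show.S01E01.720p.HDTV.x264-GRP</b> </center>
<table><tr><td><a href="http://host.example/file.mkv">Download</a></td></tr></table>'''

for match in re.finditer(r'<center>\s*<b>\s*(.*?)\s*</b>.*?<tr>(.*?)</tr>', html, re.DOTALL):
    release, links = match.groups()
    print(release)  # Show.S01E01.720p.HDTV.x264-GRP
    print(re.findall(r'href="([^"]+)', links))  # ['http://host.example/file.mkv']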
Example #12
    def get_url(self, video):
        url = super(self.__class__, self).get_url(video)

        # check each torrent to see if it's an episode if there is no season url
        if url is None and video.video_type == VIDEO_TYPES.EPISODE:
            if not scraper_utils.force_title(video):
                for item in self.__get_torrents():
                    if scraper_utils.release_check(video, item['name']):
                        return 'hash=%s' % (item['hash'])

        return url
Example #14
    def __get_links(self, url, video):
        hosters = []
        search_url = self.__translate_search(url)
        html = self._http_get(search_url, cache_limit=.5)
        js_result = scraper_utils.parse_json(html, search_url)
        down_url = js_result.get('downURL')
        dl_farm = js_result.get('dlFarm')
        dl_port = js_result.get('dlPort')
        for item in js_result.get('data', []):
            post_hash, size, post_title, ext, duration = item['0'], item['4'], item['10'], item['11'], item['14']
            checks = [False] * 6
            if not scraper_utils.release_check(video, post_title): checks[0] = True
            if 'alangs' in item and item['alangs'] and 'eng' not in item['alangs']: checks[1] = True
            if re.match(r'^\d+s', duration) or re.match('^[0-5]m', duration): checks[2] = True
            if 'passwd' in item and item['passwd']: checks[3] = True
            if 'virus' in item and item['virus']: checks[4] = True
            if 'type' in item and item['type'].upper() != 'VIDEO': checks[5] = True
            if any(checks):
                log_utils.log('EasyNews Post excluded: %s - |%s|' % (checks, item), log_utils.LOGDEBUG)
                continue
            
            stream_url = down_url + urllib.quote('/%s/%s/%s%s/%s%s' % (dl_farm, dl_port, post_hash, ext, post_title, ext))
            stream_url = stream_url + '|Authorization=%s' % (urllib.quote(self.auth))
            host = self._get_direct_hostname(stream_url)
            quality = None
            if 'width' in item:
                try: width = int(item['width'])
                except: width = 0
                if width:
                    quality = scraper_utils.width_get_quality(width)
            
            if quality is None:
                if video.video_type == VIDEO_TYPES.MOVIE:
                    meta = scraper_utils.parse_movie_link(post_title)
                else:
                    meta = scraper_utils.parse_episode_link(post_title)
                quality = scraper_utils.height_get_quality(meta['height'])
                
            if self.max_bytes:
                match = re.search(r'([\d.]+)\s+(.*)', size)
                if match:
                    size_bytes = scraper_utils.to_bytes(*match.groups())
                    if size_bytes > self.max_bytes:
                        log_utils.log('Result skipped, Too big: |%s| - %s (%s) > %s (%s GB)' % (post_title, size_bytes, size, self.max_bytes, self.max_gb))
                        continue

            hoster = {'multi-part': False, 'class': self, 'views': None, 'url': stream_url, 'rating': None, 'host': host, 'quality': quality, 'direct': True}
            if any(i for i in ['X265', 'HEVC'] if i in post_title.upper()): hoster['format'] = 'x265'
            if size: hoster['size'] = size
            if post_title: hoster['extra'] = post_title
            hosters.append(hoster)
        return hosters
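The size gate above splits a display string like '1.4 GB' and hands the pieces to scraper_utils.to_bytes. A plausible stand-in for that helper (the real one may differ):

def to_bytes(num, fmt):
    # '1.4', 'GB' -> 1503238553; unknown units collapse to 0
    units = {'B': 1, 'KB': 1024, 'MB': 1024 ** 2, 'GB': 1024 ** 3, 'TB': 1024 ** 4}
    return int(float(num) * units.get(fmt.strip().upper(), 0))

print(to_bytes('1.4', 'GB'))  # 1503238553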
Example #16
    def __get_links(self, url, video):
        hosters = []
        search_url = urlparse.urljoin(self.base_url, SEARCH_URL)
        query = self.__translate_search(url)
        result = self._http_get(search_url, data=query, allow_redirect=False, cache_limit=.5)
        if 'files' in result:
            for item in result['files']:
                checks = [False] * 6
                if 'type' not in item or item['type'].upper() != 'VIDEO': checks[0] = True
                if 'is_ready' in item and item['is_ready'] != '1': checks[1] = True
                if 'av_result' in item and item['av_result'] in ['warning', 'infected']: checks[2] = True
                if 'video_info' not in item: checks[3] = True
                if 'video_info' in item and item['video_info'] and not re.search(r'#0:(?:0|1)(?:\(eng\)|\(und\))?:\s*Audio:', item['video_info']): checks[4] = True
                if not scraper_utils.release_check(video, item['name']): checks[5] = True
                if any(checks):
                    log_utils.log('Furk.net result excluded: %s - |%s|' % (checks, item['name']), log_utils.LOGDEBUG)
                    continue
                
                match = re.search(r'(\d{3,})\s*x\s*(\d{3,})', item['video_info'])
                if match:
                    width, _height = match.groups()
                    quality = scraper_utils.width_get_quality(width)
                else:
                    if video.video_type == VIDEO_TYPES.MOVIE:
                        meta = scraper_utils.parse_movie_link(item['name'])
                    else:
                        meta = scraper_utils.parse_episode_link(item['name'])
                    quality = scraper_utils.height_get_quality(meta['height'])
                    
                if 'url_pls' in item:
                    size_gb = scraper_utils.format_size(int(item['size']), 'B')
                    if self.max_bytes and int(item['size']) > self.max_bytes:
                        log_utils.log('Result skipped, Too big: |%s| - %s (%s) > %s (%sGB)' % (item['name'], item['size'], size_gb, self.max_bytes, self.max_gb))
                        continue

                    stream_url = item['url_pls']
                    host = self._get_direct_hostname(stream_url)
                    hoster = {'multi-part': False, 'class': self, 'views': None, 'url': stream_url, 'rating': None, 'host': host, 'quality': quality, 'direct': True}
                    hoster['size'] = size_gb
                    hoster['extra'] = item['name']
                    hosters.append(hoster)
                else:
                    log_utils.log('Furk.net result skipped - no playlist: |%s|' % (json.dumps(item)), log_utils.LOGDEBUG)
                    
        return hosters
Example #17
    def __get_links(self, url, video):
        hosters = []
        search_url = scraper_utils.urljoin(self.base_url, SEARCH_URL)
        query = self.__translate_search(url)
        result = self._http_get(search_url, data=query, allow_redirect=False, cache_limit=.5)
        for item in result.get('files', []):
            checks = [False] * 6
            if item.get('type', '').upper() != 'VIDEO': checks[0] = True
            if item.get('is_ready') != '1': checks[1] = True
            if item.get('av_result') in ['warning', 'infected']: checks[2] = True
            if 'video_info' not in item: checks[3] = True
            if item.get('video_info') and not re.search(r'#0:(0|1)(\((eng|und)\))?:\s*Audio:', item['video_info'], re.I): checks[4] = True
            if not scraper_utils.release_check(video, item['name']): checks[5] = True
            if any(checks):
                logger.log('Furk.net result excluded: %s - |%s|' % (checks, item['name']), log_utils.LOGDEBUG)
                continue
            
            match = re.search(r'(\d{3,})\s*x\s*(\d{3,})', item['video_info'])
            if match:
                width, _height = match.groups()
                quality = scraper_utils.width_get_quality(width)
            else:
                if video.video_type == VIDEO_TYPES.MOVIE:
                    meta = scraper_utils.parse_movie_link(item['name'])
                else:
                    meta = scraper_utils.parse_episode_link(item['name'])
                quality = scraper_utils.height_get_quality(meta['height'])
                
            if 'url_pls' in item:
                size_gb = scraper_utils.format_size(int(item['size']), 'B')
                if self.max_bytes and int(item['size']) > self.max_bytes:
                    logger.log('Result skipped, Too big: |%s| - %s (%s) > %s (%sGB)' % (item['name'], item['size'], size_gb, self.max_bytes, self.max_gb))
                    continue

                stream_url = item['url_pls']
                host = scraper_utils.get_direct_hostname(self, stream_url)
                hoster = {'multi-part': False, 'class': self, 'views': None, 'url': stream_url, 'rating': None, 'host': host, 'quality': quality, 'direct': True}
                hoster['size'] = size_gb
                hoster['extra'] = item['name']
                hosters.append(hoster)
            else:
                logger.log('Furk.net result skipped - no playlist: |%s|' % (json.dumps(item)), log_utils.LOGDEBUG)
                    
        return hosters
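The video_info filter in both Furk variants expects ffmpeg-style stream descriptions; a quick check against a made-up sample shows what passes:

import re

video_info = 'Stream #0:0(eng): Video: h264 ... Stream #0:1(eng): Audio: aac, 48000 Hz'
has_english_audio = bool(re.search(r'#0:(0|1)(\((eng|und)\))?:\s*Audio:', video_info, re.I))
print(has_english_audio)  # True: stream #0:1 is an (eng) audio track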
Example #18
    def __get_release(self, html, video):
        try: select = int(kodi.get_setting('%s-select' % (self.get_name())))
        except: select = 0
        ul_id = 'releases' if video.video_type == VIDEO_TYPES.MOVIE else 'episodes'
        fragment = dom_parser2.parse_dom(html, 'ul', {'id': ul_id})
        if fragment:
            best_qorder = 0
            best_page = None
            for _attrs, item in dom_parser2.parse_dom(fragment[0].content, 'li'):
                match = dom_parser2.parse_dom(item, 'span', req=['href', 'title'])
                if not match:
                    match = dom_parser2.parse_dom(item, 'a', req=['href', 'title'])
                    if not match: continue
                
                page_url, release = match[0].attrs['href'], match[0].attrs['title']
                match = dom_parser2.parse_dom(item, 'span', {'class': 'time'})
                if match and self.__too_old(match[0].content): break
                
                release = re.sub(r'^\[[^\]]*\]\s*', '', release)
                if video.video_type == VIDEO_TYPES.MOVIE:
                    meta = scraper_utils.parse_movie_link(release)
                else:
                    if not scraper_utils.release_check(video, release, require_title=False): continue
                    meta = scraper_utils.parse_episode_link(release)

                if select == 0:
                    best_page = page_url
                    break
                else:
                    quality = scraper_utils.height_get_quality(meta['height'])
                    logger.log('result: |%s|%s|%s|' % (page_url, quality, Q_ORDER[quality]), log_utils.LOGDEBUG)
                    if Q_ORDER[quality] > best_qorder:
                        logger.log('Setting best as: |%s|%s|%s|' % (page_url, quality, Q_ORDER[quality]), log_utils.LOGDEBUG)
                        best_page = page_url
                        best_qorder = Q_ORDER[quality]
            
            return best_page
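Q_ORDER is assumed to rank the add-on's quality labels numerically, highest last, so the best_qorder comparison keeps the sharpest release. An illustrative mapping (the actual table lives in the add-on's constants):

Q_ORDER = {'Low': 1, 'Medium': 2, 'High': 3, 'HD720': 4, 'HD1080': 5}  # illustrative only
assert Q_ORDER['HD1080'] > Q_ORDER['HD720']  # 1080p wins the best_qorder race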
Example #20
    def __get_links(self, url, video):
        hosters = []
        search_url, params = self.__translate_search(url)
        html = self._http_get(search_url, params=params, cache_limit=.5)
        js_result = scraper_utils.parse_json(html, search_url)
        down_url = js_result.get('downURL')
        dl_farm = js_result.get('dlFarm')
        dl_port = js_result.get('dlPort')
        for item in js_result.get('data', []):
            post_hash, size, post_title, ext, duration = item['0'], item['4'], item['10'], item['11'], item['14']
            checks = [False] * 6
            if not scraper_utils.release_check(video, post_title): checks[0] = True
            if 'alangs' in item and item['alangs'] and 'eng' not in item['alangs']: checks[1] = True
            if re.match(r'^\d+s', duration) or re.match('^[0-5]m', duration): checks[2] = True
            if 'passwd' in item and item['passwd']: checks[3] = True
            if 'virus' in item and item['virus']: checks[4] = True
            if 'type' in item and item['type'].upper() != 'VIDEO': checks[5] = True
            if any(checks):
                logger.log('EasyNews Post excluded: %s - |%s|' % (checks, item), log_utils.LOGDEBUG)
                continue

            stream_url = down_url + urllib.quote('/%s/%s/%s%s/%s%s' % (dl_farm, dl_port, post_hash, ext, post_title, ext))
            stream_url = stream_url + '|Authorization=%s' % (urllib.quote(self.auth))
            host = scraper_utils.get_direct_hostname(self, stream_url)
            quality = None
            if 'width' in item:
                try: width = int(item['width'])
                except: width = 0
                if width:
                    quality = scraper_utils.width_get_quality(width)

            if quality is None:
                if video.video_type == VIDEO_TYPES.MOVIE:
                    meta = scraper_utils.parse_movie_link(post_title)
                else:
                    meta = scraper_utils.parse_episode_link(post_title)
                quality = scraper_utils.height_get_quality(meta['height'])

            if self.max_bytes:
                match = re.search(r'([\d.]+)\s+(.*)', size)
                if match:
                    size_bytes = scraper_utils.to_bytes(*match.groups())
                    if size_bytes > self.max_bytes:
                        logger.log('Result skipped, Too big: |%s| - %s (%s) > %s (%s GB)' % (post_title, size_bytes, size, self.max_bytes, self.max_gb))
                        continue

            hoster = {'multi-part': False, 'class': self, 'views': None, 'url': stream_url, 'rating': None, 'host': host, 'quality': quality, 'direct': True}
            if any(i for i in ['X265', 'HEVC'] if i in post_title.upper()): hoster['format'] = 'x265'
            if size: hoster['size'] = size
            if post_title: hoster['extra'] = post_title
            hosters.append(hoster)
        return hosters
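The '|Authorization=...' suffix above relies on Kodi's convention of piping HTTP headers onto a playback URL after a '|' separator; the credential is URL-quoted so the pipe-delimited header survives intact. A minimal illustration with a dummy token:

import urllib  # Python 2, matching these examples; use urllib.parse.quote on Python 3

stream_url = 'http://example.invalid/video.mkv'
auth = 'Basic dXNlcjpwYXNz'  # dummy credential for illustration
print(stream_url + '|Authorization=%s' % (urllib.quote(auth)))
# http://example.invalid/video.mkv|Authorization=Basic%20dXNlcjpwYXNz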
Example #21
 def __episode_match(self, line, video):
     return scraper_utils.release_check(video, line['link'], require_title=False)
Example #22
 def _get_episode_url(self, season_url, video):
     query = scraper_utils.parse_query(season_url)
     if 'hash' in query:
         for stream in self.__get_videos(season_url, video):
             if scraper_utils.release_check(video, stream['name']):
                 return season_url
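scraper_utils.parse_query is assumed to behave like a flattened urlparse.parse_qs, which is what lets the 'hash=%s' URL manufactured in Example #12 round-trip into the 'hash' in query test here. A sketch of that assumption:

import urlparse  # Python 2 stdlib, matching the examples above

def parse_query(url):
    # tolerate both full URLs and bare query strings like 'hash=abc123'
    query = urlparse.urlparse(url).query or url
    return dict((k, v[0]) for k, v in urlparse.parse_qs(query).items())

print(parse_query('hash=abc123'))  # {'hash': 'abc123'}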