Example #1
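Taken from an Alluc scraper module: for each entry in SEARCH_TYPES it builds a search URL, fetches and parses the JSON response, and collects one hoster dict per unique stream URL, skipping .rar archives and results that do not carry exactly one hoster URL.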
    def __get_links(self, url, video):
        hosters = []
        seen_urls = set()
        for search_type in SEARCH_TYPES:
            search_url = self.__translate_search(url, search_type)
            if search_url:
                html = self._http_get(search_url, cache_limit=.5)
                js_result = scraper_utils.parse_json(html, search_url)
                if js_result.get('status') == 'success':
                    for result in js_result['result']:
                        # Keep only single-hoster results and skip .rar archives;
                        # requiring exactly one URL also guards the [0] index below.
                        if len(result['hosterurls']) != 1: continue
                        if result['extension'] == 'rar': continue
                        
                        stream_url = result['hosterurls'][0]['url']
                        if stream_url not in seen_urls:
                            if scraper_utils.title_check(video, result['title']):
                                host = urlparse.urlsplit(stream_url).hostname
                                quality = scraper_utils.get_quality(video, host, self._get_title_quality(result['title']))
                                hoster = {'multi-part': False, 'class': self, 'views': None,
                                          'url': stream_url, 'rating': None, 'host': host,
                                          'quality': quality, 'direct': False,
                                          'extra': result['title']}
                                hosters.append(hoster)
                                seen_urls.add(stream_url)
                else:
                    log_utils.log('Alluc API Error: %s: %s' % (search_url, js_result.get('message')), log_utils.LOGWARNING)

        return hosters
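For reference, a minimal standalone sketch of the same dedup-and-filter pass over an Alluc-style response; the field names ('status', 'result', 'hosterurls', 'extension', 'title') are the ones the example reads, but the sample data itself is invented:

    from urlparse import urlsplit  # Python 2; use urllib.parse.urlsplit on Python 3

    # Invented response shaped like the one the example parses.
    js_result = {
        'status': 'success',
        'result': [
            {'title': 'Some.Movie.2016.720p', 'extension': 'mkv',
             'hosterurls': [{'url': 'http://host.example/abc'}]},
            {'title': 'Some.Movie.2016.720p', 'extension': 'rar',
             'hosterurls': [{'url': 'http://host.example/def'}]},
        ],
    }

    seen_urls = set()
    if js_result.get('status') == 'success':
        for result in js_result['result']:
            if len(result['hosterurls']) != 1: continue
            if result['extension'] == 'rar': continue
            stream_url = result['hosterurls'][0]['url']
            if stream_url in seen_urls: continue
            seen_urls.add(stream_url)
            print(urlsplit(stream_url).hostname)  # prints: host.example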
Example #2
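From an EasyNews scraper module: it issues a single search request, walks the 'data' array of the JSON response, discards posts that trip any of six exclusion checks (logging which ones fired), and builds direct hoster dicts whose stream URLs carry the session cookies.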
 def __get_links(self, url, video):
     hosters = []
     search_url = self.__translate_search(url)
     html = self._http_get(search_url, cache_limit=.5)
     js_result = scraper_utils.parse_json(html, search_url)
     if 'data' in js_result:
         for item in js_result['data']:
             # EasyNews returns post fields under numeric string keys.
             post_hash, size, post_title, ext, duration = item['0'], item['4'], item['10'], item['11'], item['14']
             # Exclusion filters: title mismatch, no English audio track, too
             # short (a few seconds, or under six minutes), password-protected,
             # flagged as a virus, or not a video file.
             checks = [False] * 6
             if not scraper_utils.title_check(video, post_title): checks[0] = True
             if 'alangs' in item and item['alangs'] and 'eng' not in item['alangs']: checks[1] = True
             if re.match(r'^\d+s', duration) or re.match(r'^[0-5]m', duration): checks[2] = True
             if item.get('passwd'): checks[3] = True
             if item.get('virus'): checks[4] = True
             if 'type' in item and item['type'].upper() != 'VIDEO': checks[5] = True
             if any(checks):
                 log_utils.log('EasyNews Post excluded: %s - |%s|' % (checks, item), log_utils.LOGDEBUG)
                 continue
             
             # Build the direct download URL, then append the session cookies
             # using Kodi's 'url|Header=value' playback convention.
             stream_url = urllib.quote('%s%s/%s%s' % (post_hash, ext, post_title, ext))
             stream_url = 'http://members.easynews.com/dl/%s' % (stream_url)
             stream_url += '|Cookie=%s' % (self._get_stream_cookies())
             host = self._get_direct_hostname(stream_url)
             quality = scraper_utils.width_get_quality(item['width'])
             hoster = {'multi-part': False, 'class': self, 'views': None, 'url': stream_url, 'rating': None, 'host': host, 'quality': quality, 'direct': True}
             if size: hoster['size'] = size
             if post_title: hoster['extra'] = post_title
             hosters.append(hoster)
     return hosters
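The '|Cookie=...' suffix above uses Kodi's convention of appending HTTP request headers to a playback URL after a '|' separator. Here is a minimal sketch of that step in isolation; the cookie value is invented, where the example gets the real one from self._get_stream_cookies():

    import urllib  # Python 2; use urllib.parse.urlencode on Python 3

    # Invented session cookie; the scraper obtains the real one from its
    # authenticated EasyNews HTTP session.
    cookies = {'sessionid': 'abc123'}
    cookie_header = urllib.urlencode(cookies)  # 'sessionid=abc123'

    stream_url = 'http://members.easynews.com/dl/some%20post.mkv'
    play_url = '%s|Cookie=%s' % (stream_url, cookie_header)
    # play_url -> 'http://members.easynews.com/dl/some%20post.mkv|Cookie=sessionid=abc123'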