Example #1
    def get_sources(self, video):
        hosters = []
        source_url = self.get_url(video)
        if not source_url or source_url == FORCE_NO_MATCH: return hosters
        params = scraper_utils.parse_query(source_url)
        if 'title' in params:
            query = params['title'].replace("'", "")
            if video.video_type == VIDEO_TYPES.MOVIE:
                if 'year' in params: query += ' %s' % (params['year'])
            else:
                sxe = ''
                if 'season' in params:
                    sxe = 'S%02d' % (int(params['season']))
                if 'episode' in params:
                    sxe += 'E%02d' % (int(params['episode']))
                if sxe: query = '%s %s' % (query, sxe)
            query = urllib.quote_plus(query)
            query_url = '/search?query=%s' % (query)
            hosters = self.__get_links(query_url, video)
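            # nothing found for the title/SxxExx query; fall back to an air-date search for episodes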
            if not hosters and video.video_type == VIDEO_TYPES.EPISODE and params['air_date']:
                query = urllib.quote_plus('%s %s' % (params['title'], params['air_date'].replace('-', '.')))
                query_url = '/search?query=%s' % (query)
                hosters = self.__get_links(query_url, video)

        return hosters
Example #2
 def get_sources(self, video):
     hosters = []
     source_url = self.get_url(video)
     if not source_url or source_url == FORCE_NO_MATCH: return hosters
     query = scraper_utils.parse_query(source_url)
     if 'id' in query:
         vid_type = 'movies' if video.video_type == VIDEO_TYPES.MOVIE else 'episodes'
         url = scraper_utils.urljoin(
             self.base_url, '/api/v2/%s/%s' % (vid_type, query['id']))
         js_data = self._http_get(url, cache_limit=.5)
         if 'url' in js_data:
             stream_url = js_data['url']
             quality = QUALITIES.HD720
             hoster = {
                 'multi-part': False,
                 'host': scraper_utils.get_direct_hostname(self, stream_url),
                 'class': self,
                 'url': stream_url,
                 'quality': quality,
                 'views': None,
                 'rating': None,
                 'direct': True
             }
             hosters.append(hoster)
     return hosters
Example #3
    def get_sources(self, video):
        hosters = []
        source_url = self.get_url(video)
        if not source_url or source_url == FORCE_NO_MATCH: return hosters
        query = scraper_utils.parse_query(source_url)
        if 'link' in query:
            stream_url = query['link']
            host = urlparse.urlparse(stream_url).hostname
            if 'xml_file' in query:
                xml_meta = XML_META.get(query['xml_file'], {})
            else:
                xml_meta = {}

            quality = xml_meta.get('quality', QUALITIES.HD1080)
            source = {
                'multi-part': False,
                'url': stream_url,
                'host': host,
                'class': self,
                'quality': quality,
                'views': None,
                'rating': None,
                'direct': False
            }
            if 'quality' in xml_meta: del xml_meta['quality']
            source.update(xml_meta)
            hosters.append(source)

        return hosters
Example #4
    def get_sources(self, video):
        hosters = []
        source_url = self.get_url(video)
        if not source_url or source_url == FORCE_NO_MATCH: return hosters
        params = scraper_utils.parse_query(source_url)
        if 'title' in params:
            search_title = re.sub("[^A-Za-z0-9. ]", "", urllib.unquote_plus(params['title']))
            query = search_title
            if video.video_type == VIDEO_TYPES.MOVIE:
                if 'year' in params: query += ' %s' % (params['year'])
            else:
                sxe = ''
                if 'season' in params:
                    sxe = 'S%02d' % (int(params['season']))
                if 'episode' in params:
                    sxe += 'E%02d' % (int(params['episode']))
                if sxe: query = '%s %s' % (query, sxe)
            query_url = '/search?query=%s' % (urllib.quote_plus(query))
            hosters = self.__get_links(query_url, video)
            if not hosters and video.video_type == VIDEO_TYPES.EPISODE and params['air_date']:
                query = urllib.quote_plus('%s %s' % (search_title, params['air_date'].replace('-', '.')))
                query_url = '/search?query=%s' % (query)
                hosters = self.__get_links(query_url, video)

        return hosters
Example #5
 def _get_episode_url(self, show_url, video):
     query = scraper_utils.parse_query(show_url)
     if 'id' in query:
         url = scraper_utils.urljoin(self.base_url, '/api/v2/shows/%s' % (query['id']))
         js_data = self._http_get(url, cache_limit=.5)
         if 'episodes' in js_data:
             force_title = scraper_utils.force_title(video)
             if not force_title:
                 for episode in js_data['episodes']:
                     if int(video.season) == int(episode['season']) and int(video.episode) == int(episode['number']):
                         return scraper_utils.pathify_url('?id=%s' % (episode['id']))
                 
                 if kodi.get_setting('airdate-fallback') == 'true' and video.ep_airdate:
                     for episode in js_data['episodes']:
                         if 'airdate' in episode:
                             ep_airdate = scraper_utils.to_datetime(episode['airdate'], "%Y-%m-%d").date()
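                             # the site appears to list airdates one day later than trakt, so match against the prior day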
                             if video.ep_airdate == (ep_airdate - datetime.timedelta(days=1)):
                                 return scraper_utils.pathify_url('?id=%s' % (episode['id']))
             else:
                 logger.log('Skipping S&E matching as title search is forced on: %s' % (video.trakt_id), log_utils.LOGDEBUG)
             
             if (force_title or kodi.get_setting('title-fallback') == 'true') and video.ep_title:
                 norm_title = scraper_utils.normalize_title(video.ep_title)
                 for episode in js_data['episodes']:
                     if 'name' in episode and norm_title in scraper_utils.normalize_title(episode['name']):
                         return scraper_utils.pathify_url('?id=%s' % (episode['id']))
Example #6
    def _get_episode_url(self, show_url, video):
        params = scraper_utils.parse_query(show_url)
        cmd = '{"jsonrpc": "2.0", "method": "VideoLibrary.GetEpisodes", "params": {"tvshowid": %s, "season": %s, "filter": {"field": "%s", "operator": "is", "value": "%s"}, \
        "limits": { "start" : 0, "end": 25 }, "properties" : ["title", "season", "episode", "file", "streamdetails"], "sort": { "order": "ascending", "method": "label", "ignorearticle": true }}, "id": "libTvShows"}'
        base_url = 'video_type=%s&id=%s'
        episodes = []
        force_title = scraper_utils.force_title(video)
        if not force_title:
            run = cmd % (params['id'], video.season, 'episode', video.episode)
            meta = xbmc.executeJSONRPC(run)
            meta = scraper_utils.parse_json(meta)
            logger.log('Episode Meta: %s' % (meta), log_utils.LOGDEBUG)
            if 'result' in meta and 'episodes' in meta['result']:
                episodes = meta['result']['episodes']
        else:
            logger.log('Skipping S&E matching as title search is forced on: %s' % (video.trakt_id), log_utils.LOGDEBUG)

        if (force_title or kodi.get_setting('title-fallback') == 'true') and video.ep_title and not episodes:
            run = cmd % (params['id'], video.season, 'title', video.ep_title)
            meta = xbmc.executeJSONRPC(run)
            meta = scraper_utils.parse_json(meta)
            logger.log('Episode Title Meta: %s' % (meta), log_utils.LOGDEBUG)
            if 'result' in meta and 'episodes' in meta['result']:
                episodes = meta['result']['episodes']

        for episode in episodes:
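            # skip .strm placeholders and return the first real library file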
            if episode['file'].endswith('.strm'):
                continue
            
            return base_url % (video.video_type, episode['episodeid'])
Example #7
    def get_sources(self, video):
        hosters = []
        source_url = self.get_url(video)
        if not source_url or source_url == FORCE_NO_MATCH: return hosters
        params = scraper_utils.parse_query(source_url)
        if video.video_type == VIDEO_TYPES.MOVIE:
            cmd = '{"jsonrpc": "2.0", "method": "VideoLibrary.GetMovieDetails", "params": {"movieid": %s, "properties" : ["file", "playcount", "streamdetails"]}, "id": "libMovies"}'
            result_key = 'moviedetails'
        else:
            cmd = '{"jsonrpc": "2.0", "method": "VideoLibrary.GetEpisodeDetails", "params": {"episodeid": %s, "properties" : ["file", "playcount", "streamdetails"]}, "id": "libTvShows"}'
            result_key = 'episodedetails'

        run = cmd % (params['id'])
        meta = xbmc.executeJSONRPC(run)
        meta = scraper_utils.parse_json(meta)
        logger.log('Source Meta: %s' % (meta), log_utils.LOGDEBUG)
        if result_key in meta.get('result', []):
            details = meta['result'][result_key]
            def_quality = [item[0] for item in sorted(SORT_KEYS['quality'].items(), key=lambda x:x[1])][self.def_quality]
            host = {'multi-part': False, 'class': self, 'url': details['file'], 'host': 'XBMC Library', 'quality': def_quality, 'views': details['playcount'], 'rating': None, 'direct': True}
            stream_details = details['streamdetails']
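            # when stream details are recorded, derive quality from the stored video width instead of the default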
            if len(stream_details['video']) > 0 and 'width' in stream_details['video'][0]:
                host['quality'] = scraper_utils.width_get_quality(stream_details['video'][0]['width'])
            hosters.append(host)
        return hosters
Example #8
 def __translate_search(self, url):
     query = scraper_utils.parse_query(url)
     if 'quality' in query:
         q_index = Q_DICT[query['quality']]
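         # keep only the qualities ranked at or below the requested one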
         q_list = [dd_qual for dd_qual in DD_QUALITIES if Q_DICT[dd_qual] <= q_index]
     else:
         q_list = self.q_order
     quality = '&'.join(['quality[]=%s' % (q) for q in q_list])
     return scraper_utils.urljoin(self.base_url, (SEARCH_URL % (API_KEY, quality, urllib.quote_plus(query['query']))))
 def __get_videos(self, source_url, video):
     videos = []
     query = scraper_utils.parse_query(source_url)
     if 'hash' in query:
         url = scraper_utils.urljoin(self.base_url, BROWSE_URL)
         js_data = self._http_get(url, params={'hash': query['hash']}, cache_limit=1)
         if 'content' in js_data:
             videos = self.__get_videos2(js_data['content'], video)
     return videos
Example #10
 def __translate_search(self, url):
     query = scraper_utils.parse_query(url)
     if 'quality' in query:
         q_index = Q_DICT[query['quality']]
         q_list = [dd_qual for dd_qual in DD_QUALITIES if Q_DICT[dd_qual] <= q_index]
     else:
         q_list = self.q_order
     quality = '&'.join(['quality[]=%s' % (q) for q in q_list])
     return scraper_utils.urljoin(self.base_url, (SEARCH_URL % (API_KEY, quality, urllib.quote_plus(query['query']))))
Example #11
 def __translate_search(self, url, search_type):
     query = scraper_utils.parse_query(url)
     query = query['query'] + ' lang:en'
     url = scraper_utils.urljoin(self.base_url, SEARCH_URL % (search_type))
     params = {'query': query, 'count': 200, 'from': 0, 'getmeta': 0}
     if self.username and self.password:
         params.update({'user': self.username, 'password': self.password})
     else:
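         # no credentials: return an empty URL, presumably so the caller skips this search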
         url = ''
     return url, params
Example #12
 def __translate_search(self, url, search_type):
     query = scraper_utils.parse_query(url)
     query = query['query'] + ' lang:en'
     url = scraper_utils.urljoin(self.base_url, SEARCH_URL % (search_type))
     params = {'query': query, 'count': 100, 'from': 0, 'getmeta': 0}
     if self.username and self.password:
         params.update({'user': self.username, 'password': self.password})
     else:
         url = ''
     return url, params
Example #13
 def __get_videos(self, source_url, video):
     videos = []
     query = scraper_utils.parse_query(source_url)
     if 'hash' in query:
         url = scraper_utils.urljoin(self.base_url, BROWSE_URL)
         js_data = self._http_get(url,
                                  params={'hash': query['hash']},
                                  cache_limit=1)
         if 'content' in js_data:
             videos = self.__get_videos2(js_data['content'], video)
     return videos
Example #14
 def get_sources(self, video):
     hosters = []
     source_url = self.get_url(video)
     if not source_url or source_url == FORCE_NO_MATCH: return hosters
     query = scraper_utils.parse_query(source_url)
     if 'id' in query:
         vid_type = 'movies' if video.video_type == VIDEO_TYPES.MOVIE else 'episodes'
         url = scraper_utils.urljoin(self.base_url, '/api/v2/%s/%s' % (vid_type, query['id']))
         js_data = self._http_get(url, cache_limit=.5)
         if 'url' in js_data:
             stream_url = js_data['url']
             quality = QUALITIES.HD720
             hoster = {'multi-part': False, 'host': scraper_utils.get_direct_hostname(self, stream_url), 'class': self, 'url': stream_url, 'quality': quality, 'views': None, 'rating': None, 'direct': True}
             hosters.append(hoster)
     return hosters
Example #15
 def resolve_link(self, link):
     query = scraper_utils.parse_query(link)
     if 'hash_id' in query:
         hash_id = query['hash_id'].lower()
         if self.__add_torrent(hash_id):
             browse_url = BROWSE_URL % (hash_id)
             browse_url = scraper_utils.urljoin(self.base_url, browse_url)
             js_data = self._json_get(browse_url, cache_limit=0)
             if 'content' in js_data:
                 videos = self.__get_videos(js_data['content'])
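                 # more than one video in the torrent: ask the user which stream to resolve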
                 
                 if len(videos) > 1:
                     result = xbmcgui.Dialog().select(i18n('choose_stream'), [video['label'] for video in videos])
                     if result > -1:
                         return videos[result]['url']
                 elif videos:
                     return videos[0]['url']
Example #16
 def __get_pk_links(self, html):
     hosters = []
     match = re.search(r'var\s+parametros\s*=\s*"([^"]+)', html)
     if match:
         params = scraper_utils.parse_query(match.group(1))
         if 'pic' in params:
             data = {'sou': 'pic', 'fv': '25', 'url': params['pic']}
             html = self._http_get(PK_URL,
                                   headers=XHR,
                                   data=data,
                                   cache_limit=0)
             js_data = scraper_utils.parse_json(html, PK_URL)
             for item in js_data:
                 if 'url' in item and item['url']:
                     if 'width' in item and item['width']:
                         quality = scraper_utils.width_get_quality(
                             item['width'])
                     elif 'height' in item and item['height']:
                         quality = scraper_utils.height_get_quality(
                             item['height'])
                     else:
                         quality = QUALITIES.HD720
                     stream_url = item['url'] + scraper_utils.append_headers(
                         {'User-Agent': scraper_utils.get_ua()})
                     hoster = {
                         'multi-part': False,
                         'url': stream_url,
                         'class': self,
                         'quality': quality,
                         'host': scraper_utils.get_direct_hostname(self, item['url']),
                         'rating': None,
                         'views': None,
                         'direct': True
                     }
                     hosters.append(hoster)
     return hosters
Example #17
    def get_sources(self, video):
        hosters = []
        source_url = self.get_url(video)
        if not source_url or source_url == FORCE_NO_MATCH: return hosters
        
        params = scraper_utils.parse_query(source_url)
        if video.video_type == VIDEO_TYPES.MOVIE:
            query = urllib.quote_plus('%s %s' % (params['title'], params['year']))
        else:
            query = urllib.quote_plus('%s S%02dE%02d' % (params['title'], int(params['season']), int(params['episode'])))
        query_url = '/search?query=%s' % (query)
        hosters = self.__get_links(query_url, video)
        if not hosters and video.video_type == VIDEO_TYPES.EPISODE and params['air_date']:
            query = urllib.quote_plus('%s %s' % (params['title'], params['air_date'].replace('-', '.')))
            query_url = '/search?query=%s' % (query)
            hosters = self.__get_links(query_url, video)

        return hosters
Example #18
    def resolve_link(self, link):
        query = scraper_utils.parse_query(link)
        if 'hash_id' in query:
            hash_id = query['hash_id'].lower()
            if self.__add_torrent(hash_id):
                browse_url = BROWSE_URL % (hash_id)
                browse_url = scraper_utils.urljoin(self.base_url, browse_url)
                js_data = self._json_get(browse_url, cache_limit=0)
                if 'content' in js_data:
                    videos = self.__get_videos(js_data['content'])

                    if len(videos) > 1:
                        result = xbmcgui.Dialog().select(
                            i18n('choose_stream'),
                            [video['label'] for video in videos])
                        if result > -1:
                            return videos[result]['url']
                    elif videos:
                        return videos[0]['url']
Example #19
 def __get_pk_links(self, html):
     hosters = []
     match = re.search(r'var\s+parametros\s*=\s*"([^"]+)', html)
     if match:
         params = scraper_utils.parse_query(match.group(1))
         if 'pic' in params:
             data = {'sou': 'pic', 'fv': '25', 'url': params['pic']}
             html = self._http_get(PK_URL, headers=XHR, data=data, cache_limit=0)
             js_data = scraper_utils.parse_json(html, PK_URL)
             for item in js_data:
                 if 'url' in item and item['url']:
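                     # prefer the reported width for quality, then height, otherwise assume 720p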
                     if 'width' in item and item['width']:
                         quality = scraper_utils.width_get_quality(item['width'])
                     elif 'height' in item and item['height']:
                         quality = scraper_utils.height_get_quality(item['height'])
                     else:
                         quality = QUALITIES.HD720
                     stream_url = item['url'] + scraper_utils.append_headers({'User-Agent': scraper_utils.get_ua()})
                     hoster = {'multi-part': False, 'url': stream_url, 'class': self, 'quality': quality, 'host': scraper_utils.get_direct_hostname(self, item['url']), 'rating': None, 'views': None, 'direct': True}
                     hosters.append(hoster)
     return hosters
Example #20
    def _get_episode_url(self, show_url, video):
        query = scraper_utils.parse_query(show_url)
        if 'id' in query:
            url = scraper_utils.urljoin(self.base_url,
                                        '/api/v2/shows/%s' % (query['id']))
            js_data = self._http_get(url, cache_limit=.5)
            if 'episodes' in js_data:
                force_title = scraper_utils.force_title(video)
                if not force_title:
                    for episode in js_data['episodes']:
                        if int(video.season) == int(episode['season']) and int(
                                video.episode) == int(episode['number']):
                            return scraper_utils.pathify_url('?id=%s' %
                                                             (episode['id']))

                    if kodi.get_setting(
                            'airdate-fallback') == 'true' and video.ep_airdate:
                        for episode in js_data['episodes']:
                            if 'airdate' in episode:
                                ep_airdate = scraper_utils.to_datetime(
                                    episode['airdate'], "%Y-%m-%d").date()
                                if video.ep_airdate == (
                                        ep_airdate -
                                        datetime.timedelta(days=1)):
                                    return scraper_utils.pathify_url(
                                        '?id=%s' % (episode['id']))
                else:
                    logger.log(
                        'Skipping S&E matching as title search is forced on: %s'
                        % (video.trakt_id), log_utils.LOGDEBUG)

                if (force_title or kodi.get_setting('title-fallback')
                        == 'true') and video.ep_title:
                    norm_title = scraper_utils.normalize_title(video.ep_title)
                    for episode in js_data['episodes']:
                        if 'name' in episode and norm_title in scraper_utils.normalize_title(
                                episode['name']):
                            return scraper_utils.pathify_url('?id=%s' %
                                                             (episode['id']))
Example #21
    def _cached_http_get(self, url, base_url, timeout, params=None, data=None, multipart_data=None, headers=None, cookies=None, allow_redirect=True,
                         method=None, require_debrid=False, read_error=False, cache_limit=8):
        if require_debrid:
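            # scrapers that require debrid only run when a universal debrid resolver is available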
            if Scraper.debrid_resolvers is None:
                Scraper.debrid_resolvers = [resolver for resolver in urlresolver.relevant_resolvers() if resolver.isUniversal()]
            if not Scraper.debrid_resolvers:
                logger.log('%s requires debrid: %s' % (self.__module__, Scraper.debrid_resolvers), log_utils.LOGDEBUG)
                return ''
                
        if cookies is None: cookies = {}
        if timeout == 0: timeout = None
        if headers is None: headers = {}
        if url.startswith('//'): url = 'http:' + url
        referer = headers['Referer'] if 'Referer' in headers else base_url
        if params:
            if url == base_url and not url.endswith('/'):
                url += '/'
            
            parts = urlparse.urlparse(url)
            if parts.query:
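                # fold the URL's existing query string into params, then rebuild the URL without it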
                params.update(scraper_utils.parse_query(url))
                url = urlparse.urlunparse((parts.scheme, parts.netloc, parts.path, parts.params, '', parts.fragment))
                
            url += '?' + urllib.urlencode(params)
        logger.log('Getting Url: %s cookie=|%s| data=|%s| extra headers=|%s|' % (url, cookies, data, headers), log_utils.LOGDEBUG)
        if data is not None:
            if isinstance(data, basestring):
                data = data
            else:
                data = urllib.urlencode(data, True)

        if multipart_data is not None:
            headers['Content-Type'] = 'multipart/form-data; boundary=X-X-X'
            data = multipart_data

        _created, _res_header, html = self.db_connection().get_cached_url(url, data, cache_limit)
        if html:
            logger.log('Returning cached result for: %s' % (url), log_utils.LOGDEBUG)
            return html

        try:
            self.cj = self._set_cookies(base_url, cookies)
            if isinstance(url, unicode): url = url.encode('utf-8')
            request = urllib2.Request(url, data=data)
            headers = headers.copy()
            request.add_header('User-Agent', scraper_utils.get_ua())
            request.add_header('Accept', '*/*')
            request.add_header('Accept-Encoding', 'gzip')
            request.add_unredirected_header('Host', request.get_host())
            if referer: request.add_unredirected_header('Referer', referer)
            if 'Referer' in headers: del headers['Referer']
            if 'Host' in headers: del headers['Host']
            for key, value in headers.iteritems(): request.add_header(key, value)
            self.cj.add_cookie_header(request)
            if not allow_redirect:
                opener = urllib2.build_opener(NoRedirection)
                urllib2.install_opener(opener)
            else:
                opener = urllib2.build_opener(urllib2.HTTPRedirectHandler)
                urllib2.install_opener(opener)
                opener2 = urllib2.build_opener(urllib2.HTTPCookieProcessor(self.cj))
                urllib2.install_opener(opener2)

            if method is not None: request.get_method = lambda: method.upper()
            response = urllib2.urlopen(request, timeout=timeout)
            self.cj.extract_cookies(response, request)
            if kodi.get_setting('cookie_debug') == 'true':
                logger.log('Response Cookies: %s - %s' % (url, scraper_utils.cookies_as_str(self.cj)), log_utils.LOGDEBUG)
            self.cj._cookies = scraper_utils.fix_bad_cookies(self.cj._cookies)
            self.cj.save(ignore_discard=True)
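            # with redirects disabled, return the Refresh/Location target instead of following it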
            if not allow_redirect and (response.getcode() in [301, 302, 303, 307] or response.info().getheader('Refresh')):
                if response.info().getheader('Refresh') is not None:
                    refresh = response.info().getheader('Refresh')
                    return refresh.split(';')[-1].split('url=')[-1]
                else:
                    redir_url = response.info().getheader('Location')
                    if redir_url.startswith('='):
                        redir_url = redir_url[1:]
                    return redir_url
            
            content_length = response.info().getheader('Content-Length', 0)
            if int(content_length) > MAX_RESPONSE:
                logger.log('Response exceeded allowed size. %s => %s / %s' % (url, content_length, MAX_RESPONSE), log_utils.LOGWARNING)
            
            if method == 'HEAD':
                return ''
            else:
                if response.info().get('Content-Encoding') == 'gzip':
                    html = ungz(response.read(MAX_RESPONSE))
                else:
                    html = response.read(MAX_RESPONSE)
        except urllib2.HTTPError as e:
            if e.info().get('Content-Encoding') == 'gzip':
                html = ungz(e.read(MAX_RESPONSE))
            else:
                html = e.read(MAX_RESPONSE)
                
            if CF_CAPCHA_ENABLED and e.code == 403 and 'cf-captcha-bookmark' in html:
                html = cf_captcha.solve(url, self.cj, scraper_utils.get_ua(), self.get_name())
                if not html:
                    return ''
            elif e.code == 503 and 'cf-browser-verification' in html:
                html = cloudflare.solve(url, self.cj, scraper_utils.get_ua(), extra_headers=headers)
                if not html:
                    return ''
            else:
                logger.log('Error (%s) during scraper http get: %s' % (str(e), url), log_utils.LOGWARNING)
                if not read_error:
                    return ''
        except Exception as e:
            logger.log('Error (%s) during scraper http get: %s' % (str(e), url), log_utils.LOGWARNING)
            return ''

        self.db_connection().cache_url(url, html, data)
        return html
Example #22
    def _cached_http_get(self,
                         url,
                         base_url,
                         timeout,
                         params=None,
                         data=None,
                         multipart_data=None,
                         headers=None,
                         cookies=None,
                         allow_redirect=True,
                         method=None,
                         require_debrid=False,
                         read_error=False,
                         cache_limit=8):
        if require_debrid:
            if Scraper.debrid_resolvers is None:
                Scraper.debrid_resolvers = [
                    resolver for resolver in urlresolver.relevant_resolvers()
                    if resolver.isUniversal()
                ]
            if not Scraper.debrid_resolvers:
                logger.log(
                    '%s requires debrid: %s' %
                    (self.__module__, Scraper.debrid_resolvers),
                    log_utils.LOGDEBUG)
                return ''

        if cookies is None: cookies = {}
        if timeout == 0: timeout = None
        if headers is None: headers = {}
        if url.startswith('//'): url = 'http:' + url
        referer = headers['Referer'] if 'Referer' in headers else base_url
        if params:
            if url == base_url and not url.endswith('/'):
                url += '/'

            parts = urlparse.urlparse(url)
            if parts.query:
                params.update(scraper_utils.parse_query(url))
                url = urlparse.urlunparse(
                    (parts.scheme, parts.netloc, parts.path, parts.params, '',
                     parts.fragment))

            url += '?' + urllib.urlencode(params)
        logger.log(
            'Getting Url: %s cookie=|%s| data=|%s| extra headers=|%s|' %
            (url, cookies, data, headers), log_utils.LOGDEBUG)
        if data is not None:
            if isinstance(data, basestring):
                data = data
            else:
                data = urllib.urlencode(data, True)

        if multipart_data is not None:
            headers['Content-Type'] = 'multipart/form-data; boundary=X-X-X'
            data = multipart_data

        _created, _res_header, html = self.db_connection().get_cached_url(
            url, data, cache_limit)
        if html:
            logger.log('Returning cached result for: %s' % (url),
                       log_utils.LOGDEBUG)
            return html

        try:
            self.cj = self._set_cookies(base_url, cookies)
            if isinstance(url, unicode): url = url.encode('utf-8')
            request = urllib2.Request(url, data=data)
            headers = headers.copy()
            request.add_header('User-Agent', scraper_utils.get_ua())
            request.add_header('Accept', '*/*')
            request.add_header('Accept-Encoding', 'gzip')
            request.add_unredirected_header('Host', request.get_host())
            if referer: request.add_unredirected_header('Referer', referer)
            if 'Referer' in headers: del headers['Referer']
            if 'Host' in headers: del headers['Host']
            for key, value in headers.iteritems():
                request.add_header(key, value)
            self.cj.add_cookie_header(request)
            if not allow_redirect:
                opener = urllib2.build_opener(NoRedirection)
                urllib2.install_opener(opener)
            else:
                opener = urllib2.build_opener(urllib2.HTTPRedirectHandler)
                urllib2.install_opener(opener)
                opener2 = urllib2.build_opener(
                    urllib2.HTTPCookieProcessor(self.cj))
                urllib2.install_opener(opener2)

            if method is not None: request.get_method = lambda: method.upper()
            response = urllib2.urlopen(request, timeout=timeout)
            self.cj.extract_cookies(response, request)
            if kodi.get_setting('cookie_debug') == 'true':
                logger.log(
                    'Response Cookies: %s - %s' %
                    (url, scraper_utils.cookies_as_str(self.cj)),
                    log_utils.LOGDEBUG)
            self.cj._cookies = scraper_utils.fix_bad_cookies(self.cj._cookies)
            self.cj.save(ignore_discard=True)
            if not allow_redirect and (
                    response.getcode() in [301, 302, 303, 307]
                    or response.info().getheader('Refresh')):
                if response.info().getheader('Refresh') is not None:
                    refresh = response.info().getheader('Refresh')
                    return refresh.split(';')[-1].split('url=')[-1]
                else:
                    redir_url = response.info().getheader('Location')
                    if redir_url.startswith('='):
                        redir_url = redir_url[1:]
                    return redir_url

            content_length = response.info().getheader('Content-Length', 0)
            if int(content_length) > MAX_RESPONSE:
                logger.log(
                    'Response exceeded allowed size. %s => %s / %s' %
                    (url, content_length, MAX_RESPONSE), log_utils.LOGWARNING)

            if method == 'HEAD':
                return ''
            else:
                if response.info().get('Content-Encoding') == 'gzip':
                    html = ungz(response.read(MAX_RESPONSE))
                else:
                    html = response.read(MAX_RESPONSE)
        except urllib2.HTTPError as e:
            if e.info().get('Content-Encoding') == 'gzip':
                html = ungz(e.read(MAX_RESPONSE))
            else:
                html = e.read(MAX_RESPONSE)

            if CF_CAPCHA_ENABLED and e.code == 403 and 'cf-captcha-bookmark' in html:
                html = cf_captcha.solve(url, self.cj, scraper_utils.get_ua(),
                                        self.get_name())
                if not html:
                    return ''
            elif e.code == 503 and 'cf-browser-verification' in html:
                html = cloudflare.solve(url,
                                        self.cj,
                                        scraper_utils.get_ua(),
                                        extra_headers=headers)
                if not html:
                    return ''
            else:
                logger.log(
                    'Error (%s) during scraper http get: %s' % (str(e), url),
                    log_utils.LOGWARNING)
                if not read_error:
                    return ''
        except Exception as e:
            logger.log(
                'Error (%s) during scraper http get: %s' % (str(e), url),
                log_utils.LOGWARNING)
            return ''

        self.db_connection().cache_url(url, html, data)
        return html
Example #23
 def __translate_search(self, url):
     query = {'moderated': 'yes', 'offset': 0, 'limit': self.max_results, 'match': 'all', 'cached': 'yes', 'attrs': 'name'}
     query['q'] = scraper_utils.parse_query(url)['query']
     return query
Example #24
 def _get_episode_url(self, season_url, video):
     query = scraper_utils.parse_query(season_url)
     if 'hash' in query:
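         # only reuse the season URL when one of its streams matches this episode's release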
         for stream in self.__get_videos(season_url, video):
             if scraper_utils.release_check(video, stream['name']):
                 return season_url
Example #25
 def __translate_search(self, url):
     params = SEARCH_PARAMS
     params['pby'] = self.max_results
     params['gps'] = params['sbj'] = scraper_utils.parse_query(url)['query']
     url = scraper_utils.urljoin(self.base_url, SEARCH_URL)
     return url, params
 def _get_episode_url(self, season_url, video):
     query = scraper_utils.parse_query(season_url)
     if 'hash' in query:
         for stream in self.__get_videos(season_url, video):
             if scraper_utils.release_check(video, stream['name']):
                 return season_url
Example #27
 def __translate_search(self, url):
     params = SEARCH_PARAMS
     params['pby'] = self.max_results
     params['gps'] = params['sbj'] = scraper_utils.parse_query(url)['query']
     url = scraper_utils.urljoin(self.base_url, SEARCH_URL)
     return url, params