Code example #1
    def _get_episode_url(self, show_url, video):
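        # Look up the episode in the user's local Kodi library via JSON-RPC,
        # matching on season/episode first and falling back to an episode-title search.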
        params = scraper_utils.parse_query(show_url)
        cmd = ('{"jsonrpc": "2.0", "method": "VideoLibrary.GetEpisodes", "params": {"tvshowid": %s, "season": %s, '
               '"filter": {"field": "%s", "operator": "is", "value": "%s"}, "limits": {"start": 0, "end": 25}, '
               '"properties": ["title", "season", "episode", "file", "streamdetails"], '
               '"sort": {"order": "ascending", "method": "label", "ignorearticle": true}}, "id": "libTvShows"}')

        base_url = 'video_type=%s&id=%s'
        episodes = []
        force_title = scraper_utils.force_title(video)
        if not force_title:
            run = cmd % (params['id'], video.season, 'episode', video.episode)
            meta = xbmc.executeJSONRPC(run)
            meta = scraper_utils.parse_json(meta)
            logger.log('Episode Meta: %s' % (meta), log_utils.LOGDEBUG)
            if 'result' in meta and 'episodes' in meta['result']:
                episodes = meta['result']['episodes']
        else:
            logger.log(
                'Skipping S&E matching as title search is forced on: %s' %
                (video.trakt_id), log_utils.LOGDEBUG)

        if (force_title or kodi.get_setting('title-fallback') == 'true') and video.ep_title and not episodes:
            run = cmd % (params['id'], video.season, 'title', video.ep_title)
            meta = xbmc.executeJSONRPC(run)
            meta = scraper_utils.parse_json(meta)
            logger.log('Episode Title Meta: %s' % (meta), log_utils.LOGDEBUG)
            if 'result' in meta and 'episodes' in meta['result']:
                episodes = meta['result']['episodes']

        for episode in episodes:
            if episode['file'].endswith('.strm'):
                continue

            return base_url % (video.video_type, episode['episodeid'])
Code example #2
    def get_sources(self, video):
        hosters = []
        sources = {}
        source_url = self.get_url(video)
        if not source_url or source_url == FORCE_NO_MATCH: return hosters
        page_url = scraper_utils.urljoin(self.base_url, source_url)
        html = self._http_get(page_url, cache_limit=.5)
        match = re.search("load_player\('([^']+)", html)
        if not match: return hosters
        
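        # Browser-like headers for the AJAX player request; 'Accept-Formating' is kept
        # misspelled, presumably to match what the site's own XHR sends.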
        headers = {'Referer': page_url, 'Server': 'cloudflare-nginx', 'Accept': 'text/html, */*; q=0.01',
                   'Accept-Language': 'en-US,en;q=0.5', 'Accept-Formating': 'application/json, text/javascript', 'Accept-Encoding': 'gzip, deflate'}
        headers.update(XHR)
        params = {'id': match.group(1)}
        player_url = scraper_utils.urljoin(self.base_url, PLAYER_URL)
        html = self._http_get(player_url, params=params, headers=headers, cache_limit=1)
        js_data = scraper_utils.parse_json(html, player_url)
        pl_url = js_data.get('value') or js_data.get('download')
        if not pl_url: return hosters
        
        headers = {'Referer': page_url}
        if pl_url.startswith('//'): pl_url = 'https:' + pl_url
        html = self._http_get(pl_url, headers=headers, allow_redirect=False, cache_limit=0)
        if html.startswith('http'):
            streams = [(html, '')]
        else:
            js_data = scraper_utils.parse_json(html, pl_url)
            try: streams = [(source['file'], source.get('label', '')) for source in js_data['playlist'][0]['sources']]
            except: streams = []
            
        for stream in streams:
            stream_url, label = stream
            if scraper_utils.get_direct_hostname(self, stream_url) == 'gvideo':
                sources[stream_url] = {'quality': scraper_utils.gv_get_quality(stream_url), 'direct': True}
            else:
                if label:
                    quality = scraper_utils.height_get_quality(label)
                else:
                    quality = QUALITIES.HIGH
                sources[stream_url] = {'quality': quality, 'direct': False}
                    
        for source, value in sources.iteritems():
            direct = value['direct']
            quality = value['quality']
            if direct:
                host = scraper_utils.get_direct_hostname(self, source)
            else:
                host = urlparse.urlparse(source).hostname

            stream_url = source + scraper_utils.append_headers({'User-Agent': scraper_utils.get_ua()})
            hoster = {'multi-part': False, 'host': host, 'class': self, 'quality': quality, 'views': None, 'rating': None, 'url': stream_url, 'direct': direct}
            hosters.append(hoster)
            
        return hosters
Code example #3
 def __get_links_from_json2(self, url, page_url, video_type):
     sources = {}
     headers = {'Referer': page_url}
     headers.update(XHR)
     html = self._http_get(url, headers=headers, cache_limit=0)
     js_data = scraper_utils.parse_json(html, url)
     try:
         playlist = js_data.get('playlist', [])
         for source in playlist[0].get('sources', []):
             stream_url = source['file']
             if scraper_utils.get_direct_hostname(self, stream_url) == 'gvideo':
                 quality = scraper_utils.gv_get_quality(stream_url)
             elif 'label' in source:
                 quality = scraper_utils.height_get_quality(source['label'])
             else:
                 if video_type == VIDEO_TYPES.MOVIE:
                     meta = scraper_utils.parse_movie_link(stream_url)
                 else:
                     meta = scraper_utils.parse_episode_link(stream_url)
                 quality = scraper_utils.height_get_quality(meta['height'])
             sources[stream_url] = {'quality': quality, 'direct': True}
             logger.log(
                 'Adding stream: %s Quality: %s' % (stream_url, quality),
                 log_utils.LOGDEBUG)
     except Exception as e:
         logger.log('Exception during yesmovies extract: %s' % (e),
                    log_utils.LOGDEBUG)
     return sources
Code example #4
    def __get_params(self, grab_url, episode_id, movie_id, page_url):
        hash_id, token, ts = None, None, None
        url = scraper_utils.urljoin(grab_url,
                                    '/token_v2.php',
                                    replace_path=True)
        headers = {'Referer': page_url}
        params = {
            'eid': episode_id,
            'mid': movie_id,
            '_': int(time.time() * 1000)
        }
        html = self._http_get(url,
                              params=params,
                              headers=headers,
                              cache_limit=0)
        if aa_decoder.is_aaencoded(html):
            html = aa_decoder.decode(html)
            match1 = re.search("hash\s*=\s*'([^']+)", html)
            match2 = re.search("token\s*=\s*'([^']+)", html)
            match3 = re.search("_\s*=\s*'([^']+)", html)
            if match1 and match2 and match3:
                hash_id = match1.group(1)
                token = match2.group(1)
                ts = match3.group(1)
        else:
            js_data = scraper_utils.parse_json(html, url)
            hash_id, token, ts = js_data.get('hash'), js_data.get('token'), js_data.get('_')

        return hash_id, token, ts
Code example #5
 def __get_ok(self, embed, flashvars):
     hosters = []
     link = flashvars[0].attrs['value']
     match = re.search('metadataUrl=([^"]+)', link)
     if match:
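         # the flashvars carry a metadataUrl; its JSON response holds the real stream URL under movie.url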
         referer = scraper_utils.cleanse_title(
             urllib.unquote(embed[0].attrs['data']))
         ok_url = scraper_utils.cleanse_title(urllib.unquote(
             match.group(1)))
         html = self._http_get(ok_url,
                               data='ok',
                               headers={'Referer': referer},
                               cache_limit=.25)
         js_data = scraper_utils.parse_json(html, ok_url)
         stream_url = js_data.get('movie', {}).get('url')
         if stream_url is not None:
             host = urlparse.urlparse(stream_url).hostname
             hoster = {
                 'multi-part': False,
                 'host': host,
                 'class': self,
                 'quality': QUALITIES.HD720,
                 'views': None,
                 'rating': None,
                 'url': stream_url,
                 'direct': False,
                 'subs': 'Turkish Subtitles'
             }
             hosters.append(hoster)
     return hosters
Code example #6
 def __get_gk_links(self, html):
     sources = {}
     match = re.search('{link\s*:\s*"([^"]+)', html)
     if match:
         iframe_url = match.group(1)
         data = {'link': iframe_url}
         headers = {'Referer': iframe_url}
         html = self._http_get(self.gk_url, data=data, headers=headers, cache_limit=.5)
         js_data = scraper_utils.parse_json(html, self.gk_url)
         links = js_data.get('link', [])
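          # a single link may come back as a bare string; normalize it to the list-of-dicts shape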
         if isinstance(links, basestring):
             links = [{'link': links}]
             
         for link in links:
             stream_url = link['link']
             if scraper_utils.get_direct_hostname(self, stream_url) == 'openload.co':
                 quality = scraper_utils.gv_get_quality(stream_url)
                 direct = True
             elif 'label' in link:
                 quality = scraper_utils.height_get_quality(link['label'])
                 direct = True
             else:
                 quality = QUALITIES.HIGH
                 direct = False
             sources[stream_url] = {'quality': quality, 'direct': direct}
     return sources
Code example #7
    def search(self, video_type, title, year, season=''):  # @UnusedVariable
        results = []
        search_url = scraper_utils.urljoin(self.base_url,
                                           '/search/searchBoxSuggestion')
        html = self._http_get(search_url,
                              params={
                                  'top': 8,
                                  'query': title
                              },
                              cache_limit=8)
        js_data = scraper_utils.parse_json(html, search_url)
        for item in js_data:
            entityName = match_title_year = item.get('Value', '')
            if entityName:
                match_title, match_year2 = scraper_utils.extra_year(match_title_year)
                match_year = str(item.get('ReleaseYear', ''))
                if not match_year: match_year = match_year2

                match_url = '/ontology/EntityDetails?' + urllib.urlencode(
                    {
                        'entityName': entityName,
                        'ignoreMediaLinkError': 'false'
                    })
                if not year or not match_year or year == match_year:
                    result = {
                        'title': scraper_utils.cleanse_title(match_title),
                        'year': match_year,
                        'url': scraper_utils.pathify_url(match_url)
                    }
                    results.append(result)

        return results
Code example #8
    def search(self, video_type, title, year, season=''):  # @UnusedVariable
        results = []
        media_type = 'series' if video_type == VIDEO_TYPES.TVSHOW else 'movie'
        search_url = scraper_utils.urljoin(
            self.base_url, '/typeahead/%s' % (urllib.quote(title)))
        headers = {'Referer': self.base_url}
        headers.update(XHR)
        html = self._http_get(search_url,
                              headers=headers,
                              require_debrid=True,
                              cache_limit=.5)
        for item in scraper_utils.parse_json(html, search_url):
            match_title = item.get('title')
            match_url = item.get('link')
            match_year = ''
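            # the typeahead response carries no year, so match_year stays empty and the year check always passes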
            if item.get('type') == media_type and match_title and match_url:
                if not year or not match_year or year == match_year:
                    result = {
                        'title': scraper_utils.cleanse_title(match_title),
                        'year': match_year,
                        'url': scraper_utils.pathify_url(match_url)
                    }
                    results.append(result)

        return results
Code example #9
    def __get_movie_sources(self, page_url):
        sources = []
        headers = {'Referer': ''}
        html = self._http_get(page_url, headers=headers, cache_limit=.5)
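        # the page builds the contents URL in JS as APP_PATH + "<part1>" + <var> + "<part2>";
        # capture the pieces, then resolve the variable's value with a second search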
        match = re.search('APP_PATH\+"([^"]+)"\+([^"]+)\+"([^"]+)"', html)
        if match:
            url1, var, url2 = match.groups()
            match = re.search("%s\s*=\s*'([^']+)" % (var), html)
            if match:
                headers = {'Referer': page_url}
                headers.update(XHR)
                contents_url = '/' + url1 + match.group(1) + url2
                contents_url = scraper_utils.urljoin(self.base_url,
                                                     contents_url)
                js_data = scraper_utils.parse_json(
                    self._http_get(contents_url,
                                   headers=headers,
                                   cache_limit=2), contents_url)
                if js_data:
                    sources = [
                        item['src'] for item in js_data if 'src' in item
                    ]

        match = re.search("openloadLink\s*=\s*'([^']+)", html, re.I)
        if match:
            sources.append(match.group(1))

        return sources
Code example #10
    def _http_get(self,
                  url,
                  params=None,
                  data=None,
                  allow_redirect=True,
                  cache_limit=8):
        if not self.username or not self.password:
            return {}

        if data is None: data = {}
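        # every request is authenticated by folding the account's customer_id/pin into the POST data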
        data.update({'customer_id': self.username, 'pin': self.password})
        result = super(self.__class__,
                       self)._http_get(url,
                                       params=params,
                                       data=data,
                                       allow_redirect=allow_redirect,
                                       cache_limit=cache_limit)
        js_result = scraper_utils.parse_json(result, url)
        if 'status' in js_result and js_result['status'] == 'error':
            logger.log(
                'Premiumize V2 Scraper Error: %s - (%s)' %
                (url, js_result.get('message', 'Unknown Error')),
                log_utils.LOGWARNING)
            js_result = {}

        return js_result
Code example #11
 def __get_cloud_links(self, html, page_url, sub):
     hosters = []
     html = html.replace('\\"', '"').replace('\\/', '/')
     match = re.search("dizi_kapak_getir\('([^']+)", html)
     if match:
         ep_id = match.group(1)
         for attrs, _content in dom_parser2.parse_dom(html, 'script', {'data-cfasync': 'false'}, req='src'):
             script_url = attrs['src']
             html = self._http_get(script_url, cache_limit=24)
             match1 = re.search("var\s+kapak_url\s*=\s*'([^']+)", html)
             match2 = re.search("var\s+aCtkp\s*=\s*'([^']+)", html)
             if match1 and match2:
                 link_url = '%s?fileid=%s&access_token=%s' % (match1.group(1), ep_id, match2.group(1))
                 headers = {'Referer': page_url}
                 html = self._http_get(link_url, headers=headers, cache_limit=.5)
                 js_data = scraper_utils.parse_json(html, link_url)
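                 # each variant lists mirror hosts; pick one at random and rebuild the stream URL from it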
                 for variant in js_data.get('variants', []):
                     hosts = variant.get('hosts', [])
                     # random.choice() raises IndexError on an empty sequence, so guard first
                     stream_host = random.choice(hosts) if hosts else None
                     if stream_host:
                         stream_url = stream_host + variant['path'] + scraper_utils.append_headers({'User-Agent': scraper_utils.get_ua(), 'Referer': page_url})
                         if not stream_url.startswith('http'):
                             stream_url = 'http://' + stream_url
                         host = scraper_utils.get_direct_hostname(self, stream_url)
                         if 'width' in variant:
                             quality = scraper_utils.width_get_quality(variant['width'])
                         elif 'height' in variant:
                             quality = scraper_utils.height_get_quality(variant['height'])
                         else:
                             quality = QUALITIES.HIGH
                         hoster = {'multi-part': False, 'host': host, 'class': self, 'quality': quality, 'views': None, 'rating': None, 'url': stream_url, 'direct': True}
                         hoster['subs'] = sub
                         hosters.append(hoster)
     return hosters
Code example #12
    def __get_ajax_sources(self, html, page_url):
        hosters = []
        match = re.search('''url\s*:\s*"([^"]+)"\s*,\s*data:'id=''', html)
        if match:
            ajax_url = match.group(1)
            for data_id in re.findall("kaynakdegis\('([^']+)", html):
                url = scraper_utils.urljoin(self.base_url, ajax_url)
                data = {'id': data_id}
                headers = {'Referer': page_url}
                headers.update(XHR)
                result = self._http_get(url,
                                        data=data,
                                        headers=headers,
                                        cache_limit=.5)
                js_data = scraper_utils.parse_json(result, url)
                if 'iframe' in js_data:
                    if self.base_url in js_data['iframe']:
                        hosters += self.__get_iframe_sources(
                            js_data['iframe'], page_url)
                    else:
                        hosters.append(
                            self.__create_source(js_data['iframe'],
                                                 720,
                                                 page_url,
                                                 direct=False))
                else:
                    hosters += self.__get_js_sources(js_data, page_url)

        return hosters
Code example #13
    def search(self, video_type, title, year, season=''):  # @UnusedVariable
        results = []
        search_url = scraper_utils.urljoin(self.base_url, '/ajax/search.php')
        timestamp = int(time.time() * 1000)
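        # millisecond timestamp, presumably a cache-buster expected by the endpoint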
        query = {
            'q': title,
            'limit': 100,
            'timestamp': timestamp,
            'verifiedCheck': ''
        }
        html = self._http_get(search_url,
                              data=query,
                              headers=XHR,
                              cache_limit=1)
        if video_type in [VIDEO_TYPES.TVSHOW, VIDEO_TYPES.EPISODE]:
            media_type = 'TV SHOW'
        else:
            media_type = 'MOVIE'

        js_data = scraper_utils.parse_json(html, search_url)
        for item in js_data:
            if not item['meta'].upper().startswith(media_type): continue

            result = {
                'title': scraper_utils.cleanse_title(item['title']),
                'url': scraper_utils.pathify_url(item['permalink']),
                'year': ''
            }
            results.append(result)

        return results
Code example #14
    def get_sources(self, video):
        source_url = self.get_url(video)
        hosters = []
        if not source_url or source_url == FORCE_NO_MATCH: return hosters
        url = scraper_utils.urljoin(self.base_url, source_url)
        html = self._http_get(url, headers=XHR, cache_limit=8)
        js_data = scraper_utils.parse_json(html, url)
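        # Q_MAP translates the site's MovieDefinition values into the add-on's quality constants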
        quality = Q_MAP.get(js_data.get('Key', {}).get('MovieDefinition'), QUALITIES.HIGH)
        value = js_data.get('Value', {})
        stream_url = value.get('VideoLink')
        if stream_url and value.get('ProviderSource', '').lower() == 'youtube':
            host = 'youtube.com'
            source = {
                'multi-part': False,
                'url': stream_url,
                'host': host,
                'class': self,
                'quality': quality,
                'views': None,
                'rating': None,
                'direct': False
            }
            hosters.append(source)

        return hosters
Code example #15
 def _get_episode_url(self, show_url, video):
     object_id = self.__extract_id(show_url)
     if object_id is None: return
     url = scraper_utils.urljoin(self.base_url,
                                 TITLE_URL.format(id=object_id))
     html = self._authed_http_get(url, cache_limit=2)
     js_data = scraper_utils.parse_json(html, url)
     if self.__episode_match(js_data, video):
         return show_url
Code example #16
 def __login(self):
     url = scraper_utils.urljoin(self.base_url, '/apis/v2/user/login.json')
     data = {'email': self.username, 'password': self.password, 'rememberMe': True}
     referer = scraper_utils.urljoin(self.base_url, '/login')
     headers = {'Content-Type': 'application/json', 'Referer': referer}
     headers.update(XHR)
     html = super(self.__class__, self)._http_get(url, data=json.dumps(data), headers=headers, cache_limit=0)
     js_data = scraper_utils.parse_json(html, url)
     return js_data.get('status') == 'success'
Code example #17
 def __login(self):
     url = scraper_utils.urljoin(self.base_url, '/api/v1/user/login')
     data = {'user': self.username, 'password': self.password}
     headers = {'Content-Type': 'application/json'}
     html = self._http_get(url,
                           data=json.dumps(data),
                           headers=headers,
                           cache_limit=0)
     js_data = scraper_utils.parse_json(html, url)
     if 'user' not in js_data: raise Exception('sit2play login failed')
Code example #18
 def __get_link_from_json(self, url):
     sources = {}
     html = self._http_get(url, cache_limit=.5)
     js_result = scraper_utils.parse_json(html, url)
     if 'src' in js_result:
         sources[js_result['src']] = {
             'quality': QUALITIES.HIGH,
             'direct': False
         }
     return sources
Code example #19
    def __get_links(self, url, video):
        hosters = []
        search_url, params = self.__translate_search(url)
        html = self._http_get(search_url, params=params, cache_limit=.5)
        js_result = scraper_utils.parse_json(html, search_url)
        down_url = js_result.get('downURL')
        dl_farm = js_result.get('dlFarm')
        dl_port = js_result.get('dlPort')
        for item in js_result.get('data', []):
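            # EasyNews returns positional fields: 0=hash, 4=size, 10=title, 11=extension, 14=duration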
            post_hash, size, post_title, ext, duration = item['0'], item['4'], item['10'], item['11'], item['14']
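            # each True below marks a reason to exclude the post: bad title match, no English
            # audio, too short, password-protected, flagged as a virus, or not a video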
            checks = [False] * 6
            if not scraper_utils.release_check(video, post_title): checks[0] = True
            if 'alangs' in item and item['alangs'] and 'eng' not in item['alangs']: checks[1] = True
            if re.match('^\d+s', duration) or re.match('^[0-5]m', duration): checks[2] = True
            if 'passwd' in item and item['passwd']: checks[3] = True
            if 'virus' in item and item['virus']: checks[4] = True
            if 'type' in item and item['type'].upper() != 'VIDEO': checks[5] = True
            if any(checks):
                logger.log('EasyNews Post excluded: %s - |%s|' % (checks, item), log_utils.LOGDEBUG)
                continue
            
            stream_url = down_url + urllib.quote('/%s/%s/%s%s/%s%s' % (dl_farm, dl_port, post_hash, ext, post_title, ext))
            stream_url = stream_url + '|Authorization=%s' % (urllib.quote(self.auth))
            host = scraper_utils.get_direct_hostname(self, stream_url)
            quality = None
            if 'width' in item:
                try: width = int(item['width'])
                except: width = 0
                if width:
                    quality = scraper_utils.width_get_quality(width)
            
            if quality is None:
                if video.video_type == VIDEO_TYPES.MOVIE:
                    meta = scraper_utils.parse_movie_link(post_title)
                else:
                    meta = scraper_utils.parse_episode_link(post_title)
                quality = scraper_utils.height_get_quality(meta['height'])
                
            if self.max_bytes:
                match = re.search('([\d.]+)\s+(.*)', size)
                if match:
                    size_bytes = scraper_utils.to_bytes(*match.groups())
                    if size_bytes > self.max_bytes:
                        logger.log('Result skipped, Too big: |%s| - %s (%s) > %s (%s GB)' % (post_title, size_bytes, size, self.max_bytes, self.max_gb))
                        continue

            hoster = {'multi-part': False, 'class': self, 'views': None, 'url': stream_url, 'rating': None, 'host': host, 'quality': quality, 'direct': True}
            if any(i for i in ['X265', 'HEVC'] if i in post_title.upper()): hoster['format'] = 'x265'
            if size: hoster['size'] = size
            if post_title: hoster['extra'] = post_title
            hosters.append(hoster)
        return hosters
Code example #20
    def search(self, video_type, title, year, season=''):  # @UnusedVariable
        results = []
        search_url = scraper_utils.urljoin(self.base_url, '/search/ajax_search')
        html = self._http_get(search_url, params={'q': title}, headers=XHR, cache_limit=1)
        js_result = scraper_utils.parse_json(html, search_url)
        match_year = ''
        for series in js_result.get('series', []):
            match_url = series.get('seo')
            match_title = series.get('label')
            if match_url and match_title and (not year or not match_year or year == match_year):
                result = {'url': scraper_utils.pathify_url('/' + match_url), 'title': scraper_utils.cleanse_title(match_title), 'year': match_year}
                results.append(result)

        return results
Code example #21
    def __get_links(self, url, video):
        hosters = []
        seen_urls = set()
        for search_type in SEARCH_TYPES:
            search_url, params = self.__translate_search(url, search_type)
            if not search_url: continue
            html = self._http_get(search_url, params=params, cache_limit=.5)
            js_result = scraper_utils.parse_json(html, search_url)
            if js_result.get('status') != 'success':
                logger.log(
                    'Alluc API Error: |%s|%s|: %s' %
                    (search_url, params,
                     js_result.get('message', 'Unknown Error')),
                    log_utils.LOGWARNING)
                continue

            for result in js_result['result']:
                # a post must have exactly one hoster URL; an empty list would crash the [0] lookup
                if len(result['hosterurls']) != 1: continue
                if result['extension'] == 'rar': continue
                stream_url = result['hosterurls'][0]['url']
                if stream_url in seen_urls: continue

                if scraper_utils.release_check(video, result['title']):
                    host = urlparse.urlsplit(stream_url).hostname
                    quality = scraper_utils.get_quality(
                        video, host, self._get_title_quality(result['title']))
                    hoster = {
                        'multi-part': False,
                        'class': self,
                        'views': None,
                        'url': stream_url,
                        'rating': None,
                        'host': host,
                        'quality': quality,
                        'direct': False
                    }
                    hoster['extra'] = scraper_utils.cleanse_title(
                        result['title'])
                    if video.video_type == VIDEO_TYPES.MOVIE:
                        meta = scraper_utils.parse_movie_link(hoster['extra'])
                    else:
                        meta = scraper_utils.parse_episode_link(
                            hoster['extra'])
                    if 'format' in meta: hoster['format'] = meta['format']

                    hosters.append(hoster)
                    seen_urls.add(stream_url)

        return hosters
Code example #22
 def __get_ajax_sources(self, html, page_url):
     stream_url = ''
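     # capture the URL and parameter object of the page's $.getJSON(...) call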
     match = re.search('''\$\.getJSON\('([^']+)'\s*,\s*(\{.*?\})''', html)
     if match:
         ajax_url, params = match.groups()
         params = scraper_utils.parse_params(params)
         ajax_url = scraper_utils.urljoin(self.base_url, ajax_url)
         headers = {'Referer': page_url}
         headers.update(XHR)
         html = self._http_get(ajax_url,
                               params=params,
                               headers=headers,
                               cache_limit=.5)
         js_data = scraper_utils.parse_json(html, ajax_url)
         stream_url = js_data.get('file', '')
     return stream_url
Code example #23
    def get_sources(self, video):
        hosters = []
        source_url = self.get_url(video)
        if not source_url or source_url == FORCE_NO_MATCH: return hosters
        page_url = scraper_utils.urljoin(self.base_url, source_url)
        html = self._http_get(page_url, cache_limit=0)
        match = re.search('var\s*video_id\s*=\s*"([^"]+)', html)
        if not match: return hosters

        video_id = match.group(1)
        headers = {'Referer': page_url}
        headers.update(XHR)
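        # priming request: the response is unused but presumably sets server-side state for the video call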
        _html = self._http_get(scraper_utils.urljoin(self.base_url, 'av'),
                               headers=headers,
                               method='POST',
                               cache_limit=0)

        vid_url = scraper_utils.urljoin(self.base_url, VIDEO_URL)
        html = self._http_get(vid_url,
                              data={'v': video_id},
                              headers=headers,
                              cache_limit=0)
        for source, value in scraper_utils.parse_json(html, vid_url).iteritems():
            match = re.search('url=(.*)', value)
            if not match: continue
            stream_url = urllib.unquote(match.group(1))

            host = scraper_utils.get_direct_hostname(self, stream_url)
            if host == 'gvideo':
                quality = scraper_utils.gv_get_quality(stream_url)
            else:
                quality = scraper_utils.height_get_quality(source)
            stream_url += scraper_utils.append_headers(
                {'User-Agent': scraper_utils.get_ua()})
            hoster = {
                'multi-part': False,
                'host': host,
                'class': self,
                'quality': quality,
                'views': None,
                'rating': None,
                'url': stream_url,
                'direct': True
            }
            hosters.append(hoster)
        return hosters
Code example #24
    def search(self, video_type, title, year, season=''):  # @UnusedVariable
        results = []
        url = scraper_utils.urljoin(self.base_url, AJAX_URL)
        data = {'type': 'getDizi'}
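        # 'getDizi' asks the archive endpoint for the full series list ('dizi' is Turkish for TV series)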
        headers = {'Referer': scraper_utils.urljoin(self.base_url, '/arsiv')}
        headers.update(XHR)
        html = self._http_get(url, data=data, headers=headers, cache_limit=48)
        norm_title = scraper_utils.normalize_title(title)
        match_year = ''
        js_data = scraper_utils.parse_json(html, url)
        for item in js_data.get('data', []):
            match_title = item.get('adi', '')
            if 'url' in item and norm_title in scraper_utils.normalize_title(match_title):
                result = {'url': scraper_utils.pathify_url(item['url']), 'title': scraper_utils.cleanse_title(match_title), 'year': match_year}
                results.append(result)

        return results
Code example #25
    def search(self, video_type, title, year, season=''):  # @UnusedVariable
        results = []
        self.__get_token()
        if self.__token is None: return results

        search_url, u = self.__get_search_url()
        search_url = scraper_utils.urljoin(API_BASE_URL, search_url)
        timestamp = int(time.time() * 1000)
        s = self.__get_s()
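        # verifiedCheck/set/rt/sl look like anti-scraping tokens derived from the session token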
        query = {
            'q': title,
            'limit': '100',
            'timestamp': timestamp,
            'verifiedCheck': self.__token,
            'set': s,
            'rt': self.__get_rt(self.__token + s),
            'sl': self.__get_sl(u)
        }
        headers = {'Referer': self.base_url}
        html = self._http_get(search_url,
                              data=query,
                              headers=headers,
                              cache_limit=1)
        if video_type in [VIDEO_TYPES.TVSHOW, VIDEO_TYPES.EPISODE]:
            media_type = 'TV SHOW'
        else:
            media_type = 'MOVIE'

        for item in scraper_utils.parse_json(html, search_url):
            if not item['meta'].upper().startswith(media_type): continue

            match_year = str(item['year']) if 'year' in item and item['year'] else ''
            if not year or not match_year or year == match_year:
                result = {
                    'title': scraper_utils.cleanse_title(item['title']),
                    'url': scraper_utils.pathify_url(item['permalink'].replace('/show/', '/tv-show/')),
                    'year': match_year
                }
                results.append(result)

        return results
Code example #26
    def __get_gvideo_links(self, link):
        sources = []
        html = self._http_get(link, cache_limit=1)
        html = self.__decode_link(html)
        match = re.search('{\s*link\s*:\s*"([^"]+)', html)
        if match:
            data = {'link': match.group(1)}
            headers = {'Referer': link}
            html = self._http_get(GK_URL,
                                  data=data,
                                  headers=headers,
                                  cache_limit=.5)
            # parse_json takes the request URL (not the POST data) as its second argument
            js_data = scraper_utils.parse_json(html, GK_URL)
            for gv_link in js_data.get('link', []):
                sources.append({'host': '', 'link': gv_link['link']})

        return sources
Code example #27
 def __get_pk_links(self, html):
     hosters = []
     match = re.search('var\s+parametros\s*=\s*"([^"]+)', html)
     if match:
         params = scraper_utils.parse_query(match.group(1))
         if 'pic' in params:
             data = {'sou': 'pic', 'fv': '25', 'url': params['pic']}
             html = self._http_get(PK_URL,
                                   headers=XHR,
                                   data=data,
                                   cache_limit=0)
             js_data = scraper_utils.parse_json(html, PK_URL)
             for item in js_data:
                 if 'url' in item and item['url']:
                     if 'width' in item and item['width']:
                         quality = scraper_utils.width_get_quality(
                             item['width'])
                     elif 'height' in item and item['height']:
                         quality = scraper_utils.height_get_quality(
                             item['height'])
                     else:
                         quality = QUALITIES.HD720
                     stream_url = item['url'] + scraper_utils.append_headers({'User-Agent': scraper_utils.get_ua()})
                     hoster = {'multi-part': False, 'url': stream_url, 'class': self, 'quality': quality,
                               'host': scraper_utils.get_direct_hostname(self, item['url']),
                               'rating': None, 'views': None, 'direct': True}
                     hosters.append(hoster)
     return hosters
Code example #28
    def __get_linked_sources(self, html):
        sources = []
        subs = 'Turkish subtitles'
        match = re.search('fvid\s*=\s*"([^"]+)', html)
        if match:
            html = self._http_get(AJAX_URL,
                                  params={'dizi': match.group(1)},
                                  headers=XHR,
                                  cache_limit=.5)
            js_result = scraper_utils.parse_json(html, AJAX_URL)
            # subs are hardcoded if none exist
            subs = '' if js_result.get('altyazi') else 'Turkish subtitles'
            for source in js_result.get('success', []):
                if 'src' in source:
                    sources.append(source['src'])

        return {'sources': sources, 'subs': subs}
Code example #29
    def __get_results(self, cmd, result_key, video_type, id_key):
        results = []
        logger.log('Search Command: %s' % (cmd), log_utils.LOGDEBUG)
        meta = xbmc.executeJSONRPC(cmd)
        meta = scraper_utils.parse_json(meta)
        logger.log('Search Meta: %s' % (meta), log_utils.LOGDEBUG)
        for item in meta.get('result', {}).get(result_key, []):
            if video_type == VIDEO_TYPES.MOVIE and item['file'].endswith('.strm'):
                continue

            result = {
                'title': item['title'],
                'year': item['year'],
                'url': 'video_type=%s&id=%s' % (video_type, item[id_key])
            }
            results.append(result)
        return results
Code example #30
 def __get_json_links(self, html, sub):
     hosters = []
     js_data = scraper_utils.parse_json(html)
     if 'sources' in js_data:
         for source in js_data.get('sources', []):
             stream_url = source.get('file')
             if stream_url is None: continue
             
             host = scraper_utils.get_direct_hostname(self, stream_url)
             if host == 'gvideo':
                 quality = scraper_utils.gv_get_quality(stream_url)
             elif 'label' in source:
                 quality = scraper_utils.height_get_quality(source['label'])
             else:
                 quality = QUALITIES.HIGH
             hoster = {'multi-part': False, 'host': host, 'class': self, 'quality': quality, 'views': None, 'rating': None, 'url': stream_url, 'direct': True}
             hoster['subs'] = sub
             hosters.append(hoster)
     return hosters