    def _get_episode_url(self, show_url, video):
        params = scraper_utils.parse_query(show_url)
        cmd = '{"jsonrpc": "2.0", "method": "VideoLibrary.GetEpisodes", "params": {"tvshowid": %s, "season": %s, "filter": {"field": "%s", "operator": "is", "value": "%s"}, \
        "limits": { "start" : 0, "end": 25 }, "properties" : ["title", "season", "episode", "file", "streamdetails"], "sort": { "order": "ascending", "method": "label", "ignorearticle": true }}, "id": "libTvShows"}'

        base_url = 'video_type=%s&id=%s'
        episodes = []
        force_title = scraper_utils.force_title(video)
        if not force_title:
            run = cmd % (params['id'], video.season, 'episode', video.episode)
            meta = xbmc.executeJSONRPC(run)
            meta = scraper_utils.parse_json(meta)
            logger.log('Episode Meta: %s' % (meta), log_utils.LOGDEBUG)
            if 'result' in meta and 'episodes' in meta['result']:
                episodes = meta['result']['episodes']
        else:
            logger.log(
                'Skipping S&E matching as title search is forced on: %s' %
                (video.trakt_id), log_utils.LOGDEBUG)

        if (force_title or kodi.get_setting('title-fallback') == 'true') and video.ep_title and not episodes:
            run = cmd % (params['id'], video.season, 'title', video.ep_title)
            meta = xbmc.executeJSONRPC(run)
            meta = scraper_utils.parse_json(meta)
            logger.log('Episode Title Meta: %s' % (meta), log_utils.LOGDEBUG)
            if 'result' in meta and 'episodes' in meta['result']:
                episodes = meta['result']['episodes']

        for episode in episodes:
            if episode['file'].endswith('.strm'):
                continue

            return base_url % (video.video_type, episode['episodeid'])
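Because the JSON-RPC command above is assembled with %-formatting, an episode title containing a double quote would produce invalid JSON. A minimal sketch of the same VideoLibrary.GetEpisodes request built with json.dumps instead; the function name and argument handling are illustrative, not part of the original scraper:

import json
import xbmc  # only available inside Kodi

def build_episode_query(tvshowid, season, field, value):
    # same request as the cmd template above, but serialized with json.dumps
    # so quotes in titles cannot break the payload
    payload = {
        'jsonrpc': '2.0', 'id': 'libTvShows',
        'method': 'VideoLibrary.GetEpisodes',
        'params': {
            'tvshowid': int(tvshowid), 'season': int(season),
            'filter': {'field': field, 'operator': 'is', 'value': str(value)},
            'limits': {'start': 0, 'end': 25},
            'properties': ['title', 'season', 'episode', 'file', 'streamdetails'],
            'sort': {'order': 'ascending', 'method': 'label', 'ignorearticle': True},
        },
    }
    return json.dumps(payload)

# meta = scraper_utils.parse_json(xbmc.executeJSONRPC(
#     build_episode_query(params['id'], video.season, 'episode', video.episode)))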
Example #2
    def __get_json_links(self, html, sub):
        hosters = []
        js_data = scraper_utils.parse_json(html)
        if 'sources' in js_data:
            for source in js_data.get('sources', []):
                stream_url = source.get('file')
                if stream_url is None: continue

                host = scraper_utils.get_direct_hostname(self, stream_url)
                if host == 'gvideo':
                    quality = scraper_utils.gv_get_quality(stream_url)
                elif 'label' in source:
                    quality = scraper_utils.height_get_quality(source['label'])
                else:
                    quality = QUALITIES.HIGH
                hoster = {
                    'multi-part': False,
                    'host': host,
                    'class': self,
                    'quality': quality,
                    'views': None,
                    'rating': None,
                    'url': stream_url,
                    'direct': True
                }
                hoster['subs'] = sub
                hosters.append(hoster)
        return hosters
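For context, the payload __get_json_links parses is a jwplayer-style sources list. The exact shape varies by site, so this sample is only an assumption based on the fields read above:

import json

sample = '''{"sources": [{"file": "http://example.com/v.mp4", "label": "720"},
                         {"file": "http://example.com/v_low.mp4"}]}'''
for source in json.loads(sample).get('sources', []):
    # entries without a 'label' fall back to QUALITIES.HIGH in the code above
    print '%s (label: %s)' % (source.get('file'), source.get('label', 'none'))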
Example #3
    def __get_gk_links(self, link, iframe_url):
        sources = {}
        data = {'link': link}
        headers = dict(XHR)  # copy so the shared XHR dict is not mutated
        headers.update({'Referer': iframe_url, 'User-Agent': USER_AGENT})
        html = self._http_get(GK_URL, data=data, headers=headers, cache_limit=.25)
        js_data = scraper_utils.parse_json(html, GK_URL)
        if 'link' in js_data:
            if isinstance(js_data['link'], basestring):
                stream_url = js_data['link']
                if scraper_utils.get_direct_hostname(self, stream_url) == 'gvideo':
                    for source in scraper_utils.parse_google(self, stream_url):
                        sources[source] = {'quality': scraper_utils.gv_get_quality(source), 'direct': True}
                else:
                    sources[stream_url] = {'quality': QUALITIES.HIGH, 'direct': False}
            else:
                for item in js_data['link']:  # renamed so the link parameter isn't shadowed
                    stream_url = item['link']
                    if scraper_utils.get_direct_hostname(self, stream_url) == 'gvideo':
                        quality = scraper_utils.gv_get_quality(stream_url)
                    elif 'label' in item:
                        quality = scraper_utils.height_get_quality(item['label'])
                    else:
                        quality = QUALITIES.HIGH
                    sources[stream_url] = {'quality': quality, 'direct': True}
        return sources
Example #4
    def __get_source_page(self, video_type, page_url):
        match = re.search(r'/movie/(.*?)-(\d+)\.html', page_url)
        if not match: return '', '', ''
        slug, movie_id = match.groups()

        vid_type = 'movie' if video_type == VIDEO_TYPES.MOVIE else 'series'
        qp_url = QP_URL.format(slug=slug, movie_id=movie_id, vid_type=vid_type)
        qp_url = scraper_utils.urljoin(self.base_url, qp_url)
        headers = {'Referer': scraper_utils.urljoin(self.base_url, page_url)}
        headers.update(XHR)
        html = self._http_get(qp_url, headers=headers, cache_limit=8)
        watching_url = dom_parser2.parse_dom(html, 'a', {'title': re.compile('View all episodes')}, req='href')
        if not watching_url: return '', '', ''

        watching_url = watching_url[0].attrs['href']
        page_html = self._http_get(watching_url, headers={'Referer': scraper_utils.urljoin(self.base_url, page_url)}, cache_limit=8)
        # request the hidden images (the result is unused)
        for attrs, _content in dom_parser2.parse_dom(page_html, 'img', {'class': 'hidden'}, req='src'):
            self._http_get(attrs['src'], headers={'Referer': watching_url}, cache_limit=8)

        sl_url = SL_URL.format(movie_id=movie_id)
        sl_url = scraper_utils.urljoin(self.base_url, sl_url)
        html = self._http_get(sl_url, headers=headers, cache_limit=8)
        js_data = scraper_utils.parse_json(html, sl_url)
        try: html = js_data['html']
        except (KeyError, TypeError): html = ''
        return movie_id, watching_url, html
Example #5
    def __get_params(self, grab_url, episode_id, movie_id, page_url):
        hash_id, token, ts = None, None, None
        url = scraper_utils.urljoin(grab_url,
                                    '/token_v2.php',
                                    replace_path=True)
        headers = {'Referer': page_url}
        params = {
            'eid': episode_id,
            'mid': movie_id,
            '_': int(time.time() * 1000)
        }
        html = self._http_get(url,
                              params=params,
                              headers=headers,
                              cache_limit=0)
        if aa_decoder.is_aaencoded(html):
            html = aa_decoder.decode(html)
            match1 = re.search("hash\s*=\s*'([^']+)", html)
            match2 = re.search("token\s*=\s*'([^']+)", html)
            match3 = re.search("_\s*=\s*'([^']+)", html)
            if match1 and match2 and match3:
                hash_id = match1.group(1)
                token = match2.group(1)
                ts = match3.group(1)
        else:
            js_data = scraper_utils.parse_json(html, url)
            hash_id, token, ts = js_data.get('hash'), js_data.get(
                'token'), js_data.get('_')

        return hash_id, token, ts
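When the response is aaencoded, the three values are recovered with the regexes above after decoding. A tiny self-contained illustration of that extraction; the sample input is invented:

import re

decoded = "var hash = 'h1'; var token = 't2'; var _ = '1500000000000';"
fields = {}
for name in ('hash', 'token', '_'):
    match = re.search(r"%s\s*=\s*'([^']+)" % (name), decoded)
    if match: fields[name] = match.group(1)
# fields -> {'hash': 'h1', 'token': 't2', '_': '1500000000000'}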
Example #6
    def __get_movie_sources(self, page_url):
        sources = []
        headers = {'Referer': ''}
        html = self._http_get(page_url, headers=headers, cache_limit=.5)
        match = re.search(r'APP_PATH\+"([^"]+)"\+([^"]+)\+"([^"]+)"', html)
        if match:
            url1, var, url2 = match.groups()
            match = re.search("%s\s*=\s*'([^']+)" % (var), html)
            if match:
                headers = {'Referer': page_url}
                headers.update(XHR)
                contents_url = '/' + url1 + match.group(1) + url2
                contents_url = scraper_utils.urljoin(self.base_url,
                                                     contents_url)
                js_data = scraper_utils.parse_json(
                    self._http_get(contents_url,
                                   headers=headers,
                                   cache_limit=2), contents_url)
                if js_data:
                    sources = [
                        item['src'] for item in js_data if 'src' in item
                    ]

        match = re.search("openloadLink\s*=\s*'([^']+)", html, re.I)
        if match:
            sources.append(match.group(1))

        return sources
Example #7
    def get_sources(self, video):
        sources = []
        source_url = self.get_url(video)
        if not source_url or source_url == FORCE_NO_MATCH: return sources
        object_id = self.__extract_id(source_url)
        if object_id is None: return sources
        source_url = TITLE_URL.format(id=object_id)
        page_url = scraper_utils.urljoin(self.base_url, source_url)
        html = self._authed_http_get(page_url, cache_limit=.5)
        js_data = scraper_utils.parse_json(html, page_url)
        if video.video_type == VIDEO_TYPES.MOVIE:
            links = js_data.get('links', {})
        else:
            links = self.__episode_match(js_data, video) or {}  # guard: no match returns None

        prefix = js_data.get('domain', {}).get('prefix')
        suffix = js_data.get('domain', {}).get('suffix')
        for key, path in links.get('links', {}).iteritems():
            for mirror in sorted(list(set(links.get('mirrors', [])))):
                stream_url = TEMPLATE.format(prefix=prefix, mirror=mirror, suffix=suffix, path=path)
                host = scraper_utils.get_direct_hostname(self, stream_url)
                quality = Q_MAP.get(key, QUALITIES.HIGH)
                source = {'multi-part': False, 'url': stream_url, 'host': host, 'class': self, 'quality': quality, 'views': None, 'rating': None, 'direct': True}
                source['version'] = '(Mirror %d)' % (mirror)
                sources.append(source)

        return sources
Example #8
    def __get_ok(self, embed, flashvars):
        hosters = []
        link = flashvars[0].attrs['value']
        match = re.search('metadataUrl=([^"]+)', link)
        if match:
            referer = scraper_utils.cleanse_title(urllib.unquote(embed[0].attrs['data']))
            ok_url = scraper_utils.cleanse_title(urllib.unquote(match.group(1)))
            html = self._http_get(ok_url, data='ok', headers={'Referer': referer}, cache_limit=.25)
            js_data = scraper_utils.parse_json(html, ok_url)
            stream_url = js_data.get('movie', {}).get('url')
            if stream_url is not None:
                host = urlparse.urlparse(stream_url).hostname
                hoster = {
                    'multi-part': False,
                    'host': host,
                    'class': self,
                    'quality': QUALITIES.HD720,
                    'views': None,
                    'rating': None,
                    'url': stream_url,
                    'direct': False,
                    'subs': 'Turkish Subtitles'
                }
                hosters.append(hoster)
        return hosters
Example #9
    def _http_get(self,
                  url,
                  params=None,
                  data=None,
                  allow_redirect=True,
                  cache_limit=8):
        if not self.username or not self.password:
            return {}

        if data is None: data = {}
        data.update({'customer_id': self.username, 'pin': self.password})
        result = super(self.__class__,
                       self)._http_get(url,
                                       params=params,
                                       data=data,
                                       allow_redirect=allow_redirect,
                                       cache_limit=cache_limit)
        js_result = scraper_utils.parse_json(result, url)
        if 'status' in js_result and js_result['status'] == 'error':
            logger.log(
                'Premiumize V2 Scraper Error: %s - (%s)' %
                (url, js_result.get('message', 'Unknown Error')),
                log_utils.LOGWARNING)
            js_result = {}

        return js_result
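The wrapper above collapses both missing credentials and API-level errors to an empty dict so callers can chain .get() without try/except. The same pattern in isolation, as a sketch; fetch stands in for any callable returning a JSON string and is not part of the scraper:

import json

def safe_json_get(fetch, url, **kwargs):
    # parse the response and treat unparseable or error payloads as {}
    try:
        result = json.loads(fetch(url, **kwargs))
    except ValueError:
        return {}
    if isinstance(result, dict) and result.get('status') == 'error':
        return {}
    return result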
Example #10
    def __get_links_from_json2(self, url, page_url, video_type):
        sources = {}
        headers = {'Referer': page_url}
        headers.update(XHR)
        html = self._http_get(url, headers=headers, cache_limit=0)
        js_data = scraper_utils.parse_json(html, url)
        try:
            playlist = js_data.get('playlist', [])
            for source in playlist[0].get('sources', []):
                stream_url = source['file']
                if scraper_utils.get_direct_hostname(self, stream_url) == 'gvideo':
                    quality = scraper_utils.gv_get_quality(stream_url)
                elif 'label' in source:
                    quality = scraper_utils.height_get_quality(source['label'])
                else:
                    if video_type == VIDEO_TYPES.MOVIE:
                        meta = scraper_utils.parse_movie_link(stream_url)
                    else:
                        meta = scraper_utils.parse_episode_link(stream_url)
                    quality = scraper_utils.height_get_quality(meta['height'])
                sources[stream_url] = {'quality': quality, 'direct': True}
                logger.log('Adding stream: %s Quality: %s' % (stream_url, quality), log_utils.LOGDEBUG)
        except Exception as e:
            logger.log('Exception during yesmovies extract: %s' % (e), log_utils.LOGDEBUG)
        return sources
Example #11
    def search(self, video_type, title, year, season=''):  # @UnusedVariable
        results = []
        search_url = scraper_utils.urljoin(self.base_url, SEARCH_URL)
        referer = scraper_utils.urljoin(self.base_url, '/search/?q=%s')
        referer = referer % (urllib.quote_plus(title))
        headers = {'Referer': referer}
        headers.update(XHR)
        params = {
            'searchTerm': title,
            'type': SEARCH_TYPES[video_type],
            'limit': 500
        }
        html = self._http_get(search_url, params=params, headers=headers, auth=False, cache_limit=2)
        js_data = scraper_utils.parse_json(html, search_url)
        if 'results' in js_data:
            for result in js_data['results']:
                match_year = str(result.get('year', ''))
                match_url = result.get('permalink', '')
                match_title = result.get('title', '')
                if not year or not match_year or year == match_year:
                    result = {
                        'title': scraper_utils.cleanse_title(match_title),
                        'year': match_year,
                        'url': scraper_utils.pathify_url(match_url)
                    }
                    results.append(result)
        return results
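The year test above (not year or not match_year or year == match_year) is the filter idiom used by nearly every search() in this collection; pulled out on its own, it reads:

def year_matches(wanted, found):
    # pass when either year is unknown; otherwise require an exact match
    # (both values are strings, e.g. '2016')
    return not wanted or not found or wanted == found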
Example #12
    def __login(self):
        url = scraper_utils.urljoin(self.base_url, '/api/v1/user/login')
        data = {'user': self.username, 'password': self.password}
        headers = {'Content-Type': 'application/json'}
        html = self._http_get(url, data=json.dumps(data), headers=headers, cache_limit=0)
        js_data = scraper_utils.parse_json(html, url)
        if 'user' not in js_data: raise Exception('sit2play login failed')
Example #13
    def get_sources(self, video):
        hosters = []
        sources = []
        source_url = self.get_url(video)
        if not source_url or source_url == FORCE_NO_MATCH: return hosters
        page_url = scraper_utils.urljoin(self.base_url, source_url)
        html = self._http_get(page_url, cache_limit=.25)
        match = re.search(r'var\s+view_id\s*=\s*"([^"]+)', html)
        if not match: return hosters
        view_id = match.group(1)
        
        for lang in ['or', 'tr']:
            subs = lang == 'tr'
            view_data = {'id': view_id, 'tip': 'view', 'dil': lang}
            html = self._http_get(self.ajax_url, data=view_data, headers=XHR, cache_limit=.25)
            html = html.strip()
            html = re.sub(r'\\n|\\t', '', html)
            match = re.search(r'var\s+sources\s*=\s*(\[.*?\])', html)
            if match:
                raw_data = match.group(1)
                raw_data = raw_data.replace('\\', '')
            else:
                raw_data = html
             
            js_data = scraper_utils.parse_json(raw_data, self.ajax_url)
            if 'data' not in js_data: continue
            
            src = dom_parser2.parse_dom(js_data['data'], 'iframe', req='src')
            if not src: continue
            
            html = self._http_get(src[0].attrs['src'], cache_limit=.25)
            for attrs, _content in dom_parser2.parse_dom(html, 'iframe', req='src'):
                src = attrs['src']
                if not src.startswith('http'): continue
                sources.append({'label': '720p', 'file': src, 'direct': False, 'subs': subs})
            
            sources += [{'file': url, 'subs': subs} for url in scraper_utils.parse_sources_list(self, html).iterkeys()]
            
            if sources: break

        for source in sources:
            direct = source.get('direct', True)
            stream_url = source['file'] + scraper_utils.append_headers({'User-Agent': scraper_utils.get_ua()})
            if direct:
                host = scraper_utils.get_direct_hostname(self, stream_url)
                if host == 'gvideo':
                    quality = scraper_utils.gv_get_quality(stream_url)
                elif 'label' in source:
                    quality = scraper_utils.height_get_quality(source['label'])
                else:
                    continue
            else:
                host = urlparse.urlparse(stream_url).hostname
                quality = scraper_utils.height_get_quality(source['label'])
        
            hoster = {'multi-part': False, 'host': host, 'class': self, 'quality': quality, 'views': None, 'rating': None, 'url': stream_url, 'direct': direct}
            if source.get('subs'): hoster['subs'] = 'Turkish Subtitles'
            hosters.append(hoster)
    
        return hosters
Example #14
    def __get_link_from_json(self, url):
        sources = {}
        html = self._http_get(url, cache_limit=.5)
        js_result = scraper_utils.parse_json(html, url)
        if 'src' in js_result:
            sources[js_result['src']] = {'quality': QUALITIES.HIGH, 'direct': False}
        return sources
Example #15
    def search(self, video_type, title, year, season=''):  # @UnusedVariable
        results = []
        search_url = scraper_utils.urljoin(self.base_url, '/ajax/search.php')
        timestamp = int(time.time() * 1000)
        query = {
            'q': title,
            'limit': 100,
            'timestamp': timestamp,
            'verifiedCheck': ''
        }
        html = self._http_get(search_url,
                              data=query,
                              headers=XHR,
                              cache_limit=1)
        if video_type in [VIDEO_TYPES.TVSHOW, VIDEO_TYPES.EPISODE]:
            media_type = 'TV SHOW'
        else:
            media_type = 'MOVIE'

        js_data = scraper_utils.parse_json(html, search_url)
        for item in js_data:
            if not item['meta'].upper().startswith(media_type): continue

            result = {
                'title': scraper_utils.cleanse_title(item['title']),
                'url': scraper_utils.pathify_url(item['permalink']),
                'year': ''
            }
            results.append(result)

        return results
Example #16
    def __get_gk_links(self, html):
        sources = {}
        match = re.search(r'{link\s*:\s*"([^"]+)', html)
        if match:
            iframe_url = match.group(1)
            data = {'link': iframe_url}
            headers = {'Referer': iframe_url}
            html = self._http_get(self.gk_url,
                                  data=data,
                                  headers=headers,
                                  cache_limit=.5)
            js_data = scraper_utils.parse_json(html, self.gk_url)
            links = js_data.get('link', [])
            if isinstance(links, basestring):
                links = [{'link': links}]

            for link in links:
                stream_url = link['link']
                if scraper_utils.get_direct_hostname(self,
                                                     stream_url) == 'gvideo':
                    quality = scraper_utils.gv_get_quality(stream_url)
                    direct = True
                elif 'label' in link:
                    quality = scraper_utils.height_get_quality(link['label'])
                    direct = True
                else:
                    quality = QUALITIES.HIGH
                    direct = False
                sources[stream_url] = {'quality': quality, 'direct': direct}
        return sources
Example #17
    def search(self, video_type, title, year, season=''):  # @UnusedVariable
        results = []
        self.__get_token()
        if self.__token is None: return results
        
        search_url, u = self.__get_search_url()
        search_url = scraper_utils.urljoin(API_BASE_URL, search_url)
        timestamp = int(time.time() * 1000)
        s = self.__get_s()
        query = {'q': title, 'limit': '100', 'timestamp': timestamp, 'verifiedCheck': self.__token, 'set': s, 'rt': self.__get_rt(self.__token + s),
                 'sl': self.__get_sl(u)}
        headers = {'Referer': self.base_url}
        html = self._http_get(search_url, data=query, headers=headers, cache_limit=1)
        if video_type in [VIDEO_TYPES.TVSHOW, VIDEO_TYPES.EPISODE]:
            media_type = 'TV SHOW'
        else:
            media_type = 'MOVIE'

        for item in scraper_utils.parse_json(html, search_url):
            if not item['meta'].upper().startswith(media_type): continue
            
            match_year = str(item['year']) if 'year' in item and item['year'] else ''
            if not year or not match_year or year == match_year:
                result = {'title': scraper_utils.cleanse_title(item['title']), 'url': scraper_utils.pathify_url(item['permalink'].replace('/show/', '/tv-show/')), 'year': match_year}
                results.append(result)

        return results
Example #18
    def search(self, video_type, title, year, season=''):  # @UnusedVariable
        results = []
        search_url = scraper_utils.urljoin(self.base_url,
                                           '/search/searchBoxSuggestion')
        html = self._http_get(search_url,
                              params={
                                  'top': 8,
                                  'query': title
                              },
                              cache_limit=8)
        js_data = scraper_utils.parse_json(html, search_url)
        for item in js_data:
            entityName = match_title_year = item.get('Value', '')
            if entityName:
                match_title, match_year2 = scraper_utils.extra_year(
                    match_title_year)
                match_year = str(item.get('ReleaseYear', ''))
                if not match_year: match_year = match_year2

                match_url = '/ontology/EntityDetails?' + urllib.urlencode(
                    {
                        'entityName': entityName,
                        'ignoreMediaLinkError': 'false'
                    })
                if not year or not match_year or year == match_year:
                    result = {
                        'title': scraper_utils.cleanse_title(match_title),
                        'year': match_year,
                        'url': scraper_utils.pathify_url(match_url)
                    }
                    results.append(result)

        return results
Example #19
    def get_sources(self, video):
        source_url = self.get_url(video)
        hosters = []
        if not source_url or source_url == FORCE_NO_MATCH: return hosters
        url = scraper_utils.urljoin(self.base_url, source_url)
        html = self._http_get(url, headers=XHR, cache_limit=8)
        js_data = scraper_utils.parse_json(html, url)
        quality = Q_MAP.get(
            js_data.get('Key', {}).get('MovieDefinition'), QUALITIES.HIGH)
        value = js_data.get('Value', {})
        stream_url = value.get('VideoLink')
        if stream_url and value.get('ProviderSource', '').lower() == 'youtube':
            host = 'youtube.com'
            source = {
                'multi-part': False,
                'url': stream_url,
                'host': host,
                'class': self,
                'quality': quality,
                'views': None,
                'rating': None,
                'direct': False
            }
            hosters.append(source)

        return hosters
Example #20
    def _get_episode_url(self, show_url, video):
        object_id = self.__extract_id(show_url)
        if object_id is None: return
        url = scraper_utils.urljoin(self.base_url, TITLE_URL.format(id=object_id))
        html = self._authed_http_get(url, cache_limit=2)
        js_data = scraper_utils.parse_json(html, url)
        if self.__episode_match(js_data, video):
            return show_url
Example #21
    def _http_get(self, url, data=None, headers=None, cookies=None, cache_limit=8):
        # without credentials, return a blank (uncached) page
        if not self.username or not self.password:
            return ''

        if headers is None: headers = {}
        auth_header = base64.b64encode('%s:%s' % (self.username, self.password))
        headers['Authorization'] = 'Basic %s' % (auth_header)
        html = super(self.__class__, self)._http_get(url, data=data, headers=headers, cookies=cookies, cache_limit=cache_limit)
        return scraper_utils.parse_json(html, url)
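The Authorization header built above is plain HTTP Basic auth (RFC 7617). A standalone sketch of the same construction:

import base64

def basic_auth_header(username, password):
    # base64-encode 'user:password' and prefix with 'Basic '
    token = base64.b64encode('%s:%s' % (username, password))
    return {'Authorization': 'Basic %s' % (token)}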
Example #22
    def __get_cloud_links(self, html, page_url, sub):
        hosters = []
        html = html.replace('\\"', '"').replace('\\/', '/')
        match = re.search(r"dizi_kapak_getir\('([^']+)", html)
        if match:
            ep_id = match.group(1)
            for attrs, _content in dom_parser2.parse_dom(html, 'script', {'data-cfasync': 'false'}, req='src'):
                script_url = attrs['src']
                html = self._http_get(script_url, cache_limit=24)
                match1 = re.search(r"var\s+kapak_url\s*=\s*'([^']+)", html)
                match2 = re.search(r"var\s+aCtkp\s*=\s*'([^']+)", html)
                if match1 and match2:
                    link_url = '%s?fileid=%s&access_token=%s' % (match1.group(1), ep_id, match2.group(1))
                    headers = {'Referer': page_url}
                    html = self._http_get(link_url, headers=headers, cache_limit=.5)
                    js_data = scraper_utils.parse_json(html, link_url)
                    for variant in js_data.get('variants', []):
                        hosts = variant.get('hosts', [])
                        # random.choice raises on an empty list, so guard first
                        stream_host = random.choice(hosts) if hosts else None
                        if stream_host:
                            stream_url = stream_host + variant['path'] + scraper_utils.append_headers(
                                {'User-Agent': scraper_utils.get_ua(), 'Referer': page_url})
                            if not stream_url.startswith('http'):
                                stream_url = 'http://' + stream_url
                            host = scraper_utils.get_direct_hostname(self, stream_url)
                            if 'width' in variant:
                                quality = scraper_utils.width_get_quality(variant['width'])
                            elif 'height' in variant:
                                quality = scraper_utils.height_get_quality(variant['height'])
                            else:
                                quality = QUALITIES.HIGH
                            hoster = {
                                'multi-part': False,
                                'host': host,
                                'class': self,
                                'quality': quality,
                                'views': None,
                                'rating': None,
                                'url': stream_url,
                                'direct': True,
                                'subs': sub
                            }
                            hosters.append(hoster)
        return hosters
Example #23
    def __get_iframe_sources(self, iframe_url, page_url):
        hosters = []
        headers = {'Referer': page_url}
        html = self._http_get(iframe_url, headers=headers, cache_limit=.5)
        sources = dom_parser2.parse_dom(html, 'div', {'class': 'dzst-player'}, req='data-dzst-player')
        if sources:
            sources = scraper_utils.cleanse_title(sources[0].attrs['data-dzst-player'].replace('=', '='))
            js_data = scraper_utils.parse_json(scraper_utils.cleanse_title(sources), iframe_url)
            sources = js_data.get('tr', {})
            for key in sources:
                hosters.append(self.__create_source(sources[key], key, page_url, subs=True))

        return hosters
Example #24
    def __get_linked_sources(self, html):
        sources = []
        subs = 'Turkish subtitles'
        match = re.search(r'fvid\s*=\s*"([^"]+)', html)
        if match:
            html = self._http_get(AJAX_URL, params={'dizi': match.group(1)}, headers=XHR, cache_limit=.5)
            js_result = scraper_utils.parse_json(html, AJAX_URL)
            # assume Turkish subtitles unless the response says otherwise
            subs = '' if js_result.get('altyazi') else 'Turkish subtitles'
            for source in js_result.get('success', []):
                if 'src' in source:
                    sources.append(source['src'])

        return {'sources': sources, 'subs': subs}
Example #25
    def search(self, video_type, title, year, season=''):  # @UnusedVariable
        results = []
        search_url = scraper_utils.urljoin(self.base_url, '/search/ajax_search')
        html = self._http_get(search_url, params={'q': title}, headers=XHR, cache_limit=1)
        js_result = scraper_utils.parse_json(html, search_url)
        match_year = ''
        for series in js_result.get('series', []):
            match_url = series.get('seo')
            match_title = series.get('label')
            if match_url and match_title and (not year or not match_year or year == match_year):
                result = {'url': scraper_utils.pathify_url('/' + match_url), 'title': scraper_utils.cleanse_title(match_title), 'year': match_year}
                results.append(result)

        return results
Example #26
    def __get_links(self, url, video):
        hosters = []
        seen_urls = set()
        for search_type in SEARCH_TYPES:
            search_url, params = self.__translate_search(url, search_type)
            if not search_url: continue
            html = self._http_get(search_url, params=params, cache_limit=.5)
            js_result = scraper_utils.parse_json(html, search_url)
            if js_result.get('status') != 'success':
                logger.log(
                    'Alluc API Error: |%s|%s|: %s' %
                    (search_url, params,
                     js_result.get('message', 'Unknown Error')),
                    log_utils.LOGWARNING)
                continue

            for result in js_result['result']:
                # only results with exactly one hoster url are useful here
                if len(result['hosterurls']) != 1: continue
                if result['extension'] == 'rar': continue
                stream_url = result['hosterurls'][0]['url']
                if stream_url in seen_urls: continue

                if scraper_utils.release_check(video, result['title']):
                    host = urlparse.urlsplit(stream_url).hostname
                    quality = scraper_utils.get_quality(
                        video, host, self._get_title_quality(result['title']))
                    hoster = {
                        'multi-part': False,
                        'class': self,
                        'views': None,
                        'url': stream_url,
                        'rating': None,
                        'host': host,
                        'quality': quality,
                        'direct': False
                    }
                    hoster['extra'] = scraper_utils.cleanse_title(
                        result['title'])
                    if video.video_type == VIDEO_TYPES.MOVIE:
                        meta = scraper_utils.parse_movie_link(hoster['extra'])
                    else:
                        meta = scraper_utils.parse_episode_link(
                            hoster['extra'])
                    if 'format' in meta: hoster['format'] = meta['format']

                    hosters.append(hoster)
                    seen_urls.add(stream_url)

        return hosters
Example #27
    def __get_ajax_sources(self, html, page_url):
        stream_url = ''
        match = re.search(r'''\$\.getJSON\('([^']+)'\s*,\s*(\{.*?\})''', html)
        if match:
            ajax_url, params = match.groups()
            params = scraper_utils.parse_params(params)
            ajax_url = scraper_utils.urljoin(self.base_url, ajax_url)
            headers = {'Referer': page_url}
            headers.update(XHR)
            html = self._http_get(ajax_url, params=params, headers=headers, cache_limit=.5)
            js_data = scraper_utils.parse_json(html, ajax_url)
            stream_url = js_data.get('file', '')
        return stream_url
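scraper_utils.parse_params is not shown in this collection; judging by its use above, it turns a JavaScript object literal like {id: '5', tip: 'view'} into a dict. A rough, purely hypothetical stand-in:

import re

def parse_params(param_str):
    # naive key/value extraction; no nesting or escaping, a sketch
    # rather than a JavaScript parser
    params = {}
    for key, value in re.findall(r"(\w+)\s*:\s*'?([^',}]*)'?", param_str):
        params[key] = value.strip()
    return params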
Example #28
    def __login(self):
        url = scraper_utils.urljoin(self.base_url, '/apis/v2/user/login.json')
        data = {
            'email': self.username,
            'password': self.password,
            'rememberMe': True
        }
        referer = scraper_utils.urljoin(self.base_url, '/login')
        headers = {'Content-Type': 'application/json', 'Referer': referer}
        headers.update(XHR)
        html = super(self.__class__, self)._http_get(url, data=json.dumps(data), headers=headers, cache_limit=0)
        js_data = scraper_utils.parse_json(html, url)
        return js_data.get('status') == 'success'
Example #29
    def search(self, video_type, title, year, season=''):  # @UnusedVariable
        results = []
        media_type = 'series' if video_type == VIDEO_TYPES.TVSHOW else 'movie'
        search_url = scraper_utils.urljoin(self.base_url, '/typeahead/%s' % (urllib.quote(title)))
        headers = {'Referer': self.base_url}
        headers.update(XHR)
        html = self._http_get(search_url, headers=headers, require_debrid=True, cache_limit=.5)
        for item in scraper_utils.parse_json(html, search_url):
            match_title = item.get('title')
            match_url = item.get('link')
            match_year = ''
            if item.get('type') == media_type and match_title and match_url:
                if not year or not match_year or year == match_year:
                    result = {'title': scraper_utils.cleanse_title(match_title), 'year': match_year, 'url': scraper_utils.pathify_url(match_url)}
                    results.append(result)

        return results
Example #30
    def __get_gvideo_links(self, link):
        sources = []
        html = self._http_get(link, cache_limit=1)
        html = self.__decode_link(html)
        match = re.search(r'{\s*link\s*:\s*"([^"]+)', html)
        if match:
            data = {'link': match.group(1)}
            headers = {'Referer': link}
            html = self._http_get(GK_URL,
                                  data=data,
                                  headers=headers,
                                  cache_limit=.5)
            js_data = scraper_utils.parse_json(html, GK_URL)
            for link in js_data.get('link', []):
                sources.append({'host': '', 'link': link['link']})

        return sources