Example #1
    def get_sources(self, video):
        source_url = self.get_url(video)
        hosters = []
        if source_url and source_url != FORCE_NO_MATCH:
            url = urlparse.urljoin(self.base_url, source_url)
            html = self._http_get(url, cache_limit=.5)

            if re.search('<span[^>]+>\s*Low Quality\s*</span>', html):
                quality = QUALITIES.LOW
            else:
                quality = QUALITIES.HIGH
            
            for match in re.finditer('gkpluginsphp.*?link\s*:\s*"([^"]+)', html):
                data = {'link': match.group(1)}
                headers = dict(XHR)  # copy so the shared XHR header dict is not mutated between calls
                headers['Referer'] = url
                gk_url = urlparse.urljoin(self.base_url, GK_URL)
                html = self._http_get(gk_url, data=data, headers=headers, cache_limit=.25)
                if html:
                    try:
                        js_result = json.loads(html)
                    except ValueError:
                        log_utils.log('Invalid JSON returned: %s: %s' % (gk_url, html), log_utils.LOGWARNING)
                    else:
                        if 'link' in js_result and 'func' not in js_result:
                            if isinstance(js_result['link'], list):
                                sources = dict((link['link'], self._height_get_quality(link['label'])) for link in js_result['link'])
                            else:
                                sources = {js_result['link']: quality}
                            
                            for source in sources:
                                host = self._get_direct_hostname(source)
                                hoster = {'multi-part': False, 'url': source, 'class': self, 'quality': sources[source], 'host': host, 'rating': None, 'views': None, 'direct': True}
                                hosters.append(hoster)
        return hosters
Example #2
    def search(self, video_type, title, year):
        results = []
        html = self._http_get(self.base_url, cache_limit=0)
        match = re.search("var\s+tok\s*=\s*'([^']+)", html)
        if match:
            token = match.group(1)
            
            search_url = urlparse.urljoin(self.base_url, '/ajax/search.php?q=')
            search_url += urllib.quote_plus(title)
            timestamp = int(time.time() * 1000)
            query = {'q': title, 'limit': '100', 'timestamp': timestamp, 'verifiedCheck': token}
            html = self._http_get(search_url, data=query, cache_limit=.25)
            if video_type in [VIDEO_TYPES.TVSHOW, VIDEO_TYPES.EPISODE]:
                media_type = 'TV SHOW'
            else:
                media_type = 'MOVIE'

            if html:
                try:
                    js_data = json.loads(html)
                except ValueError:
                    log_utils.log('No JSON returned: %s: %s' % (search_url, html), xbmc.LOGWARNING)
                else:
                    for item in js_data:
                        if item['meta'].upper().startswith(media_type):
                            result = {'title': item['title'], 'url': item['permalink'].replace(self.base_url, ''), 'year': ''}
                            results.append(result)

        else:
            log_utils.log('Unable to locate CartoonHD token', xbmc.LOGWARNING)
        return results
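
Examples #1 and #2 both repeat the same json.loads/ValueError dance; later examples (e.g. #28) call a scraper_utils.parse_json helper instead. A minimal sketch of such a helper, assuming only the add-on's log_utils module, might look like:

import json

import log_utils  # SALTS logging shim, as used throughout these examples


def parse_json(html, url=''):
    # Return the parsed object, or an empty dict so callers can use
    # "'key' in js_result" style checks without their own try/except.
    try:
        return json.loads(html)
    except ValueError:
        log_utils.log('Invalid JSON returned: %s: %s' % (url, html), log_utils.LOGWARNING)
        return {}
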
Example #3
 def search(self, video_type, title, year):
     search_url = urlparse.urljoin(self.base_url, '/index.php?search_keywords=')
     search_url += urllib.quote_plus(title)
     search_url += '&year=' + urllib.quote_plus(str(year))
     if video_type in [VIDEO_TYPES.TVSHOW, VIDEO_TYPES.EPISODE]:
         search_url += '&search_section=2'
     else:
         search_url += '&search_section=1'
         
     results = []
     html = self._http_get(self.base_url, cache_limit=0)
     match = re.search('input type="hidden" name="key" value="([0-9a-f]*)"', html)
     if match:
         key = match.group(1)
         search_url += '&key=' + key
         
         html = self._http_get(search_url, cache_limit=.25)
         pattern = r'class="index_item.+?href="(.+?)" title="Watch (.+?)"?\(?([0-9]{4})?\)?"?>'
         for match in re.finditer(pattern, html):
             result = {}
             url, res_title, res_year = match.groups('')
             result['url'] = url
             result['title'] = res_title
             result['year'] = res_year
             results.append(result)
     else:
         log_utils.log('Unable to locate PW search key', xbmc.LOGWARNING)
     return results
Example #4
 def search(self, video_type, title, year):
     search_url = urlparse.urljoin(self.base_url, '/v1/api/search?query=')
     search_url += urllib.quote_plus(title)
     html = self._http_get(search_url, cache_limit=.25)
     results = []
     if html:
         try:
             js_data = json.loads(html)
         except ValueError:
             log_utils.log('Invalid JSON returned: %s: %s' % (search_url, html), xbmc.LOGWARNING)
         else:
             if 'movies' in js_data:
                 for item in js_data['movies']:
                     if item['type'] != 'movies':
                         continue
                     
                     match = re.search('(.*)(?:\s+\((\d{4})\))', item['title'])
                     if match:
                         match_title, match_year = match.groups()
                     else:
                         match_title = item['title']
                         match_year = ''
                     
                     result = {'title': match_title, 'url': '/movies/%s' % (item['slug']), 'year': match_year}
                     results.append(result)
     return results
Example #5
 def search(self, video_type, title, year):
     search_url = urlparse.urljoin(self.base_url, '/index.php?menu=search&query=')
     search_url += urllib.quote_plus(title)
     html = self._http_get(search_url, cache_limit=.25)
     results = []

     # filter the html down to only tvshow or movie results
     if video_type in [VIDEO_TYPES.TVSHOW, VIDEO_TYPES.SEASON, VIDEO_TYPES.EPISODE]:
         pattern = 'id="series".*'
         pattern2 = '<a title="Watch (.*?) Online For FREE".*?href="([^"]+)".*\((\d{1,4})\)</a>'
     else:
         pattern = 'id="movies".*id="series"'
         pattern2 = 'visible-sm">\s+<a\s+title="([^"]+)\s+(\d{4})".*?href="([^"]+)"'
     match = re.search(pattern, html, re.DOTALL)
     if match:
         try:
             fragment = match.group(0)
             for match in re.finditer(pattern2, fragment):
                 result = {}
                 
                 if video_type == VIDEO_TYPES.MOVIE:
                     res_title, res_year, url = match.groups('')
                 else:
                     res_title, url, res_year = match.groups('')
                     
                 if not year or year == res_year:
                     result['title'] = res_title
                     result['url'] = url.replace(self.base_url, '')
                     result['year'] = res_year
                     results.append(result)
         except Exception as e:
             log_utils.log('Failure during %s search: |%s|%s|%s| (%s)' % (self.get_name(), video_type, title, year, str(e)), xbmc.LOGWARNING)
     
     return results
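
The title/year split used in Example #4 (and again in Example #30) is easy to sanity-check in isolation:

import re

match = re.search(r'(.*)(?:\s+\((\d{4})\))', 'The Matrix (1999)')
if match:
    match_title, match_year = match.groups()  # ('The Matrix', '1999')
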
Example #6
    def _get_episode_url(self, show_url, video):
        url = urlparse.urljoin(self.base_url, show_url)
        html = self._http_get(url, cache_limit=1)
        match = re.search("var\s+id\s*=\s*'?(\d+)'?", html)
        if match:
            show_id = match.group(1)
            season_url = SEASON_URL % (show_id, video.season, str(int(time.time()) * 1000))
            season_url = urlparse.urljoin(self.base_url, season_url)
            html = self._http_get(season_url, cache_limit=1)
            try:
                js_data = json.loads(html)
            except ValueError:
                log_utils.log("Invalid JSON returned: %s: %s" % (url, html), log_utils.LOGWARNING)
            else:
                force_title = self._force_title(video)
                if not force_title:
                    for episode in js_data:
                        if int(episode["episode_number"]) == int(video.episode):
                            return LINK_URL % (show_id, video.season, episode["episode_number"])

                if (force_title or kodi.get_setting("title-fallback") == "true") and video.ep_title:
                    norm_title = self._normalize_title(video.ep_title)
                    for episode in js_data:
                        if norm_title == self._normalize_title(episode["title"]):
                            return LINK_URL % (show_id, video.season, episode["episode_number"])
Example #7
 def get_url(self, video):
     url = None
     result = self.db_connection.get_related_url(video.video_type, video.title, video.year, self.get_name(), video.season, video.episode)
     if result:
         url = result[0][0]
         log_utils.log('Got local related url: |%s|%s|%s|%s|%s|' % (video.video_type, video.title, video.year, self.get_name(), url))
     else:
         select = int(xbmcaddon.Addon().getSetting('%s-select' % (self.get_name())))
         if video.video_type == VIDEO_TYPES.EPISODE:
             search_title = '%s S%02dE%02d' % (video.title, int(video.season), int(video.episode))
         else:
             search_title = '%s %s' % (video.title, video.year)
         results = self.search(video.video_type, search_title, video.year)
         if results:
             if select == 0:
                 best_result = results[0]
             else:
                 best_result = results[0]  # fall back to the first result if no quality tag matches
                 best_qorder = 0
                 best_qstr = ''
                 for result in results:
                     match = re.search('\[(.*)\]$', result['title'])
                     if match:
                         q_str = match.group(1)
                         quality = self._blog_get_quality(video, q_str, '')
                         if Q_ORDER[quality] >= best_qorder:
                             if Q_ORDER[quality] > best_qorder or (quality == QUALITIES.HD and '1080' in q_str and '1080' not in best_qstr):
                                 best_qstr = q_str
                                 best_result = result
                                 best_qorder = Q_ORDER[quality]

             url = best_result['url']
             self.db_connection.set_related_url(video.video_type, video.title, video.year, self.get_name(), url)
     return url
Example #8
def update_all_scrapers():
        try: last_check = int(kodi.get_setting('last_list_check'))
        except ValueError: last_check = 0  # unset or malformed setting
        now = int(time.time())
        list_url = kodi.get_setting('scraper_url')
        scraper_password = kodi.get_setting('scraper_password')
        list_path = os.path.join(kodi.translate_path(kodi.get_profile()), 'scraper_list.txt')
        exists = os.path.exists(list_path)
        if list_url and scraper_password and (not exists or last_check < (now - (24 * 60 * 60))):
            scraper_list = utils2.get_and_decrypt(list_url, scraper_password)
            if scraper_list:
                try:
                    with open(list_path, 'w') as f:
                        f.write(scraper_list)
    
                    kodi.set_setting('last_list_check', str(now))
                    kodi.set_setting('scraper_last_update', time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(now)))
                    for line in scraper_list.split('\n'):
                        line = line.replace(' ', '')
                        if line:
                            scraper_url, filename = line.split(',')
                            if scraper_url.startswith('http'):
                                update_scraper(filename, scraper_url)
                except Exception as e:
                    log_utils.log('Exception during scraper update: %s' % (e), log_utils.LOGWARNING)
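
utils2.get_and_decrypt is not shown among these examples, but Example #36 below fetches and decrypts a scraper file with pyaes AES-CBC, so a plausible sketch looks like this (the helper body and the IV constant are assumptions, not the add-on's actual code):

import hashlib
import urllib2

import pyaes  # pure-Python AES, the same library Example #36 uses

IV = '\0' * 16  # assumption: the real add-on ships its own IV constant


def get_and_decrypt(url, password):
    # Hypothetical counterpart to utils2.get_and_decrypt, modeled on Example #36:
    # SHA-256 of the password as the key, then AES-CBC decrypt the response.
    try:
        cipher_text = urllib2.urlopen(url).read()
    except Exception:
        return ''  # caller treats an empty result as "no list available"
    key = hashlib.sha256(password).digest()
    decrypter = pyaes.Decrypter(pyaes.AESModeOfOperationCBC(key, IV))
    plain_text = decrypter.feed(cipher_text)
    plain_text += decrypter.feed()  # flush the final padded block
    return plain_text
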
Example #9
def update_settings():
    full_path = os.path.join(kodi.get_path(), 'resources', 'settings.xml')
    
    try:
        # open for append; skip update if it fails
        with open(full_path, 'a') as f:
            pass
    except Exception as e:
        log_utils.log('Dynamic settings update skipped: %s' % (e), log_utils.LOGWARNING)
    else:
        with open(full_path, 'r') as f:
            xml = f.read()

        new_settings = []
        cat_count = 1
        old_xml = xml
        classes = scraper.Scraper.__class__.__subclasses__(scraper.Scraper)  # @UndefinedVariable
        classes += proxy.Proxy.__class__.__subclasses__(proxy.Proxy)  # @UndefinedVariable
        for cls in sorted(classes, key=lambda x: x.get_name().upper()):
            if not cls.get_name() or cls.has_proxy(): continue
            new_settings += cls.get_settings()
            if len(new_settings) > 90:
                xml = update_xml(xml, new_settings, cat_count)
                new_settings = []
                cat_count += 1
    
        if new_settings:
            xml = update_xml(xml, new_settings, cat_count)
    
        if xml != old_xml:
            with open(full_path, 'w') as f:
                f.write(xml)
        else:
            log_utils.log('No Settings Update Needed', log_utils.LOGDEBUG)
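
update_xml itself is not among these examples. A rough, purely hypothetical stand-in that shows what it has to do (replace the previously generated category for cat_count, or append a new one before the closing tag):

import re


def update_xml(xml, new_settings, cat_count):
    # Hypothetical sketch only; the label format and layout are assumptions.
    label = 'Scrapers %d' % (cat_count)
    category = '\t<category label="%s">\n%s\n\t</category>\n' % (label, '\n'.join(new_settings))
    old_cat = re.search('\t<category label="%s">.*?</category>\n' % (label), xml, re.DOTALL)
    if old_cat:
        return xml.replace(old_cat.group(0), category)
    return xml.replace('</settings>', category + '</settings>')
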
Example #10
    def onPlayBackStarted(self):
        log_utils.log('Service: Playback started')
        playing = self.win.getProperty('salts.playing') == 'True'
        self.trakt_id = self.win.getProperty('salts.playing.trakt_id')
        self.season = self.win.getProperty('salts.playing.season')
        self.episode = self.win.getProperty('salts.playing.episode')
        srt_path = self.win.getProperty('salts.playing.srt')
        trakt_resume = self.win.getProperty('salts.playing.trakt_resume')
        salts_resume = self.win.getProperty('salts.playing.salts_resume')
        if playing:   # Playback is ours
            log_utils.log('Service: tracking progress...')
            self.tracked = True
            if srt_path:
                log_utils.log('Service: Enabling subtitles: %s' % (srt_path))
                self.setSubtitles(srt_path)
            else:
                self.showSubtitles(False)

        self._totalTime = 0
        while self._totalTime == 0:
            try:
                self._totalTime = self.getTotalTime()
            except RuntimeError:
                self._totalTime = 0
                break
            xbmc.sleep(1000)

        if salts_resume:
            log_utils.log("Salts Local Resume: Resume Time: %s Total Time: %s" % (salts_resume, self._totalTime), log_utils.LOGDEBUG)
            self.seekTime(float(salts_resume))
        elif trakt_resume:
            resume_time = float(trakt_resume) * self._totalTime / 100
            log_utils.log("Salts Trakt Resume: Percent: %s, Resume Time: %s Total Time: %s" % (trakt_resume, resume_time, self._totalTime), log_utils.LOGDEBUG)
            self.seekTime(resume_time)
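
trakt_resume is stored as a percentage, so the seek target is a simple proportion of the runtime; for example:

trakt_resume = '25.0'  # percent watched, as stored in the window property
total_time = 3600.0    # seconds, as returned by getTotalTime()
resume_time = float(trakt_resume) * total_time / 100  # 900.0 seconds, i.e. 15 minutes in
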
Example #11
 def onPlayBackStopped(self):
     log_utils.log('Service: Playback Stopped')
     if self.tracked:
         # clear the playlist if SALTS was playing and only one item in playlist to
         # use playlist to determine playback method in get_sources
         pl = xbmc.PlayList(xbmc.PLAYLIST_VIDEO)
         plugin_url = 'plugin://%s/' % (kodi.get_id())
         if pl.size() == 1 and pl[0].getfilename().lower().startswith(plugin_url):
             log_utils.log('Service: Clearing Single Item SALTS Playlist', log_utils.LOGDEBUG)
             pl.clear()
             
         playedTime = float(self._lastPos)
         try: percent_played = int((playedTime / self._totalTime) * 100)
         except ZeroDivisionError: percent_played = 0  # guard div by zero
         pTime = utils2.format_time(playedTime)
         tTime = utils2.format_time(self._totalTime)
         log_utils.log('Service: Played %s of %s total = %s%%' % (pTime, tTime, percent_played), log_utils.LOGDEBUG)
         if playedTime == 0 and self._totalTime == 999999:
             log_utils.log('Kodi silently failed to start playback', log_utils.LOGWARNING)
         elif playedTime >= 5:
             log_utils.log('Service: Setting bookmark on |%s|%s|%s| to %s seconds' % (self.trakt_id, self.season, self.episode, playedTime), log_utils.LOGDEBUG)
             db_connection.set_bookmark(self.trakt_id, playedTime, self.season, self.episode)
             if percent_played >= 75:
                 if xbmc.getCondVisibility('System.HasAddon(script.trakt)'):
                     run = 'RunScript(script.trakt, action=sync, silent=True)'
                     xbmc.executebuiltin(run)
         self.reset()
Example #12
 def _http_get(self, url, headers=None, cache_limit=8):
     html = super(OneClickWatch_Scraper, self)._cached_http_get(url, self.base_url, self.timeout, headers=headers, cache_limit=cache_limit)
     cookie = self._get_sucuri_cookie(html)
     if cookie:
         log_utils.log('Setting OCW cookie: %s' % (cookie), log_utils.LOGDEBUG)
         html = super(OneClickWatch_Scraper, self)._cached_http_get(url, self.base_url, self.timeout, cookies=cookie, headers=headers, cache_limit=0)
     return html
Example #13
    def search(self, video_type, title, year):
        search_url = urlparse.urljoin(self.base_url, "/index.php?menu=search&query=")
        search_url += urllib.quote_plus(title)
        html = self._http_get(search_url, cache_limit=0.25)
        results = []

        # filter the html down to only tvshow or movie results
        if video_type in [VIDEO_TYPES.TVSHOW, VIDEO_TYPES.SEASON, VIDEO_TYPES.EPISODE]:
            pattern = 'id="series".*'
            pattern2 = '<a title="Watch (.*?) Online For FREE".*?href="([^"]+)".*\((\d{1,4})\)</a>'
        else:
            pattern = 'id="movies".*id="series"'
            pattern2 = '<a\s+title="([^"]+)\s+\d{4}\.?".*?href="([^"]+)".*?\((\d{4})\.?\)</a>'
        match = re.search(pattern, html, re.DOTALL)
        if match:
            try:
                fragment = match.group(0)
                for match in re.finditer(pattern2, fragment):
                    res_title, url, res_year = match.groups("")
                    if not year or not res_year or year == res_year:
                        result = {"title": res_title, "url": url.replace(self.base_url, ""), "year": res_year}
                        results.append(result)
            except Exception as e:
                log_utils.log(
                    "Failure during %s search: |%s|%s|%s| (%s)" % (self.get_name(), video_type, title, year, str(e)),
                    xbmc.LOGWARNING,
                )
        return results
Example #14
        def onInit(self):
            log_utils.log('onInit:', log_utils.LOGDEBUG)
            self.OK = False
            self.radio_buttons = []
            posy = starty
            for label in RADIO_BUTTONS:
                self.radio_buttons.append(self.__get_radio_button(posx, posy, label))
                posy += gap
            
            try: responses = json.loads(kodi.get_setting('prev_responses'))
            except ValueError: responses = [True] * len(self.radio_buttons)  # no valid saved responses
            if len(responses) < len(self.radio_buttons):
                responses += [True] * (len(self.radio_buttons) - len(responses))
            
            self.addControls(self.radio_buttons)
            last_button = None
            for response, radio_button in zip(responses, self.radio_buttons):
                radio_button.setSelected(response)
                if last_button is not None:
                    radio_button.controlUp(last_button)
                    radio_button.controlLeft(last_button)
                    last_button.controlDown(radio_button)
                    last_button.controlRight(radio_button)
                last_button = radio_button

            continue_ctrl = self.getControl(CONTINUE_BUTTON)
            cancel_ctrl = self.getControl(CANCEL_BUTTON)
            self.radio_buttons[0].controlUp(cancel_ctrl)
            self.radio_buttons[0].controlLeft(cancel_ctrl)
            self.radio_buttons[-1].controlDown(continue_ctrl)
            self.radio_buttons[-1].controlRight(continue_ctrl)
            continue_ctrl.controlUp(self.radio_buttons[-1])
            continue_ctrl.controlLeft(self.radio_buttons[-1])
            cancel_ctrl.controlDown(self.radio_buttons[0])
            cancel_ctrl.controlRight(self.radio_buttons[0])
Example #15
 def search(self, video_type, title, year):
     search_url = urlparse.urljoin(self.base_url, '/search/?criteria=title&search_query=')
     search_url += urllib.quote_plus(title)
     html = self._http_get(search_url, cache_limit=.25)
     results = []
     
     # filter the html down to only tvshow or movie results
     if video_type in [VIDEO_TYPES.TVSHOW, VIDEO_TYPES.SEASON, VIDEO_TYPES.EPISODE]:
         pattern = '<h1>Tv Shows</h1>.*'
     else:
         pattern = '<div class="filmDiv".*(<h1>Tv Shows</h1>)*'
     match = re.search(pattern, html, re.DOTALL)
     try:
         if match:
             fragment = match.group(0)
             pattern = 'href="([^"]+)" class="filmname">(.*?)\s*</a>.*?/all/byViews/(\d+)/'
             for match in re.finditer(pattern, fragment, re.DOTALL):
                 result = {}
                 url, res_title, res_year = match.groups('')
                 if not year or year == res_year:
                     result['title'] = res_title
                     result['url'] = url.replace(self.base_url, '')
                     result['year'] = res_year
                     results.append(result)
     except Exception as e:
         log_utils.log('Failure during %s search: |%s|%s|%s| (%s)' % (self.get_name(), video_type, title, year, str(e)), xbmc.LOGWARNING)
     
     return results
Example #16
    def _parse_google(self, link):
        sources = []
        html = self._http_get(link, cache_limit=.5)
        i = link.rfind('#')
        if i > -1:
            link_id = link[i + 1:]
            match = re.search('feedPreload:\s*(.*}]}})},', html, re.DOTALL)
            if match:
                try:
                    js = json.loads(match.group(1))
                except ValueError:
                    log_utils.log('Invalid JSON returned for: %s' % (link), log_utils.LOGWARNING)
                else:
                    for item in js['feed']['entry']:
                        if item['gphoto$id'] == link_id:
                            for media in item['media']['content']:
                                if media['type'].startswith('video'):
                                    sources.append(media['url'].replace('%3D', '='))
        else:
            match = re.search('preload\'?:\s*(.*}})},', html, re.DOTALL)
            if match:
                try:
                    js = json.loads(match.group(1))
                except ValueError:
                    log_utils.log('Invalid JSON returned for: %s' % (link), log_utils.LOGWARNING)
                else:
                    for media in js['feed']['media']['content']:
                        if media['type'].startswith('video'):
                            sources.append(media['url'].replace('%3D', '='))

        return sources
Example #17
 def get_sources(self, video):
     source_url = self.get_url(video)
     hosters = []
     if source_url and source_url != FORCE_NO_MATCH:
         url = urlparse.urljoin(self.base_url, source_url)
         html = self._http_get(url, cache_limit=.5)
         fragment = dom_parser.parse_dom(html, 'ul', {'class': 'css_server_new'})
         if fragment:
             for match in re.finditer('href="([^"]+)[^>]*>(.*?)(?:-\d+)?</a>', fragment[0]):
                 url, host = match.groups()
                 host = host.lower()
                 host = re.sub('<img.*?/>', '', host)
                 host = HOSTS.get(host, host)
                 log_utils.log('%s - %s' % (url, host))
                 if host in GVIDEO_NAMES:
                     sources = self.__get_links(urlparse.urljoin(self.base_url, url))
                     direct = True
                 else:
                     sources = {url: host}
                     direct = False
                 
                 for source in sources:
                     if self._get_direct_hostname(source) == 'gvideo':
                         quality = scraper_utils.gv_get_quality(source)
                         source = source + '|User-Agent=%s' % (scraper_utils.get_ua())
                     else:
                         quality = scraper_utils.get_quality(video, source, QUALITIES.HIGH)
                 
                     hoster = {'multi-part': False, 'host': sources[source], 'class': self, 'quality': quality, 'views': None, 'rating': None, 'url': source, 'direct': direct}
                     hosters.append(hoster)
     return hosters
Example #18
    def search(self, video_type, title, year):
        filter_str = '{"field": "title", "operator": "contains", "value": "%s"}' % (title)
        if year: filter_str = '{"and": [%s, {"field": "year", "operator": "is", "value": "%s"}]}' % (filter_str, year)
        if video_type == VIDEO_TYPES.MOVIE:
            cmd = '{"jsonrpc": "2.0", "method": "VideoLibrary.GetMovies", "params": { "filter": %s, "limits": { "start" : 0, "end": 25 }, "properties" : ["title", "year", "file", "streamdetails"], \
            "sort": { "order": "ascending", "method": "label", "ignorearticle": true } }, "id": "libMovies"}'
            result_key = 'movies'
            id_key = 'movieid'
        else:
            cmd = '{"jsonrpc": "2.0", "method": "VideoLibrary.GetTVShows", "params": { "filter": %s, "limits": { "start" : 0, "end": 25 }, "properties" : ["title", "year"], \
            "sort": { "order": "ascending", "method": "label", "ignorearticle": true } }, "id": "libTvShows"}'
            result_key = 'tvshows'
            id_key = 'tvshowid'

        results = []
        cmd = cmd % (filter_str)
        meta = xbmc.executeJSONRPC(cmd)
        meta = json.loads(meta)
        log_utils.log('Search Meta: %s' % (meta), log_utils.LOGDEBUG)
        if 'result' in meta and result_key in meta['result']:
            for item in meta['result'][result_key]:
                if video_type == VIDEO_TYPES.MOVIE and item['file'].endswith('.strm'):
                    continue

                result = {'title': item['title'], 'year': item['year'], 'url': 'video_type=%s&id=%s' % (video_type, item[id_key])}
                results.append(result)
        return results
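
The filter composition is plain string interpolation; for title 'Heat' and year '1995' the movie branch builds a nested JSON-RPC filter that can be verified directly:

import json

filter_str = '{"field": "title", "operator": "contains", "value": "%s"}' % ('Heat')
filter_str = '{"and": [%s, {"field": "year", "operator": "is", "value": "%s"}]}' % (filter_str, '1995')
cmd = '{"jsonrpc": "2.0", "method": "VideoLibrary.GetMovies", "params": {"filter": %s}, "id": "libMovies"}' % (filter_str)
json.loads(cmd)  # raises ValueError if the interpolation broke the JSON

Note that a title containing a double quote would produce invalid JSON here.
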
Example #19
    def get_url(self, video):
        url = None
        result = self.db_connection.get_related_url(video.video_type, video.title, video.year, self.get_name(), video.season, video.episode)
        if result:
            url = result[0][0]
            log_utils.log('Got local related url: |%s|%s|%s|%s|%s|' % (video.video_type, video.title, video.year, self.get_name(), url))
        else:
            date_match = False
            search_title = '%s S%02dE%02d' % (video.title, int(video.season), int(video.episode))
            results = self.search(video.video_type, search_title, '')
            if not results and video.ep_airdate is not None:
                search_title = '%s %s' % (video.title, video.ep_airdate.strftime('%Y.%m.%d'))
                results = self.search(video.video_type, search_title, '')
                date_match = True

            best_q_index = -1
            for result in results:
                if date_match and video.ep_airdate.strftime('%Y.%m.%d') not in result['title']:
                    continue
                
                if Q_DICT[result['quality']] > best_q_index:
                    best_q_index = Q_DICT[result['quality']]
                    url = result['url']
            self.db_connection.set_related_url(video.video_type, video.title, video.year, self.get_name(), url)
        return url
Example #20
    def get_sources(self, video):
        source_url = self.get_url(video)
        sources = []
        if source_url:
            params = urlparse.parse_qs(source_url)
            show_url = CONTENT_URL % (params['catalog_id'][0])
            url = urlparse.urljoin(self.base_url, show_url)
            html = self._http_get(url, cache_limit=.5)
            try:
                js_data = json.loads(html)
                if video.video_type == VIDEO_TYPES.EPISODE:
                    js_data = self.__get_episode_json(params, js_data)
            except ValueError:
                log_utils.log('Invalid JSON returned for: %s' % (url), xbmc.LOGWARNING)
            else:
                for film in js_data['listvideos']:
                    source_url = SOURCE_URL % (film['film_id'], params['catalog_id'][0])
                    url = urlparse.urljoin(self.base_url, source_url)
                    time.sleep(1.5)
                    html = self._http_get(url, cache_limit=.5)
                    try:
                        film_js = json.loads(html)
                    except ValueError:
                        log_utils.log('Invalid JSON returned for: %s' % (url), xbmc.LOGWARNING)
                    else:
                        for film in film_js['videos']:
                            film_link = self.__decrypt(FILM_KEY, base64.b64decode(film['film_link']))
                            for match in re.finditer('(http.*?(?:#(\d+)#)?)(?=http|$)', film_link):
                                link, height = match.groups()
                                source = {'multi-part': False, 'url': link, 'host': self._get_direct_hostname(link), 'class': self, 'quality': self._gv_get_quality(link), 'views': None, 'rating': None, 'direct': True}
                                if height is not None: source['resolution'] = '%sp' % (height)
                                sources.append(source)

        return sources
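
The lookahead-based splitter at the end handles concatenated links with optional #height# markers; on sample data (placeholder URLs):

import re

film_link = 'http://a.example/video.mp4#720#http://b.example/video.mp4'
for match in re.finditer(r'(http.*?(?:#(\d+)#)?)(?=http|$)', film_link):
    link, height = match.groups()
    # 1st match: link = 'http://a.example/video.mp4#720#', height = '720'
    # 2nd match: link = 'http://b.example/video.mp4', height = None
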
Example #21
 def _http_get(self, url, data=None, headers=None, cache_limit=8):
     html = self._cached_http_get(url, self.base_url, self.timeout, data=data, headers=headers, cache_limit=cache_limit)
     cookie = scraper_utils.get_sucuri_cookie(html)
     if cookie:
         log_utils.log('Setting Pubfilm cookie: %s' % (cookie), log_utils.LOGDEBUG)
         html = self._cached_http_get(url, self.base_url, self.timeout, cookies=cookie, data=data, headers=headers, cache_limit=0)
     return html
Example #22
 def __init__(self, timeout=scraper.DEFAULT_TIMEOUT):
     self.timeout = timeout
     self.__scraper = None
     try:
         self.__scraper = real_scraper(timeout)
     except Exception as e:
         log_utils.log('Failure during %s scraper creation: %s' % (self.get_name(), e), log_utils.LOGDEBUG)
Example #23
def update_settings():
    path = xbmcaddon.Addon().getAddonInfo('path')
    full_path = os.path.join(path, 'resources', 'settings.xml')
    with open(full_path, 'r') as f:
        xml = f.read()

    new_settings = []
    cat_count = 1
    old_xml = xml
    classes = scraper.Scraper.__class__.__subclasses__(scraper.Scraper)
    for cls in sorted(classes, key=lambda x: x.get_name().upper()):
        new_settings += cls.get_settings()

        if len(new_settings) > 90:
            xml = update_xml(xml, new_settings, cat_count)
            new_settings = []
            cat_count += 1

    if new_settings:
        xml = update_xml(xml, new_settings, cat_count)

    if xml != old_xml:
        with open(full_path, 'w') as f:
            f.write(xml)
    else:
        log_utils.log('No Settings Update Needed', xbmc.LOGDEBUG)
Example #24
    def _default_get_url(self, video):
        temp_video_type = video.video_type
        if video.video_type == VIDEO_TYPES.EPISODE: temp_video_type = VIDEO_TYPES.TVSHOW
        url = None
        self.create_db_connection()

        result = self.db_connection.get_related_url(temp_video_type, video.title, video.year, self.get_name())
        if result:
            url = result[0][0]
            log_utils.log('Got local related url: |%s|%s|%s|%s|%s|' % (temp_video_type, video.title, video.year, self.get_name(), url))
        else:
            results = self.search(temp_video_type, video.title, video.year)
            if results:
                url = results[0]['url']
                self.db_connection.set_related_url(temp_video_type, video.title, video.year, self.get_name(), url)

        if url and video.video_type == VIDEO_TYPES.EPISODE:
            result = self.db_connection.get_related_url(VIDEO_TYPES.EPISODE, video.title, video.year, self.get_name(), video.season, video.episode)
            if result:
                url = result[0][0]
                log_utils.log('Got local related url: |%s|%s|%s|' % (video, self.get_name(), url))
            else:
                show_url = url
                url = self._get_episode_url(show_url, video)
                if url:
                    self.db_connection.set_related_url(VIDEO_TYPES.EPISODE, video.title, video.year, self.get_name(), url, video.season, video.episode)

        return url
Example #25
    def get_sources(self, video):
        source_url = self.get_url(video)
        hosters = []
        if source_url and source_url != FORCE_NO_MATCH:
            params = urlparse.parse_qs(source_url)
            if video.video_type == VIDEO_TYPES.MOVIE:
                cmd = '{"jsonrpc": "2.0", "method": "VideoLibrary.GetMovieDetails", "params": {"movieid": %s, "properties" : ["file", "playcount", "streamdetails"]}, "id": "libMovies"}'
                result_key = 'moviedetails'
            else:
                cmd = '{"jsonrpc": "2.0", "method": "VideoLibrary.GetEpisodeDetails", "params": {"episodeid": %s, "properties" : ["file", "playcount", "streamdetails"]}, "id": "libTvShows"}'
                result_key = 'episodedetails'

            run = cmd % (params['id'][0])
            meta = xbmc.executeJSONRPC(run)
            meta = json.loads(meta)
            log_utils.log('Source Meta: %s' % (meta), log_utils.LOGDEBUG)
            if 'result' in meta and result_key in meta['result']:
                details = meta['result'][result_key]
                def_quality = [item[0] for item in sorted(SORT_KEYS['quality'].items(), key=lambda x:x[1])][self.def_quality]
                host = {'multi-part': False, 'class': self, 'url': details['file'], 'host': 'XBMC Library', 'quality': def_quality, 'views': details['playcount'], 'rating': None, 'direct': True}
                stream_details = details['streamdetails']
                if len(stream_details['video']) > 0 and 'width' in stream_details['video'][0]:
                    host['quality'] = self._width_get_quality(stream_details['video'][0]['width'])
                hosters.append(host)
        return hosters
Example #26
 def _get_episode_url(self, show_url, video):
     params = urlparse.parse_qs(show_url)
     catalog_id = params['catalog_id'][0]
     sid = hashlib.md5('content%scthd' % (catalog_id)).hexdigest()
     source_url = CONTENT_URL % (catalog_id, sid)
     url = urlparse.urljoin(self.base_url, source_url)
     html = self._http_get(url, cache_limit=.5)
     try:
         js_data = json.loads(html)
     except ValueError:
         log_utils.log('Invalid JSON returned for: %s' % (url), xbmc.LOGWARNING)
     else:
         force_title = self._force_title(video)
         if not force_title:
             for episode in js_data['listvideos']:
                 if ' S%02dE%02d ' % (int(video.season), int(video.episode)) in episode['film_name']:
                     return EPISODE_URL % (video.video_type, params['catalog_id'][0], video.season, video.episode)
         
         if (force_title or kodi.get_setting('title-fallback') == 'true') and video.ep_title:
             norm_title = self._normalize_title(video.ep_title)
             for episode in js_data['listvideos']:
                 match = re.search('-\s*S(\d+)E(\d+)\s*-\s*(.*)', episode['film_name'])
                 if match:
                     season, episode, title = match.groups()
                     if title and norm_title == self._normalize_title(title):
                         return EPISODE_URL % (video.video_type, params['catalog_id'][0], int(season), int(episode))
Example #27
 def _set_cookies(self, base_url, cookies):
     cookie_file = os.path.join(COOKIEPATH, "%s_cookies.lwp" % (self.get_name()))
     cj = cookielib.LWPCookieJar(cookie_file)
     try:
         cj.load(ignore_discard=True)
     except:
         pass
     if xbmcaddon.Addon().getSetting("cookie_debug") == "true":
         log_utils.log("Before Cookies: %s - %s" % (self, self.cookies_as_str(cj)), xbmc.LOGDEBUG)
     domain = urlparse.urlsplit(base_url).hostname
     for key in cookies:
         c = cookielib.Cookie(
             0,
             key,
             str(cookies[key]),
             port=None,
             port_specified=False,
             domain=domain,
             domain_specified=True,
             domain_initial_dot=False,
             path="/",
             path_specified=True,
             secure=False,
             expires=None,
             discard=False,
             comment=None,
             comment_url=None,
             rest={},
         )
         cj.set_cookie(c)
     cj.save(ignore_discard=True)
     if xbmcaddon.Addon().getSetting("cookie_debug") == "true":
         log_utils.log("After Cookies: %s - %s" % (self, self.cookies_as_str(cj)), xbmc.LOGDEBUG)
     return cj
Example #28
    def __get_links(self, url, video):
        hosters = []
        seen_urls = set()
        for search_type in SEARCH_TYPES:
            search_url = self.__translate_search(url, search_type)
            if search_url:
                html = self._http_get(search_url, cache_limit=.5)
                js_result = scraper_utils.parse_json(html, search_url)
                if 'status' in js_result and js_result['status'] == 'success':
                    for result in js_result['result']:
                        if len(result['hosterurls']) > 1: continue
                        if result['extension'] == 'rar': continue
                        
                        stream_url = result['hosterurls'][0]['url']
                        if stream_url not in seen_urls:
                            if scraper_utils.title_check(video, result['title']):
                                host = urlparse.urlsplit(stream_url).hostname
                                quality = scraper_utils.get_quality(video, host, self._get_title_quality(result['title']))
                                hoster = {'multi-part': False, 'class': self, 'views': None, 'url': stream_url, 'rating': None, 'host': host, 'quality': quality, 'direct': False}
                                hoster['extra'] = result['title']
                                hosters.append(hoster)
                                seen_urls.add(stream_url)
                else:
                    log_utils.log('Alluc API Error: %s: %s' % (search_url, js_result['message']), log_utils.LOGWARNING)

        return hosters
Example #29
    def _http_get(self, url, data=None, cache_limit=8):
        # return all uncached blank pages if no user or pass
        if not self.username or not self.password:
            return ''

        if 'search?query' in url:
            log_utils.log('Translating Search Url: %s' % (url), xbmc.LOGDEBUG)
            url = self.__translate_search(url)

        html = super(DirectDownload_Scraper, self)._cached_http_get(
            url,
            self.base_url,
            self.timeout,
            data=data,
            cache_limit=cache_limit)

        fake = None
        try:
            js_result = json.loads(html)
            fake = False  # parsed OK; assume real content unless the response says otherwise
            fake = js_result[0]['fake']
        except (ValueError, IndexError, KeyError, TypeError):
            pass

        if fake or (fake is None and not re.search(LOGOUT, html)):
            log_utils.log('Logging in for url (%s)' % (url), xbmc.LOGDEBUG)
            self.__login()
            html = super(DirectDownload_Scraper, self)._cached_http_get(
                url, self.base_url, self.timeout, data=data, cache_limit=0)

        return html
Example #30
 def search(self, video_type, title, year, season=''):
     search_url = urlparse.urljoin(self.base_url, '/search.php?q=%s&limit=20&timestamp=%s' % (urllib.quote_plus(title), int(time.time())))
     html = self._http_get(search_url, cache_limit=.25)
     results = []
     items = dom_parser.parse_dom(html, 'li')
     if len(items) >= 2:
         items = items[1:]
         for item in items:
             match_url = dom_parser.parse_dom(item, 'a', ret='href')
             match_title_year = dom_parser.parse_dom(item, 'strong')
             if match_url and match_title_year:
                 match_url = match_url[0]
                 match_title_year = re.sub('</?strong>', '', match_title_year[0])
                 is_season = re.search('S(?:eason\s+)?(\d+)$', match_title_year, re.I)
                 if (not is_season and video_type == VIDEO_TYPES.MOVIE) or (is_season and video_type == VIDEO_TYPES.SEASON):
                     if video_type == VIDEO_TYPES.MOVIE:
                         match = re.search('(.*?)(?:\s+\(?(\d{4})\)?)', match_title_year)
                         if match:
                             match_title, match_year = match.groups()
                         else:
                             match_title = match_title_year
                             match_year = ''
                     else:
                         log_utils.log(is_season.group(1))
                         if season and int(is_season.group(1)) != int(season):
                             continue
                         match_title = match_title_year
                         match_year = ''
                 
                     result = {'title': match_title, 'year': match_year, 'url': scraper_utils.pathify_url(match_url)}
                     results.append(result)
     return results
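
The is_season check accepts both long and short season suffixes, and everything else falls through to the movie branch:

import re

for s in ('Breaking Bad Season 5', 'Breaking Bad S5', 'Heat (1995)'):
    print re.search(r'S(?:eason\s+)?(\d+)$', s, re.I)  # match, match, None
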
Example #31
    def _get_episode_url(self, show_url, video):
        params = urlparse.parse_qs(show_url)
        cmd = '{"jsonrpc": "2.0", "method": "VideoLibrary.GetEpisodes", "params": {"tvshowid": %s, "season": %s, "filter": {"field": "%s", "operator": "is", "value": "%s"}, \
        "limits": { "start" : 0, "end": 25 }, "properties" : ["title", "season", "episode", "file", "streamdetails"], "sort": { "order": "ascending", "method": "label", "ignorearticle": true }}, "id": "libTvShows"}'
        base_url = 'video_type=%s&id=%s'
        episodes = []

        force_title = self._force_title(video)

        if not force_title:
            run = cmd % (params['id'][0], video.season, 'episode', video.episode)
            meta = xbmc.executeJSONRPC(run)
            meta = json.loads(meta)
            log_utils.log('Episode Meta: %s' % (meta), xbmc.LOGDEBUG)
            if 'result' in meta and 'episodes' in meta['result']:
                episodes = meta['result']['episodes']
        else:
            log_utils.log('Skipping S&E matching as title search is forced on: %s' % (video.trakt_id), xbmc.LOGDEBUG)

        if (force_title or xbmcaddon.Addon().getSetting('title-fallback') == 'true') and video.ep_title:
            run = cmd % (params['id'][0], video.season, 'title', video.ep_title)
            meta = xbmc.executeJSONRPC(run)
            meta = json.loads(meta)
            log_utils.log('Episode Title Meta: %s' % (meta), xbmc.LOGDEBUG)
            if 'result' in meta and 'episodes' in meta['result']:
                episodes = meta['result']['episodes']

        for episode in episodes:
            if episode['file'].endswith('.strm'):
                continue
            
            return base_url % (video.video_type, episode['episodeid'])
Example #32
    def search(self, video_type, title, year):
        search_url = urlparse.urljoin(self.base_url,
                                      '/index.php?menu=search&query=')
        search_url += urllib.quote_plus(title)
        html = self._http_get(search_url, cache_limit=.25)
        results = []

        # filter the html down to only tvshow or movie results
        if video_type in [
                VIDEO_TYPES.TVSHOW, VIDEO_TYPES.SEASON, VIDEO_TYPES.EPISODE
        ]:
            pattern = 'id="series".*'
            pattern2 = '<a title="Watch (.*?) Online For FREE".*?href="([^"]+)".*\((\d{1,4})\)</a>'
        else:
            pattern = 'id="movies".*id="series"'
            pattern2 = 'visible-sm">\s+<a\s+title="([^"]+)\s+(\d{4})".*?href="([^"]+)"'
        match = re.search(pattern, html, re.DOTALL)
        if match:
            try:
                fragment = match.group(0)
                for match in re.finditer(pattern2, fragment):
                    result = {}

                    if video_type == VIDEO_TYPES.MOVIE:
                        res_title, res_year, url = match.groups('')
                    else:
                        res_title, url, res_year = match.groups('')

                    if not year or year == res_year:
                        result['title'] = res_title
                        result['url'] = url.replace(self.base_url, '')
                        result['year'] = res_year
                        results.append(result)
            except Exception as e:
                log_utils.log(
                    'Failure during %s search: |%s|%s|%s| (%s)' %
                    (self.get_name(), video_type, title, year, str(e)),
                    xbmc.LOGWARNING)

        return results
Example #33
    def onPlayBackStarted(self):
        log_utils.log('Service: Playback started', log_utils.LOGNOTICE)
        playing = self.win.getProperty('salts.playing') == 'True'
        self.trakt_id = self.win.getProperty('salts.playing.trakt_id')
        self.season = self.win.getProperty('salts.playing.season')
        self.episode = self.win.getProperty('salts.playing.episode')
        srt_path = self.win.getProperty('salts.playing.srt')
        trakt_resume = self.win.getProperty('salts.playing.trakt_resume')
        salts_resume = self.win.getProperty('salts.playing.salts_resume')
        if playing:  # Playback is ours
            log_utils.log('Service: tracking progress...', log_utils.LOGNOTICE)
            self.tracked = True
            if srt_path:
                log_utils.log('Service: Enabling subtitles: %s' % (srt_path),
                              log_utils.LOGDEBUG)
                self.setSubtitles(srt_path)
            else:
                self.showSubtitles(False)

        self._totalTime = 0
        while self._totalTime == 0:
            try:
                self._totalTime = self.getTotalTime()
            except RuntimeError:
                self._totalTime = 0
                break
            xbmc.sleep(1000)

        if salts_resume:
            log_utils.log(
                "Salts Local Resume: Resume Time: %s Total Time: %s" %
                (salts_resume, self._totalTime), log_utils.LOGDEBUG)
            self.seekTime(float(salts_resume))
        elif trakt_resume:
            resume_time = float(trakt_resume) * self._totalTime / 100
            log_utils.log(
                "Salts Trakt Resume: Percent: %s, Resume Time: %s Total Time: %s"
                % (trakt_resume, resume_time, self._totalTime),
                log_utils.LOGDEBUG)
            self.seekTime(resume_time)
Example #34
    def __show_ice_ad(self, ad_url, ice_referer):
        if not ad_url: return
        wdlg = xbmcgui.WindowDialog()
        try:
            if not ad_url.startswith('http:'): ad_url = 'http:' + ad_url
            log_utils.log('Getting ad page: %s' % (ad_url), xbmc.LOGDEBUG)
            headers = {'Referer': ice_referer}
            html = self._http_get(ad_url, headers=headers, cache_limit=0)
            headers = {'Referer': ad_url}
            for match in re.finditer("<img\s+src='([^']+)'\s+width='(\d+)'\s+height='(\d+)'", html):
                img_url, width, height = match.groups()
                img_url = img_url.replace('&amp;', '&')
                width = int(width)
                height = int(height)
                log_utils.log('Image in page: |%s| - (%dx%d)' % (img_url, width, height), xbmc.LOGDEBUG)
                if width > 0 and height > 0:
                    left = (1280 - width) / 2
                    img = xbmcgui.ControlImage(left, 0, width, height, img_url)
                    wdlg.addControl(img)
                else:
                    _html = self._http_get(img_url, headers=headers, cache_limit=0)

            wdlg.show()
            dialog = xbmcgui.Dialog()
            dialog.ok('Stream All The Sources', 'Continue to Video')
            match = re.search("href='([^']+)", html)
            if match and random.randint(0, 100) < 5:
                log_utils.log('Link Clicked: %s' % (match.group(1)), xbmc.LOGDEBUG)
                html = self._http_get(match.group(1), cache_limit=0)
                match = re.search("location=decode\('([^']+)", html)
                if match:
                    _html = self._http_get(match.group(1), cache_limit=0)
        finally:
            wdlg.close()
Example #35
    def get_sources(self, video):
        source_url = self.get_url(video)
        hosters = []
        if source_url:
            url = urlparse.urljoin(self.base_url, source_url)
            html = self._http_get(url, cache_limit=.5)
            js_result = scraper_utils.parse_json(html, url)
            if 'error' in js_result:
                log_utils.log('DD.tv API error: "%s" @ %s' % (js_result['error'], url), log_utils.LOGWARNING)
                return hosters

            query = urlparse.parse_qs(urlparse.urlparse(url).query)
            match_quality = self.q_order
            if 'quality' in query:
                temp_quality = re.sub('\s', '', query['quality'][0])
                match_quality = temp_quality.split(',')

            sxe_str = '.S%02dE%02d.' % (int(video.season), int(video.episode))
            try:
                airdate_str = video.ep_airdate.strftime('.%Y.%m.%d.')
            except AttributeError:  # ep_airdate may be None
                airdate_str = ''
                
            for result in js_result:
                if sxe_str not in result['release'] and airdate_str not in result['release']:
                    continue
                
                if result['quality'] in match_quality:
                    for key in result['links']:
                        url = result['links'][key][0]
                        if re.search('\.rar(\.|$)', url):
                            continue
                        
                        hostname = urlparse.urlparse(url).hostname
                        hoster = {'multi-part': False, 'class': self, 'views': None, 'url': url, 'rating': None, 'host': hostname, 'quality': QUALITY_MAP[result['quality']], 'direct': False}
                        hoster['dd_qual'] = result['quality']
                        if 'x265' in result['release'] and result['quality'] != '1080P-X265': hoster['dd_qual'] += '-x265'
                        hosters.append(hoster)

        return hosters
Example #36
 def __update_scraper_py(self):
     try:
         py_path = os.path.join(kodi.get_path(), 'scrapers', 'shush_scraper.py')
         exists = os.path.exists(py_path)
         scraper_url = kodi.get_setting('%s-scraper_url' % (self.get_name()))
         scraper_password = kodi.get_setting('%s-scraper_password' % (self.get_name()))
         if scraper_url and scraper_password and (not exists or os.path.getmtime(py_path) < time.time() - (4 * 60 * 60)):
             try:
                 req = urllib2.urlopen(scraper_url)
                 cipher_text = req.read()
             except Exception as e:
                 log_utils.log('Failure during %s scraper get: %s' % (self.get_name(), e), log_utils.LOGWARNING)
                 return
             
             if cipher_text:
                 scraper_key = hashlib.sha256(scraper_password).digest()
                 decrypter = pyaes.Decrypter(pyaes.AESModeOfOperationCBC(scraper_key, IV))
                 new_py = decrypter.feed(cipher_text)
                 new_py += decrypter.feed()
                 
                 old_py = ''
                 if os.path.exists(py_path):
                     with open(py_path, 'r') as f:
                         old_py = f.read()
                 
                 log_utils.log('%s path: %s, new_py: %s, match: %s' % (self.get_name(), py_path, bool(new_py), new_py == old_py), log_utils.LOGDEBUG)
                 if old_py != new_py:
                     with open(py_path, 'w') as f:
                         f.write(new_py)
     except Exception as e:
         log_utils.log('Failure during %s scraper update: %s' % (self.get_name(), e), log_utils.LOGWARNING)
Example #37
 def get_url(self, video):
     url = None
     result = self.db_connection.get_related_url(video.video_type, video.title, video.year, self.get_name(), video.season, video.episode)
     if result:
         url = result[0][0]
         log_utils.log('Got local related url: |%s|%s|%s|%s|%s|' % (video.video_type, video.title, video.year, self.get_name(), url))
     else:
         select = int(xbmcaddon.Addon().getSetting('%s-select' % (self.get_name())))
         if video.video_type == VIDEO_TYPES.EPISODE:
             if not self._force_title(video):
                 search_title = '%s S%02dE%02d' % (video.title, int(video.season), int(video.episode))
             else:
                 if not video.ep_title: return None
                 search_title = '%s %s' % (video.title, video.ep_title)
         else:
             search_title = '%s %s' % (video.title, video.year)
         results = self.search(video.video_type, search_title, video.year)
         if results:
             # episodes don't tell us the quality on the search screen so just return the 1st result
             if select == 0 or video.video_type == VIDEO_TYPES.EPISODE:
                 best_result = results[0]
             else:
                 best_result = results[0]  # fall back to the first result if no quality tag matches
                 best_qorder = 0
                 best_qstr = ''
                 for result in results:
                     match = re.search('\[(.*)\]$', result['title'])
                     if match:
                         q_str = match.group(1)
                         quality = self._blog_get_quality(video, q_str, '')
                         if Q_ORDER[quality] >= best_qorder:
                             if Q_ORDER[quality] > best_qorder or (quality == QUALITIES.HD and '1080' in q_str and '1080' not in best_qstr):
                                 best_qstr = q_str
                                 best_result = result
                                 best_qorder = Q_ORDER[quality]

             url = best_result['url']
             self.db_connection.set_related_url(video.video_type, video.title, video.year, self.get_name(), url)
     return url
Example #38
    def resolve_link(self, link):
        url = '/player/pk/pk/plugins/player_p2.php'
        url = urlparse.urljoin(self.base_url, url)
        data = {'url': link}
        html = ''
        tries = 1
        while tries <= MAX_TRIES:
            html = self._http_get(url, data=data, cache_limit=0)
            log_utils.log('Initial Data (%s): |%s|' % (tries, html),
                          xbmc.LOGDEBUG)
            if html.strip():
                break
            tries += 1
        else:
            return None

        js_data = json.loads(html)
        if 'captcha' in js_data[0]:
            tries = 1
            while tries <= MAX_TRIES:
                data['type'] = js_data[0]['captcha']
                captcha_result = self._do_recaptcha(js_data[0]['k'], tries,
                                                    MAX_TRIES)
                data['chall'] = captcha_result['recaptcha_challenge_field']
                data['res'] = captcha_result['recaptcha_response_field']
                html = self._http_get(url, data=data, cache_limit=0)
                log_utils.log('2nd Data (%s): %s' % (tries, html),
                              xbmc.LOGDEBUG)
                if html:
                    js_data = json.loads(html)
                    if 'captcha' not in js_data[0]:
                        break
                tries += 1
            else:
                return None

        for elem in js_data:
            if elem['type'].startswith('video'):
                url = elem['url']
        return url
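
Both retry loops rely on Python's while/else: the else suite runs only when the loop finishes without break, i.e. when every try failed. In isolation (attempt is a stand-in for the real _http_get call):

MAX_TRIES = 3


def attempt(n):
    # stand-in for the real network call; pretend only the 3rd try succeeds
    return n == 3


tries = 1
while tries <= MAX_TRIES:
    if attempt(tries):
        break
    tries += 1
else:
    print 'every attempt failed'  # runs only when the loop was not broken out of
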
Example #39
    def _default_get_episode_url(self, show_url, video, episode_pattern, title_pattern='', airdate_pattern='', data=None, headers=None):
        log_utils.log('Default Episode Url: |%s|%s|%s|%s|' % (self.base_url, show_url, str(video).decode('utf-8', 'replace'), data), log_utils.LOGDEBUG)
        url = urlparse.urljoin(self.base_url, show_url)
        html = self._http_get(url, data=data, headers=headers, cache_limit=2)
        if html:
            force_title = self._force_title(video)

            if not force_title:
                match = re.search(episode_pattern, html, re.DOTALL)
                if match:
                    return self._pathify_url(match.group(1))

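                # optionally fall back to matching the episode by its air date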
                if kodi.get_setting('airdate-fallback') == 'true' and airdate_pattern and video.ep_airdate:
                    airdate_pattern = airdate_pattern.replace('{year}', str(video.ep_airdate.year))
                    airdate_pattern = airdate_pattern.replace('{month}', str(video.ep_airdate.month))
                    airdate_pattern = airdate_pattern.replace('{p_month}', '%02d' % (video.ep_airdate.month))
                    airdate_pattern = airdate_pattern.replace('{month_name}', MONTHS[video.ep_airdate.month - 1])
                    airdate_pattern = airdate_pattern.replace('{short_month}', SHORT_MONS[video.ep_airdate.month - 1])
                    airdate_pattern = airdate_pattern.replace('{day}', str(video.ep_airdate.day))
                    airdate_pattern = airdate_pattern.replace('{p_day}', '%02d' % (video.ep_airdate.day))
                    log_utils.log('Air Date Pattern: %s' % (airdate_pattern), log_utils.LOGDEBUG)

                    match = re.search(airdate_pattern, html, re.DOTALL | re.I)
                    if match:
                        return self._pathify_url(match.group(1))
            else:
                log_utils.log('Skipping S&E matching as title search is forced on: %s' % (video.trakt_id), log_utils.LOGDEBUG)

            if (force_title or kodi.get_setting('title-fallback') == 'true') and video.ep_title and title_pattern:
                norm_title = self._normalize_title(video.ep_title)
                for match in re.finditer(title_pattern, html, re.DOTALL | re.I):
                    url, title = match.groups()
                    if norm_title == self._normalize_title(title):
                        return self._pathify_url(url)
Example #40
    def _parse_google(self, link):
        sources = []
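        # the fragment after '#' identifies which entry in the photo feed holds the video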
        i = link.rfind('#')
        if i > -1:
            link_id = link[i + 1:]
            html = self._http_get(link, cache_limit=.5)
            match = re.search(r'feedPreload:\s*(.*}]}})},', html, re.DOTALL)
            if match:
                try:
                    js = json.loads(match.group(1))
                except ValueError:
                    log_utils.log('Invalid JSON returned for: %s' % (link),
                                  xbmc.LOGWARNING)
                else:
                    for item in js['feed']['entry']:
                        if item['gphoto$id'] == link_id:
                            for media in item['media']['content']:
                                if media['type'].startswith('video'):
                                    sources.append(media['url'].replace('%3D', '='))

        return sources
Example #41
    def get_sources(self, video):
        source_url = self.get_url(video)
        hosters = []
        if source_url:
            params = urlparse.parse_qs(source_url)
            if video.video_type == VIDEO_TYPES.MOVIE:
                cmd = '{"jsonrpc": "2.0", "method": "VideoLibrary.GetMovieDetails", "params": {"movieid": %s, "properties" : ["file", "playcount", "streamdetails"]}, "id": "libMovies"}'
                result_key = 'moviedetails'
            else:
                cmd = '{"jsonrpc": "2.0", "method": "VideoLibrary.GetEpisodeDetails", "params": {"episodeid": %s, "properties" : ["file", "playcount", "streamdetails"]}, "id": "libTvShows"}'
                result_key = 'episodedetails'

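            # ask the local Kodi library for the item's file path, play count and stream details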
            run = cmd % (params['id'][0])
            meta = xbmc.executeJSONRPC(run)
            meta = json.loads(meta)
            log_utils.log('Source Meta: %s' % (meta), xbmc.LOGDEBUG)
            if 'result' in meta and result_key in meta['result']:
                details = meta['result'][result_key]
                def_quality = [
                    item[0] for item in sorted(SORT_KEYS['quality'].items(),
                                               key=lambda x: x[1])
                ][self.def_quality]
                host = {
                    'multi-part': False,
                    'class': self,
                    'url': details['file'],
                    'host': 'XBMC Library',
                    'quality': def_quality,
                    'views': details['playcount'],
                    'rating': None,
                    'direct': True
                }
                stream_details = details['streamdetails']
                if stream_details['video'] and 'width' in stream_details['video'][0]:
                    host['quality'] = self._width_get_quality(stream_details['video'][0]['width'])
                hosters.append(host)
        return hosters
Example #42
    def search(self, video_type, title, year, season=''):
        results = []
        search_url = urlparse.urljoin(self.base_url, '/search?query=')
        search_url += urllib.quote_plus(title.replace("'", ""))
        html = self._http_get(search_url, cache_limit=.25)
        js_result = scraper_utils.parse_json(html, search_url)
        if 'error' in js_result:
            log_utils.log(
                'DD.tv API error: "%s" @ %s' %
                (js_result['error'], search_url), log_utils.LOGWARNING)
            return results

        for match in js_result:
            url = search_url + '&quality=%s' % match['quality']
            result = {
                'url': scraper_utils.pathify_url(url),
                'title': scraper_utils.cleanse_title(match['release']),
                'quality': match['quality'],
                'year': ''
            }
            results.append(result)
        return results
Example #43
 def search(self, video_type, title, year):
     results = []
     search_url = urlparse.urljoin(self.base_url, SEARCH_URL)
     search_url = search_url % (urllib.quote_plus(title))
     html = self._http_get(search_url, cache_limit=.25)
     if html:
         try:
             js_data = json.loads(html)
         except ValueError:
             log_utils.log('Invalid JSON returned for: %s' % (search_url),
                           xbmc.LOGWARNING)
         else:
             if 'data' in js_data and 'films' in js_data['data']:
                 for item in js_data['data']['films']:
                     result_url = RESULT_URL % (video_type, item['id'])
                     result = {
                         'title': item['title'],
                         'url': result_url,
                         'year': ''
                     }
                     results.append(result)
     return results
Example #44
    def search(self, video_type, title, year):
        results = []
        search_url = urlparse.urljoin(self.base_url, SEARCH_URL)
        search_url = search_url % (urllib.quote_plus(title),
                                   str(int(time.time() * 1000)))
        html = self._http_get(search_url, headers=XHR, cache_limit=1)
        try:
            js_result = json.loads(html)
        except ValueError:
            log_utils.log('Invalid JSON returned: %s: %s' % (search_url, html),
                          log_utils.LOGWARNING)
        else:
            if js_result:
                for item in js_result:
                    result = {
                        'url': self._pathify_url(item['url']),
                        'title': item['name'],
                        'year': ''
                    }
                    results.append(result)

        return results
Example #45
    def _http_get(self, url, data=None, cache_limit=8):
        # serve a blank, uncached page when no username or password is configured
        if not self.username or not self.password:
            return ''

        html = super(NoobRoom_Scraper,
                     self)._cached_http_get(url,
                                            self.base_url,
                                            self.timeout,
                                            data=data,
                                            cache_limit=cache_limit)
        if 'href="logout.php"' not in html:
            log_utils.log('Logging in for url (%s)' % (url), xbmc.LOGDEBUG)
            self.__login(html)
            html = super(NoobRoom_Scraper,
                         self)._cached_http_get(url,
                                                self.base_url,
                                                self.timeout,
                                                data=data,
                                                cache_limit=0)

        return html
Example #46
        def onInit(self):
            log_utils.log('onInit:', log_utils.LOGDEBUG)
            self.OK = False
            self.radio_buttons = []
            posy = starty
            for label in RADIO_BUTTONS:
                self.radio_buttons.append(
                    self.__get_radio_button(posx, posy, label))
                posy += gap

            try:
                responses = json.loads(kodi.get_setting('prev_responses'))
            except (ValueError, TypeError):
                # no (or corrupt) saved responses: default everything to selected
                responses = [True] * len(self.radio_buttons)
            if len(responses) < len(self.radio_buttons):
                responses += [True] * (len(self.radio_buttons) - len(responses))

            self.addControls(self.radio_buttons)
            last_button = None
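            # chain the radio buttons so up/down and left/right move between neighbours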
            for response, radio_button in zip(responses, self.radio_buttons):
                radio_button.setSelected(response)
                if last_button is not None:
                    radio_button.controlUp(last_button)
                    radio_button.controlLeft(last_button)
                    last_button.controlDown(radio_button)
                    last_button.controlRight(radio_button)
                last_button = radio_button

            continue_ctrl = self.getControl(CONTINUE_BUTTON)
            cancel_ctrl = self.getControl(CANCEL_BUTTON)
            self.radio_buttons[0].controlUp(cancel_ctrl)
            self.radio_buttons[0].controlLeft(cancel_ctrl)
            self.radio_buttons[-1].controlDown(continue_ctrl)
            self.radio_buttons[-1].controlRight(continue_ctrl)
            continue_ctrl.controlUp(self.radio_buttons[-1])
            continue_ctrl.controlLeft(self.radio_buttons[-1])
            cancel_ctrl.controlDown(self.radio_buttons[0])
            cancel_ctrl.controlRight(self.radio_buttons[0])
Example #47
    def __get_video(self, video_id, part_name, page):
        hosters = []
        part_count = 1

        video_url = urlparse.urljoin(self.base_url, GET_VIDEO_URL)
        data = {'video_id': video_id, 'part_name': part_name, 'page': page}
        html = self._http_get(video_url,
                              data=data,
                              headers=XHR,
                              cache_limit=.25)
        try:
            js_result = json.loads(html)
        except ValueError:
            log_utils.log('Invalid JSON returned: %s: %s' % (video_url, html),
                          log_utils.LOGWARNING)
        else:
            if 'part_count' in js_result:
                part_count = js_result['part_count']

            if 'part' in js_result and 'code' in js_result['part']:
                hosters = self.__get_links(js_result['part']['code'])
        return part_count, hosters
Example #48
    def _http_get(self, url, data=None, auth=True, cache_limit=8):
        # serve a blank, uncached page when no username or password is configured
        if not self.username or not self.password:
            return ''

        html = super(Niter_Scraper,
                     self)._cached_http_get(url,
                                            self.base_url,
                                            self.timeout,
                                            data=data,
                                            cache_limit=cache_limit)
        if auth and not re.search('href="[^"]+/logout"', html):
            log_utils.log('Logging in for url (%s)' % (url),
                          log_utils.LOGDEBUG)
            self.__login()
            html = super(Niter_Scraper, self)._cached_http_get(url,
                                                               self.base_url,
                                                               self.timeout,
                                                               data=data,
                                                               cache_limit=0)

        return html
Example #49
 def search(self, video_type, title, year):
     results = []
     search_url = urlparse.urljoin(self.base_url, SEARCH_URL % (urllib.quote_plus(title)))
     html = self._http_get(search_url, cache_limit=.25)
     try:
         js_data = json.loads(html)
     except ValueError:
         log_utils.log('Invalid JSON returned for: %s' % (search_url), xbmc.LOGWARNING)
     else:
         for item in js_data['categories']:
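             # catalog names look like 'Title (2014)' or 'Title (2014-2016)'; split the title from the year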
             match = re.search(r'(.*?)\s+\((\d{4}).?\d{0,4}\s*\)', item['catalog_name'])
             if match:
                 match_title, match_year = match.groups()
             else:
                 match_title = item['catalog_name']
                 match_year = ''
             
             if not year or not match_year or year == match_year:
                 result_url = RESULT_URL % (video_type, item['catalog_id'])
                 result = {'title': match_title, 'url': result_url, 'year': match_year}
                 results.append(result)
     return results
Example #50
    def get_url(self, video):
        url = None
        self.create_db_connection()
        result = self.db_connection.get_related_url(video.video_type,
                                                    video.title, video.year,
                                                    self.get_name(),
                                                    video.season,
                                                    video.episode)
        if result:
            url = result[0][0]
            log_utils.log(
                'Got local related url: |%s|%s|%s|%s|%s|' %
                (video.video_type, video.title, video.year, self.get_name(),
                 url), log_utils.LOGDEBUG)
        else:
            date_match = False
            search_title = '%s S%02dE%02d' % (video.title, int(video.season), int(video.episode))
            results = self.search(video.video_type, search_title, '')
            if not results and video.ep_airdate is not None:
                search_title = '%s %s' % (video.title, video.ep_airdate.strftime('%Y.%m.%d'))
                results = self.search(video.video_type, search_title, '')
                date_match = True

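            # keep the highest-quality result; a date-based search must also match the date in the title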
            best_q_index = -1
            for result in results:
                if date_match and video.ep_airdate.strftime('%Y.%m.%d') not in result['title']:
                    continue

                if Q_DICT[result['quality']] > best_q_index:
                    best_q_index = Q_DICT[result['quality']]
                    url = result['url']
            self.db_connection.set_related_url(video.video_type,
                                               video.title, video.year,
                                               self.get_name(), url,
                                               video.season, video.episode)
        return url
Example #51
    def _http_get(self, url, data=None, cache_limit=8):
        # serve a blank, uncached page when no username or password is configured
        if not self.username or not self.password:
            return ''

        html = super(Flixanity_Scraper,
                     self)._cached_http_get(url,
                                            self.base_url,
                                            self.timeout,
                                            data=data,
                                            cache_limit=cache_limit)
        if '<span>Log In</span>' in html:
            log_utils.log('Logging in for url (%s)' % (url), xbmc.LOGDEBUG)
            self.__login()
            html = super(Flixanity_Scraper,
                         self)._cached_http_get(url,
                                                self.base_url,
                                                self.timeout,
                                                data=data,
                                                cache_limit=0)

        return html
Example #52
    def onPlayBackStopped(self):
        log_utils.log('Service: Playback Stopped')
        if self.tracked:
            # if SALTS queued a single-item playlist, clear it; get_sources uses the
            # playlist to determine the playback method
            pl = xbmc.PlayList(xbmc.PLAYLIST_VIDEO)
            plugin_url = 'plugin://%s/' % (kodi.get_id())
            if pl.size() == 1 and pl[0].getfilename().lower().startswith(
                    plugin_url):
                log_utils.log('Service: Clearing Single Item SALTS Playlist',
                              log_utils.LOGDEBUG)
                pl.clear()

            playedTime = float(self._lastPos)
            try:
                percent_played = int((playedTime / self._totalTime) * 100)
            except ZeroDivisionError:
                percent_played = 0  # guard div by zero
            pTime = utils2.format_time(playedTime)
            tTime = utils2.format_time(self._totalTime)
            log_utils.log(
                'Service: Played %s of %s total = %s%%' %
                (pTime, tTime, percent_played), log_utils.LOGDEBUG)
            if playedTime == 0 and self._totalTime == 999999:
                log_utils.log('Kodi silently failed to start playback',
                              log_utils.LOGWARNING)
            elif playedTime >= 5:
                log_utils.log(
                    'Service: Setting bookmark on |%s|%s|%s| to %s seconds' %
                    (self.trakt_id, self.season, self.episode, playedTime),
                    log_utils.LOGDEBUG)
                db_connection.set_bookmark(self.trakt_id, playedTime,
                                           self.season, self.episode)
                if percent_played >= 75:
                    if xbmc.getCondVisibility('System.HasAddon(script.trakt)'):
                        run = 'RunScript(script.trakt, action=sync, silent=True)'
                        xbmc.executebuiltin(run)
            self.reset()
Example #53
    def _set_cookies(self, base_url, cookies):
        domain = urlparse.urlsplit(base_url).hostname
        cookie_file = os.path.join(COOKIEPATH,
                                   '%s_cookies.lwp' % (self.get_name()))
        cj = cookielib.LWPCookieJar(cookie_file)
        opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj))
        urllib2.install_opener(opener)

        try:
            cj.load(ignore_discard=True)
        except IOError:
            pass  # no existing cookie file yet
        if xbmcaddon.Addon().getSetting('cookie_debug') == 'true':
            log_utils.log('Before Cookies: %s' % (self.cookies_as_str(cj)),
                          xbmc.LOGDEBUG)
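        # write each requested cookie into the jar for the site's domain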
        for key in cookies:
            c = cookielib.Cookie(0,
                                 key,
                                 cookies[key],
                                 port=None,
                                 port_specified=False,
                                 domain=domain,
                                 domain_specified=True,
                                 domain_initial_dot=False,
                                 path='/',
                                 path_specified=True,
                                 secure=False,
                                 expires=None,
                                 discard=False,
                                 comment=None,
                                 comment_url=None,
                                 rest={})
            cj.set_cookie(c)
        cj.save(ignore_discard=True, ignore_expires=True)
        if xbmcaddon.Addon().getSetting('cookie_debug') == 'true':
            log_utils.log('After Cookies: %s' % (self.cookies_as_str(cj)),
                          xbmc.LOGDEBUG)
        return cj
Example #54
    def __get_links(self, url, video):
        hosters = []
        seen_urls = set()
        for search_type in SEARCH_TYPES:
            search_url = self.__translate_search(url, search_type)
            html = self._http_get(search_url, cache_limit=.5)
            js_result = scraper_utils.parse_json(html, search_url)
            if js_result['status'] == 'success':
                for result in js_result['result']:
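                    # keep only single-hoster, non-rar results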
                    if len(result['hosterurls']) > 1: continue
                    if result['extension'] == 'rar': continue

                    stream_url = result['hosterurls'][0]['url']
                    if stream_url not in seen_urls:
                        if scraper_utils.title_check(video, result['title']):
                            host = urlparse.urlsplit(stream_url).hostname
                            quality = scraper_utils.get_quality(
                                video, host,
                                self._get_title_quality(result['title']))
                            hoster = {
                                'multi-part': False,
                                'class': self,
                                'views': None,
                                'url': stream_url,
                                'rating': None,
                                'host': host,
                                'quality': quality,
                                'direct': False
                            }
                            hoster['extra'] = result['title']
                            hosters.append(hoster)
                            seen_urls.add(stream_url)
            else:
                log_utils.log(
                    'Alluc API Error: %s: %s' %
                    (search_url, js_result['message']), log_utils.LOGWARNING)

        return hosters
Example #55
    def _http_get(self, url, data=None, headers=None, cache_limit=8):
        # serve a blank, uncached page when no username or password is configured
        if not self.username or not self.password:
            return ''

        html = self._cached_http_get(url,
                                     self.base_url,
                                     self.timeout,
                                     data=data,
                                     headers=headers,
                                     cache_limit=cache_limit)
        if 'href="logout.php"' not in html:
            log_utils.log('Logging in for url (%s)' % (url),
                          log_utils.LOGDEBUG)
            self.__login(html)
            html = self._cached_http_get(url,
                                         self.base_url,
                                         self.timeout,
                                         data=data,
                                         headers=headers,
                                         cache_limit=0)

        return html
Example #56
 def search(self, video_type, title, year):
     search_url = urlparse.urljoin(self.base_url, '/search?query=')
     search_url += urllib.quote_plus(title)
     html = self._http_get(search_url, cache_limit=.25)
     results = []
     if html:
         try:
             js_result = json.loads(html)
         except ValueError:
             log_utils.log(
                 'Invalid JSON returned: %s: %s' % (search_url, html),
                 xbmc.LOGWARNING)
         else:
             for match in js_result:
                 url = search_url + '&quality=%s' % match['quality']
                 result = {
                     'url': url.replace(self.base_url, ''),
                     'title': match['release'],
                     'quality': match['quality'],
                     'year': ''
                 }
                 results.append(result)
     return results
Example #57
    def get_sources(self, video):
        source_url = self.get_url(video)
        sources = []
        if source_url:
            params = urlparse.parse_qs(source_url)
            show_url = SOURCE_URL + params['catalog_id'][0]
            url = urlparse.urljoin(self.base_url, show_url)
            html = self._http_get(url, cache_limit=.5)
            try:
                js_data = json.loads(html)
                if video.video_type == VIDEO_TYPES.EPISODE:
                    js_data = self.__get_episode_json(params, js_data)
            except ValueError:
                log_utils.log('Invalid JSON returned for: %s' % (url), xbmc.LOGWARNING)
            else:
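                # film_link packs one or more urls; an embedded #height# tag carries the resolution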
                for film in js_data['films']:
                    for match in re.finditer(r'(http.*?(?:#(\d+)#)?)(?=http|$)', film['film_link']):
                        link, height = match.groups()
                        if height is None: height = 360  # Assumed medium quality if not found
                        source = {'multi-part': False, 'url': link, 'host': self.get_name(), 'class': self, 'quality': self._height_get_quality(height), 'views': None, 'rating': None, 'direct': True, 'resolution': '%sp' % (height)}
                        sources.append(source)

        return sources
Example #58
 def __update_scraper_py(self):
     try:
         path = xbmcaddon.Addon().getAddonInfo('path')
         py_path = os.path.join(path, 'scrapers', 'shush_scraper.py')
         exists = os.path.exists(py_path)
         if not exists or os.path.getmtime(py_path) < time.time() - (4 * 60 * 60):
             cipher_text = self._http_get(PY_URL, cache_limit=4)
             if cipher_text:
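                 # decrypt the AES-CBC payload into plain python source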
                 decrypter = pyaes.Decrypter(pyaes.AESModeOfOperationCBC(KEY, IV))
                 new_py = decrypter.feed(cipher_text)
                 new_py += decrypter.feed()
                 
                 old_py = ''
                 if os.path.exists(py_path):
                     with open(py_path, 'r') as f:
                         old_py = f.read()
                 
                 log_utils.log('shush path: %s, new_py: %s, match: %s' % (py_path, bool(new_py), new_py == old_py), xbmc.LOGDEBUG)
                 if old_py != new_py:
                     with open(py_path, 'w') as f:
                         f.write(new_py)
     except Exception as e:
         log_utils.log('Failure during shush scraper update: %s' % (e), xbmc.LOGWARNING)
Example #59
 def _get_sucuri_cookie(self, html):
     if 'sucuri_cloudproxy_js' in html:
         match = re.search("S\s*=\s*'([^']+)", html)
         if match:
             s = base64.b64decode(match.group(1))
             s = s.replace(' ', '')
             s = re.sub('String\.fromCharCode\(([^)]+)\)', r'chr(\1)', s)
             s = re.sub('\.slice\((\d+),(\d+)\)', r'[\1:\2]', s)
             s = re.sub('\.charAt\(([^)]+)\)', r'[\1]', s)
             s = re.sub('\.substr\((\d+),(\d+)\)', r'[\1:\1+\2]', s)
             s = re.sub(';location.reload\(\);', '', s)
             s = re.sub(r'\n', '', s)
             s = re.sub(r'document\.cookie', 'cookie', s)
             try:
                 cookie = ''
                 exec(s)
                 match = re.match('([^=]+)=(.*)', cookie)
                 if match:
                     return {match.group(1): match.group(2)}
             except Exception as e:
                 log_utils.log('Exception during sucuri js: %s' % (e), log_utils.LOGWARNING)
     
     return {}
Example #60
    def search(self, video_type, title, year):
        filter_str = '{"field": "title", "operator": "contains", "value": "%s"}' % (
            title)
        if year:
            filter_str = '{"and": [%s, {"field": "year", "operator": "is", "value": "%s"}]}' % (
                filter_str, year)
        if video_type == VIDEO_TYPES.MOVIE:
            cmd = '{"jsonrpc": "2.0", "method": "VideoLibrary.GetMovies", "params": { "filter": %s, "limits": { "start" : 0, "end": 25 }, "properties" : ["title", "year", "file", "streamdetails"], \
            "sort": { "order": "ascending", "method": "label", "ignorearticle": true } }, "id": "libMovies"}'

            result_key = 'movies'
            id_key = 'movieid'
        else:
            cmd = '{"jsonrpc": "2.0", "method": "VideoLibrary.GetTVShows", "params": { "filter": %s, "limits": { "start" : 0, "end": 25 }, "properties" : ["title", "year"], \
            "sort": { "order": "ascending", "method": "label", "ignorearticle": true } }, "id": "libTvShows"}'

            result_key = 'tvshows'
            id_key = 'tvshowid'

        results = []
        cmd = cmd % (filter_str)
        meta = xbmc.executeJSONRPC(cmd)
        meta = json.loads(meta)
        log_utils.log('Search Meta: %s' % (meta), xbmc.LOGDEBUG)
        if 'result' in meta and result_key in meta['result']:
            for item in meta['result'][result_key]:
                if video_type == VIDEO_TYPES.MOVIE and item['file'].endswith('.strm'):
                    continue

                result = {
                    'title': item['title'],
                    'year': item['year'],
                    'url': 'video_type=%s&id=%s' % (video_type, item[id_key])
                }
                results.append(result)
        return results