    def __cache_find_absolute_episode(self, tvdb_id, first_air_date, season):
        sql_select = 'SELECT absolute_episode FROM episode_meta WHERE tvdb_id=? AND '
        if first_air_date != '' and season is not None:
            sql_select += 'premiered=? AND season=?'
            params = (tvdb_id, first_air_date, season)
        elif first_air_date != '':
            sql_select += 'premiered=?'
            params = (tvdb_id, first_air_date)
        elif season is not None:
            sql_select += 'season=? AND episode=1'
            params = (tvdb_id, season)
        else:
            sql_select += 'season=1 AND episode=1'
            params = (tvdb_id, )

        helper.log_debug('SQL select: %s with params %s' %
                         (sql_select, params))
        try:
            self.dbcur.execute(sql_select, params)
            allrows = self.dbcur.fetchall()
            # Prefer the first row with a positive absolute episode number
            matchedrow = allrows[0] if len(allrows) > 0 else None
            if len(allrows) > 1:
                for row in allrows:
                    if row['absolute_episode'] > 0:
                        matchedrow = row
                        break
        except Exception as e:
            helper.log_debug(
                '************* Error attempting to select from Episode table: %s '
                % e)
            return None

        return matchedrow['absolute_episode'] if matchedrow else None
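The row['absolute_episode'] lookup above assumes the cursor returns mapping-style rows. A minimal sketch of the connection setup this relies on (the database path is hypothetical; the real setup lives wherever self.dbcon and self.dbcur are created):

import sqlite3

dbcon = sqlite3.connect('video_cache.db')  # hypothetical path
dbcon.row_factory = sqlite3.Row            # rows become indexable by column name
dbcur = dbcon.cursor()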
    def get_metadata(self, name):
        if helper.get_setting('enable-metadata') == 'false':
            return {}

        # If we have no previous metadata, and this isn't a mismatch, then
        # we've already had a legitimate try with no luck.
        if not self.mismatch and (args.imdb_id is None
                                  and args.tmdb_id is None):
            helper.log_debug(
                'Not a mismatch and no previous results for movie')
            return {}

        imdb_id = args.imdb_id if args.imdb_id and not self.mismatch else ''
        tmdb_id = args.tmdb_id if args.tmdb_id and not self.mismatch else ''
        should_update = self.mismatch
        metadata = self.meta.get_meta('movie',
                                      name,
                                      imdb_id,
                                      tmdb_id,
                                      update=should_update)

        # Update the tvshow cache to nothing for this name
        if self.mismatch:
            self.meta.update_meta_to_nothing('tvshow', name)
            helper.log_debug('Movie mismatch - new meta: %s' % str(self.meta))

        return metadata
    def __init__(self, url_val=args.value, form_data=None):
        self.html = ''
        self.soup = None
        self.links = []
        self.has_next_page = False

        from resources.lib.metadata.loose_metahandlers import meta
        self.meta = meta
        from resources.lib.common.nethelpers import net, cookies
        self.net, self.cookies = net, cookies

        if not url_val:
            return

        assert (args.srctype == 'web')
        url = url_val if 'http' in url_val else (helper.domain_url() + url_val)
        self.html, e = self.net.get_html(url, self.cookies,
                                         helper.domain_url(), form_data)
        self.html = helper.handle_html_errors(self.html, e)
        helper.log_debug('HTML is %sempty' %
                         ('' if self.html == '' else 'not '))
        if helper.debug_dump_html():
            helper.log_debug('HTML DUMP: %s' % self.html)

        self.html = self._filter_html(self.html)
        self.soup = BeautifulSoup(self.html,
                                  "html.parser") if self.html != '' else None
    def parse(self):
        helper.start('MediaContainerList.parse')
        if self.soup is None:
            return

        timestamper = t_s.TimeStamper('MediaContainerList.parse')
        table = self.soup.find('table', class_='listing')
        if table is None:
            self.links = self.__parse_upcoming()
            timestamper.stamp_and_dump()
            return

        self.links = table.find_all('a', {'href': re.compile(r'/Anime/')})
        helper.log_debug('# of links found with href=/Anime/: %d' %
                         len(self.links))

        # Pagination support: the last two pager links should be "Next" and "Last"
        pager_section = self.soup.find('ul', class_='pager')
        if pager_section is not None:
            page_links = pager_section.find_all('a')
            if (len(page_links) >= 2 and page_links[-2].string and page_links[-1].string
                    and 'Next' in page_links[-2].string and 'Last' in page_links[-1].string):
                self.links.append(page_links[-2])
                self.links.append(page_links[-1])
                self.has_next_page = True
        helper.end('MediaContainerList.parse')
        timestamper.stamp_and_dump()
    def play(self):
        if self.link == '':
            return

        import urlresolver
        '''
            Kodi v17 updated Python from 2.7.5 (v16) to 2.7.10, which introduced this error
            when trying to resolve the url:
            http://stackoverflow.com/questions/27835619/ssl-certificate-verify-failed-error

            In Kodi v16, Python did not verify SSL certs (or at least ignored failures), but
            that changed with the upgrade to v17.  Sometimes the cert from the initially
            decrypted URL is invalid, so to avoid errors, we temporarily disable SSL
            verification to get the resolved URL.  Although this workaround isn't secure,
            it's no worse than the old behavior.
        '''
        try:
            url = urlresolver.resolve(self.link)
        except Exception:
            helper.log_debug(
                'Attempt to resolve URL failed; trying again with SSL verification temporarily disabled (v16 behavior)'
            )
            import ssl
            default_context = ssl._create_default_https_context  # save default context
            ssl._create_default_https_context = ssl._create_unverified_context
            try:
                url = urlresolver.resolve(self.link)
            finally:
                ssl._create_default_https_context = default_context  # restore default context

        helper.log_debug("UrlResolver's resolved link: %s" % url)
        helper.resolve_url(url)
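The save-patch-restore dance above can also be packaged as a context manager, so the default context is always restored even if the retry fails. A minimal sketch using only the stdlib (the same private ssl hooks as above, and just as insecure):

import ssl
from contextlib import contextmanager

@contextmanager
def unverified_ssl():
    # Temporarily swap in an HTTPS context that skips certificate verification
    saved = ssl._create_default_https_context
    ssl._create_default_https_context = ssl._create_unverified_context
    try:
        yield
    finally:
        ssl._create_default_https_context = saved  # always restored

With that, the retry becomes: with unverified_ssl(): url = urlresolver.resolve(self.link).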
    def _get_tvdb_meta(self, imdb_id, name, year=''):
        '''
        Requests meta data from TVDB and creates a proper dict to send back.
        This version is a bit looser in determining whether we can use the
        given results, and also checks against aliases.

        Args:
            imdb_id (str): IMDB ID
            name (str): full name of the movie you are searching for
        Kwargs:
            year (str): 4-digit year of the movie; when imdb_id is not available,
                        include the year whenever possible to maximize correct
                        search results.

        Returns:
            DICT. It must also return an empty dict when no movie meta info was
            found on TVDB, because we should cache these "none found" entries;
            otherwise we hit TVDB a lot.
        '''
        common.addon.log('Starting TVDB Lookup', 0)
        helper.start('_get_tvdb_meta')
        tvdb = TheTVDB(language=self._MetaData__get_tvdb_language())
        tvdb_id = ''

        try:
            if imdb_id:
                tvdb_id = tvdb.get_show_by_imdb(imdb_id)
                helper.log_debug(
                    'Attempt to get show from imdb_id %s gave us tvdb_id %s.' %
                    (imdb_id, tvdb_id))
        except Exception as e:
            common.addon.log(
                '************* Error retrieving from thetvdb.com: %s ' % e, 4)
            tvdb_id = ''
    def __get_key_from_html(self):
        # Find the last place that sets skH, the seed of the key
        set_skH_dict = {}
        for filename in ['vr.js', 'skH =']:
            set_skH_dict[filename] = self.html.rfind(filename)

        sorted_skH = sorted(set_skH_dict, key=set_skH_dict.get, reverse=True)
        last_set_skH_file = sorted_skH[0]
        last_set_skH_line = set_skH_dict[last_set_skH_file]

        skH = self.__get_base_skH(last_set_skH_file)

        # Find modifications that appear after the last line that sets skH
        js_dict = {}
        for f in ['shal.js', 'moon.js', 'file3.js']:
            line_num = self.html.find(f)
            if line_num > last_set_skH_line:
                js_dict[f] = line_num

        # Apply the modifications in order of appearance
        for filename in sorted(js_dict, key=js_dict.get):
            skH = self.__update_skH(skH, filename)

        # The key is the SHA-256 of the final seed, as raw bytes
        import hashlib
        key = hashlib.sha256(skH).hexdigest()
        helper.log_debug('Found the decryption key: %s' % key)

        return key.decode('hex')
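Note that hexdigest() decoded from hex is just the raw 32-byte digest, which matches the AES-256 key size. A quick check (Python 2, using the vr.js seed above):

import hashlib, binascii

seed = 'nhasasdbasdtene7230asb'
key_hex = hashlib.sha256(seed).hexdigest()
assert binascii.unhexlify(key_hex) == hashlib.sha256(seed).digest()  # 32 raw bytes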
    def _get_tvshow_backdrops(self, imdb_id, tvdb_id):
        # Some of us aren't so lucky as to have both an imdb_id and a tvdb_id
        if not imdb_id and not tvdb_id:
            helper.log_debug(
                'Cannot get tv show backdrop with neither type of id supplied')
            return ''

        sql_select = "SELECT backdrop_url FROM tvshow_meta WHERE %s=?" % (
            'imdb_id' if imdb_id else 'tvdb_id')
        id = imdb_id if imdb_id else str(tvdb_id)

        common.addon.log('SQL Select: %s params: %s' % (sql_select, id), 0)
        try:
            self.dbcur.execute(sql_select, (id, ))
            matchedrow = self.dbcur.fetchone()
        except Exception as e:
            common.addon.log(
                '************* Error attempting to select from tvshow_meta table: %s '
                % e, 4)
            pass
            return ''

        if matchedrow:
            return dict(matchedrow)['backdrop_url']
        else:
            return ''
    def __decrypt_link(self, url):
        iv = 'a5e8d2e9c1721ae0e84ad660c472c1f3'.decode('hex')
        if (helper.debug_decrypt_key() != ''):
            helper.log_debug('Using the key input from the debug settings')
            key = helper.debug_decrypt_key().decode('hex')
        else:
            helper.log_debug('Attempting to get key from html')
            key = self.__get_key_from_html()

        return self.__decrypt_text(key, iv, url)
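__decrypt_text is not shown in this listing; given the 16-byte hex IV and the SHA-256-derived key above, it is presumably AES-256 in CBC mode. A minimal sketch of such a routine using PyCrypto (an assumption, not necessarily the add-on's actual implementation):

from Crypto.Cipher import AES

def decrypt_text(key, iv, encrypted):
    # AES-256-CBC decrypt, then strip PKCS#7 padding
    cipher = AES.new(key, AES.MODE_CBC, iv)
    plaintext = cipher.decrypt(encrypted)
    pad_len = ord(plaintext[-1])  # the last byte encodes the padding length
    return plaintext[:-pad_len]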
    def __update_skH(self, skH, filename):
        if filename == 'shal.js':
            return skH + '6n23ncasdln213'
        elif filename == 'moon.js':
            return skH + 'znsdbasd012933473'
        elif filename == 'file3.js':
            return skH.replace('a', 'c')
        else:
            helper.log_debug('Failed to recognize skH modifier file %s' %
                             filename)
            return skH
    def _parseJSString(self, s):
        '''
            Credit to lambda - https://offshoregit.com/lambda81/
            plugin.video.genesis\resources\lib\libraries\cloudflare.py
        '''
        try:
            offset = 1 if s[0] == '+' else 0
            val = int(eval(s.replace('!+[]', '1').replace('!![]', '1')
                            .replace('[]', '0').replace('(', 'str(')[offset:]))
            return val
        except Exception as e:
            helper.log_debug('_parseJSString failed with exception %s' % str(e))
            return None
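To see what the substitutions do: parentheses become str() calls, so each bracket group evaluates to a digit and the digits concatenate as strings. For example:

s = '(!+[]+!![])+(!+[]+!![]+!![])'
t = s.replace('!+[]', '1').replace('!![]', '1').replace('[]', '0').replace('(', 'str(')
# t == 'str(1+1)+str(1+1+1)'; eval yields '2' + '3' == '23'
assert int(eval(t)) == 23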
    def parse(self):
        if self.soup is None:
            return

        self.quality_options = self.soup.find(id='slcQualix')
        if self.quality_options:
            helper.log_debug('Using KissAnime or Beta servers')
            self.links = self.quality_options.find_all('option')
        else:
            helper.log_debug(
                'Could not find KissAnime server links; attempting to find Openload link'
            )
            try:
                # The Openload link is the default video link
                video_str = self.__get_default_video_link()
                from bs4 import BeautifulSoup
                fake_soup = BeautifulSoup(
                    '<option value="%s">Openload</option>' % video_str,
                    "html.parser")
                self.links = fake_soup.find_all('option')
                helper.log_debug(
                    'Successfully found and parsed Openload link %s' %
                    self.links)
            except Exception as e:
                self.links = []
                helper.log_debug(
                    'Failed to parse Openload link with exception: %s' %
                    str(e))
                helper.show_error_dialog([
                    'Could not find supported video link for this episode/movie'
                ])
    def __get_base_skH(self, filename):
        if filename == 'vr.js':
            return 'nhasasdbasdtene7230asb'
        elif filename == 'skH =':
            # We need the last skH assignment before the call to ovelWrap
            split1 = self.html.split("ovelWrap($('#slcQualix').val())")[0]
            split2 = split1.split('skH =')[-2]
            obfuscated_list_str = '[' + split2.split('[')[-1].strip('; ')
            import ast
            obfuscated_list = ast.literal_eval(obfuscated_list_str)
            return obfuscated_list[0]
        else:
            helper.log_debug('Failed to recognize base skH file %s' % filename)
            return ''
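A hypothetical HTML fragment (illustrative only) showing what the 'skH =' branch extracts:

import ast

html = "var skH = ['abc123', 'junk']; skH = x; ovelWrap($('#slcQualix').val())"
split1 = html.split("ovelWrap($('#slcQualix').val())")[0]
split2 = split1.split('skH =')[-2]                  # " ['abc123', 'junk']; "
list_str = '[' + split2.split('[')[-1].strip('; ')  # "['abc123', 'junk']"
assert ast.literal_eval(list_str)[0] == 'abc123'    # the base seed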
    def get_html(self, url, cookies, referer, form_data=None):
        html = ''
        try:
            self.set_cookies(cookies)
            helper.log_debug('Performing a %s operation' % ('POST' if form_data else 'GET'))
            if form_data:
                html = self.http_POST(url, form_data, headers={'Referer': referer}).content
            else:
                html = self.http_GET(url, headers={'Referer': referer}).content
            if html != '':
                helper.log_debug("Saving cookies")
                self.save_cookies(cookies)
            helper.log_debug("Operation complete")
            return (html, None)
        except urllib2.URLError as e:
            return ('', e)
        except Exception as e:
            if helper.debug_dump_html():
                helper.log_debug("html response in exception: %s" % html)
            return ('', e)
    def search_and_update(self):
        while True:
            idx = helper.present_selection_dialog('Choose a title to search for', self.options)
            helper.log_debug('User selected index %d' % idx)
            default_text = self.options[idx] if idx != 0 else ''
            search_string = helper.get_user_input('Type the show to find metadata for', default_text)
            if search_string is None:
                helper.log_debug('User cancelled manual metadata search')
                return
            elif not search_string:
                helper.show_ok_dialog(['Invalid search query.  Please try again'])
            else:
                break

        helper.show_busy_notification()
        mc_list = media_container_list.MediaContainerList(None)
        metadata, media_type = mc_list.get_metadata(search_string)

        # Grab the ID and the actual title, which might have gotten stripped of
        # the year because of a mismatch...
        if media_type == 'tvshow':
            tmdb_id = metadata.get('tvdb_id', '')  # for shows this actually holds the tvdb_id
            actual_title = mc_list.clean_tv_show_name(mc_list.clean_name(args.full_title))
        else:
            tmdb_id = metadata.get('tmdb_id', '')
            actual_title = mc_list.clean_name(args.full_title)

        helper.log_debug('Metadata finder results: %s, %s, %s' % (tmdb_id, media_type, metadata))
        if tmdb_id:
            helper.log_debug('Found metadata from search for %s; refreshing the page' % args.base_title)
            self.meta.update_meta(media_type, actual_title, imdb_id='', new_tmdb_id=tmdb_id, new_imdb_id=metadata.get('imdb_id'))
            helper.refresh_page()
        else:
            helper.show_ok_dialog(['Did not find any metadata from the search query.  Please try again.'])
        helper.close_busy_notification()
    def get_actual_media_type(self):
        # 1.1) The metadata classification may have failed earlier because of a
        # lack of data.  We can fix any potential mismatches here.
        if 'Movie' in self.genres:
            helper.log_debug('|COUNT|MISMATCH| %s' % args.full_title)
            return 'tvshow'  # 'movie'

        # 1.2) We have a special; just use the season 0 data along with the show banner
        if 'OVA' in self.genres or ('(OVA)' in args.full_title or ' Specials' in args.full_title or
            re.search('( OVA)( \(((Sub)|(Dub))\))?$', args.full_title) is not None or
            re.search(' (Special)$', args.full_title) is not None):
            helper.log_debug('|COUNT|OVA| %s' % args.full_title)
            return 'tvshow'  # 'special'

        return 'tvshow'
    def __update_row(self, id):
        sql_update = 'INSERT OR REPLACE INTO last_visited '\
            '(id, action, srctype, value, icon, fanart, full_mc_name, base_mc_name, '\
            'imdb_id, tvdb_id, tmdb_id, media_type) '\
            'VALUES (' + (', '.join('?' * 12)) + ')'
        # Be sure to decode the names, which may contain funky characters!
        full_mc_name = args.full_mc_name.decode('utf8')
        base_mc_name = args.base_mc_name.decode('utf8')
        data = (id, args.action, args.srctype, args.value, args.icon,
                args.fanart, full_mc_name, base_mc_name, args.imdb_id,
                args.tvdb_id, args.tmdb_id, args.media_type)
        helper.log_debug('SQL INSERT OR REPLACE: %s with params %s' %
                         (sql_update, str(data)))
        self.dbcur.execute(sql_update, data)
        self.dbcon.commit()
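For reference, a table definition consistent with the twelve columns inserted above (a sketch only; the actual schema is created elsewhere in the add-on):

dbcon.execute(
    'CREATE TABLE IF NOT EXISTS last_visited ('
    'id TEXT PRIMARY KEY, action TEXT, srctype TEXT, value TEXT, '
    'icon TEXT, fanart TEXT, full_mc_name TEXT, base_mc_name TEXT, '
    'imdb_id TEXT, tvdb_id TEXT, tmdb_id TEXT, media_type TEXT)')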
    def search(self):
        helper.start('search')
        search_string = helper.get_user_input('Search for show title')
        if search_string:
            url = helper.domain_url() + 'AdvanceSearch'
            form_data = {
                'animeName': search_string,
                'genres': '0',
                'status': ''
            }
            helper.log_debug(
                'Searching for show using url %s and form data %s' %
                (url, str(form_data)))
            from resources.lib.list_types.media_container_list import MediaContainerList
            self._show_list(MediaContainerList(url, form_data))
        helper.end('search')
    def parse(self):
        helper.start('EpisodeList.parse')
        if self.soup is None:
            return

        # Note that there are some lists/listings that do not have any episodes (!)
        table = self.soup.find('table', class_='listing')
        self.links = table.find_all('a') if table else []
        spans = self.soup.find_all('span', class_='info')
        helper.log_debug('# of links found: %d' % len(self.links))

        # We can determine whether the media is a movie by examining the genres
        span = [s for s in spans if s.string == 'Genres:']
        if span:
            genre_links = span[0].parent.find_all('a')
            self.genres = [link.string for link in genre_links]
            helper.log_debug('Found the genres: %s' % str(self.genres))

        # We'll try to determine the episode list from the first date
        span = [s for s in spans if s.string == 'Date aired:']
        if span:
            air_date = span[0].next_sibling.encode('ascii', 'ignore').strip().split(' to ')[0]
            air_datetime = helper.get_datetime(air_date, '%b %d, %Y')
            self.first_air_date = air_datetime.strftime('%Y-%m-%d')
            helper.log_debug('Found the first air date: %s' % str(self.first_air_date))

        # We'll try to determine the season from the alternate names, if necessary
        span = [s for s in spans if s.string == 'Other name:']
        if span:
            alias_links = span[0].parent.find_all('a')
            # Only keep aliases that contain no CJK (e.g. Japanese) characters
            f = lambda c: ord(c) > 0x3000
            self.aliases = [link.string for link in alias_links if filter(f, link.string) == u'']
            helper.log_debug('Found the aliases: %s' % str(self.aliases))

        # Grab the related links and the bookmark ID
        rightboxes = self.soup.find('div', id='rightside').find_all('div', class_='rightBox')
        if len(rightboxes) > 1:
            related = rightboxes[1].find('div', class_='barContent').find_all('a')
            for link in related:
                self.related_links.append(link)
                # Sometimes the related container includes episodes which are
                # dead links.  This is the best way to filter them out.
                try:
                    has_class = 'class' in dict(link.next_sibling.next_sibling.attrs)
                    if has_class and link.next_sibling.next_sibling['class'][0] == u'line':
                        break
                except:
                    pass

        self.bookmark_id = self.html.split('animeID=')[1].split('"')[0] if 'animeID=' in self.html else None

        # Sort episodes in ascending order by default
        self.links.reverse()

        helper.end('EpisodeList.parse')
    def __decrypt_link(self, url):
        iv = 'a5e8d2e9c1721ae0e84ad660c472c1f3'.decode('hex')

        if (helper.debug_decrypt_key() != ''):
            helper.log_debug('Using the key input from the debug settings')
            key = helper.debug_decrypt_key().decode('hex')
        else:
            helper.log_debug('Attempting to get key from html')
            key = self.__get_key_from_html()

        return self.__decrypt_text(key, iv, url.decode('base64'))
    def add_items(self):
        preset_quality = int(helper.get_setting('preset-quality').strip('p'))

        url_to_play = None
        for option in self.links:
            quality = option.string
            if quality == 'Openload' or preset_quality >= int(
                    quality.replace('Default quality - ', '').strip('p')):
                helper.log_debug(
                    'Found media to play at matching quality: %s' % quality)
                url_to_play = option['value']
                break

        if url_to_play is None:
            helper.log_debug(
                'No matching quality found; using the lowest available')
            url_to_play = self.links[-1]['value']

        self.link = self._decode_link(url_to_play)
    def __get_best_link_for_preset_quality(self, links):
        preset_quality = int(helper.get_setting('preset-quality').strip('p'))

        url_to_play = None
        for link in links:
            quality = link[0]
            if quality == 'Openload' or preset_quality >= int(
                    quality.replace('Default quality - ', '').strip('p')):
                helper.log_debug(
                    'Found media to play at matching quality: %s' % quality)
                url_to_play = link[1]
                break

        if url_to_play is None:
            helper.log_debug(
                'No matching quality found; using the lowest available')
            url_to_play = links[-1][1]  # links holds (quality, url) tuples

        return url_to_play
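The quality comparison above turns option labels into integers, for example:

int('720p'.strip('p'))                                                       # -> 720
int('Default quality - 480p'.replace('Default quality - ', '').strip('p'))   # -> 480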
    def __get_quality_links(self):
        if self.soup is None:
            return []

        # Find the links
        raw_links = []
        quality_options = self.soup.find(id="slcQualix")

        if quality_options:
            helper.log_debug('Using KissAnime or Beta servers')
            raw_links = quality_options.find_all("option")

        else:
            helper.log_debug(
                'Could not find KissAnime server links; attempting to find Openload link'
            )
            try:
                # The Openload link is the default video link
                video_str = self.__get_default_video_link()
                from bs4 import BeautifulSoup
                fake_soup = BeautifulSoup(
                    '<option value="%s">Openload</option>' % video_str,
                    "html.parser")
                raw_links = fake_soup.find_all('option')
                helper.log_debug(
                    'Successfully found and parsed Openload link %s' %
                    raw_links)
            except Exception as e:
                raw_links = []
                helper.log_debug(
                    'Failed to parse Openload link with exception: %s' %
                    str(e))
                helper.show_error_dialog([
                    'Could not find supported video link for this episode/movie'
                ])

        # Process the links
        links = []
        for option in raw_links:
            quality = option.string

            link_val = self._decode_link(option['value'])

            # If we failed to decode the link, then we'll just use the already selected option
            # and ignore the rest
            if not link_val:
                helper.show_error_dialog([
                    'Failed to decrypt any video links.  Videos with default Openload sources should still work.'
                ])
                break

            links.append((quality, link_val))

        return links
    def __determine_season(self):
        # 3.1) The next best thing is to examine the full name vs the base
        # name and look for any season indicators
        clean_title = self.clean_name(args.full_title)
        leftovers = clean_title.replace(args.base_title, '')
        season = self.__extract_season(leftovers)
        if season is not None:
            helper.log_debug('|COUNT|BASE| %s' % args.full_title)
            # We have a season, so work from there
            return season

        # 3.2) The next best thing after that is to examine the alternate 
        # names and look for any season stuff
        for alias in self.aliases:
            season = self.__extract_season(alias)
            if season is not None:
                helper.log_debug('|COUNT|ALIAS| %s' % args.full_title)
                return season

        return None
    def _resolve_cloudflare(self, url, challenge, form_data={}, headers={}, compression=True):
        """
            Asks _cloudflare for a URL with the answer to overcome the
            challenge, and then attempts the resolution.
        """
        helper.start("_resolve_cloudflare")
        parsed_url = urlparse(url)
        cloudflare_url = urlunparse((parsed_url.scheme, parsed_url.netloc, '', '', '', ''))
        query = self._get_cloudflare_answer(cloudflare_url, challenge, form_data, headers, compression)

        # Use the cloudflare jar instead for this attempt; revert back to 
        # main jar after attempt with call to update_opener()
        self._update_opener_with_cloudflare()

        try:
            helper.log_debug("Attempting to resolve the challenge")
            response = Net._fetch(self, query, form_data, headers, compression)
            helper.log_debug("Resolved the challenge, updating cookies")
            for c in self._cloudflare_jar:
                self._cj.set_cookie(c)
            self._update_opener()
        except urllib2.HTTPError as e:
            helper.log_debug("Failed to resolve the cloudflare challenge with exception %s" % str(e))
            self._update_opener()
        helper.end('_resolve_cloudflare')
    def __init__(self, url_val=args.value, form_data=None):
        self.html = ''
        self.soup = None
        self.links = []
        self.has_next_page = False
        from resources.lib.common.nethelpers import net, cookies
        self.net, self.cookies = net, cookies

        if not url_val:
            return

        url_val = self.__fix_up_url(url_val)
        url = url_val if 'http' in url_val else (helper.domain_url() + url_val)
        self.html, e = self.net.get_html(url, self.cookies,
                                         helper.domain_url(), form_data)
        self.html = helper.handle_html_errors(self.html, e)
        helper.log_debug('HTML is %sempty' %
                         ('' if self.html == '' else 'not '))

        self.html = self._filter_html(self.html)
        self.soup = BeautifulSoup(self.html,
                                  "html.parser") if self.html != '' else None
    def determine_quality(self):
        helper.start('QualityPlayer.determine_quality')

        target = self.html.split('|||')[0]
        target = target.replace('www.rapidvideo.com/e/',
                                'www.rapidvideo.com/?v=')
        params_url, e = self.net.get_html('%s&q=360p' % target, self.cookies,
                                          helper.domain_url())
        # Collect the available qualities and let the user pick one
        quali = re.findall(r'&q=(.*?)"', params_url)
        quali = quali[::-1]
        quali_choser = helper.present_selection_dialog(
            'Choose the quality from the options below', quali)
        if quali_choser != -1:
            params_url, e = self.net.get_html(
                '%s&q=%s' % (target, quali[quali_choser]), self.cookies,
                helper.domain_url())
            target = re.search(
                r'<source\ssrc=\"([^\"]+)\"\s.+title=\"([^\"]+)\"\s.+?>',
                params_url).group(1)
            helper.resolve_url(target)

        helper.log_debug('the chosen link: %s' % self.link)
        helper.end('QualityPlayer.determine_quality')
    def get_metadata(self, name):
        helper.start('MediaContainerList.get_metadata - name: %s' % name)
        if helper.get_setting('enable-metadata') == 'false' or name in ('Next', 'Last'):
            return {}, ''

        name_for_movie_search = self.clean_name(name)
        name_for_tv_search = self.clean_tv_show_name(name_for_movie_search)
        media_type = 'tvshow'

        # Not sure if movie or tv show; try tv show first
        metadata = self.meta.get_meta('tvshow',
                                      name_for_tv_search)  #, year=year)
        helper.log_debug('Got metadata %s for show %s' %
                         (metadata, name_for_tv_search))
        # It may be a movie, so let's try that with the general cleaned name
        if metadata['tvdb_id'] == '':
            metadata = self.meta.get_meta('movie',
                                          name_for_movie_search)  #, year=year)
            # If movie failed, and there was a year in the name, try tv without it
            if metadata['tmdb_id'] == '' and re.search(
                    r'( \([12][0-9]{3}\))$', name_for_tv_search) is not None:
                metadata = self.meta.get_meta('tvshow',
                                              name_for_tv_search[:-7],
                                              update=True)
                if metadata['imdb_id'] != '':
                    metadata = self.meta.update_meta(
                        'tvshow',
                        name_for_tv_search,
                        imdb_id='',
                        new_imdb_id=metadata['imdb_id'])
            elif metadata['tmdb_id'] != '':  # otherwise we found a movie
                media_type = 'movie'

        helper.end('MediaContainerList.get_metadata')
        return (metadata, media_type)
    def _get_cloudflare_answer(self, url, challenge, form_data={}, headers={}, compression=True):
        '''
            Use the cloudflare cookie jar to overcome the cloudflare challenge.
            Returns a URL with the answer to try.

            Credit to lambda - https://offshoregit.com/lambda81/
            plugin.video.genesis\resources\lib\libraries\cloudflare.py
        '''
        helper.start("_get_cloudflare_answer")
        if not challenge:
            helper.log_debug('Challenge is empty, raising')
            raise ValueError('Challenge is empty')

        try:
            jschl = re.compile('name="jschl_vc" value="(.+?)"/>').findall(challenge)[0]
            init_str = re.compile(r'setTimeout\(function\(\){\s*.*?.*:(.*?)};').findall(challenge)[0]
            builder = re.compile(r"challenge-form\'\);\s*(.*)a.v").findall(challenge)[0]
            decrypt_val = self._parseJSString(init_str)
            lines = builder.split(';')
        except Exception:
            helper.log_debug('Failed to parse the challenge %s' % str(challenge))
            raise

        try:
            for line in lines:
                if len(line) > 0 and '=' in line:
                    sections = line.split('=')
                    line_val = self._parseJSString(sections[1])
                    decrypt_val = int(eval(str(decrypt_val) + sections[0][-1] + str(line_val)))
        except Exception:
            helper.log_debug('Failed to find the decrypt_val from the lines')
            raise

        path = urlparse(url).path
        netloc = urlparse(url).netloc
        if not netloc:
            netloc = path

        answer = decrypt_val + len(netloc)

        url = url.rstrip('/')
        query = '%s/cdn-cgi/l/chk_jschl?jschl_vc=%s&jschl_answer=%s' % (url, jschl, answer)

        if 'type="hidden" name="pass"' in challenge:
            passval = re.compile('name="pass" value="(.*?)"').findall(challenge)[0]
            query = '%s/cdn-cgi/l/chk_jschl?pass=%s&jschl_vc=%s&jschl_answer=%s' % \
                    (url, urllib.quote_plus(passval), jschl, answer)
            time.sleep(9)

        helper.end("_get_cloudflare_answer")
        return query
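The answer is just the parsed challenge value plus the length of the host name. For example (hypothetical challenge value):

from urlparse import urlparse

decrypt_val = 21                                 # hypothetical value parsed from the JS
netloc = urlparse('http://example.com/').netloc  # 'example.com', length 11
answer = decrypt_val + len(netloc)               # jschl_answer == 32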
    def __filter_meta_list_by_season(self, meta_list, season, num_episodes):
        helper.log_debug('Filtering metadata list by season %d' % season)
        tmp_meta_list = []
        for meta in meta_list:
            if num_episodes == 0:
                break
            if len(tmp_meta_list) > 0:  # in sequence
                helper.log_debug('Found next meta')
                tmp_meta_list.append(meta)
                num_episodes -= 1
            elif meta['season'] == season:
                helper.log_debug('Found first meta')
                tmp_meta_list.append(meta)
                num_episodes -= 1
            else:
                helper.log_debug('Skipping meta')
        final_meta_list = sorted(tmp_meta_list,
                                 key=lambda d: d['absolute_episode'])
        return final_meta_list
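An illustrative run with hypothetical metadata rows: the first season-2 row starts the sequence, and each subsequent row is taken until num_episodes is exhausted.

meta_list = [
    {'season': 1, 'absolute_episode': 1},
    {'season': 2, 'absolute_episode': 13},
    {'season': 2, 'absolute_episode': 14},
    {'season': 2, 'absolute_episode': 15},
]
# __filter_meta_list_by_season(meta_list, 2, 2) keeps absolute
# episodes [13, 14], already sorted in ascending order.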