Example #1
 def query(self, filepath, languages, keywords, series, season, episode):
     request_series = series.lower().replace(' ', '-').replace('&', '@').replace('(','').replace(')','')
     if PY2 and isinstance(request_series, text_type):
         request_series = unicodedata.normalize('NFKD', request_series).encode('ascii', 'ignore')
     logger.debug(u'Getting subtitles for %s season %d episode %d with languages %r' % (series, season, episode, languages))
     r = self.session.get('%s/%s/%sx%.2d' % (self.server_url, quote(request_series), season, episode))
     if r.status_code == 404:
         logger.debug(u'Could not find subtitles for %s season %d episode %d with languages %r' % (series, season, episode, languages))
         return []
     if r.status_code != 200:
         logger.error(u'Request %s returned status code %d' % (r.url, r.status_code))
         return []
     soup = BeautifulSoup(r.content, self.required_features)
     subtitles = []
     for sub in soup('div', {'id': 'version'}):
         sub_keywords = split_keyword(self.release_pattern.search(sub.find('p', {'class': 'title-sub'}).contents[1]).group(1).lower())
         if keywords and not keywords & sub_keywords:
             logger.debug(u'None of subtitle keywords %r in %r' % (sub_keywords, keywords))
             continue
         for html_language in sub.findAllNext('ul', {'class': 'sslist'}):
             language = self.get_language(html_language.findNext('li', {'class': 'li-idioma'}).find('strong').contents[0].string.strip())
             if language not in languages:
                 logger.debug(u'Language %r not in wanted languages %r' % (language, languages))
                 continue
             html_status = html_language.findNext('li', {'class': 'li-estado green'})
             status = html_status.contents[0].string.strip()
             if status != 'Completado':
                 logger.debug(u'Wrong subtitle status %s' % status)
                 continue
             path = get_subtitle_path(filepath, language, self.config.multi)
             subtitle = ResultSubtitle(path, language, self.__class__.__name__.lower(), html_status.findNext('span', {'class': 'descargar green'}).find('a')['href'],
                                       keywords=sub_keywords)
             subtitles.append(subtitle)
     return subtitles
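For reference, the path-building above can be tried standalone. This is a minimal sketch: the helper name and sample values are hypothetical, only the slug rules and the quote() call mirror the code.

    from urllib.parse import quote  # Python 3 location; the example may import quote differently

    def build_episode_url(server_url, series, season, episode):
        # hypothetical helper: slugify the series name the way the service expects
        slug = series.lower().replace(' ', '-').replace('&', '@').replace('(', '').replace(')', '')
        return '%s/%s/%sx%.2d' % (server_url, quote(slug), season, episode)

    # build_episode_url('https://example.com', 'Some Show (US)', 1, 2)
    # -> 'https://example.com/some-show-us/1x02'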
Example #2
    def _search_provider(self, search_params, search_mode='eponly', epcount=0, **kwargs):

        results = []
        if not self.url:
            return results

        items = {'Cache': [], 'Season': [], 'Episode': [], 'Propers': []}

        rc = dict([(k, re.compile('(?i)' + v)) for (k, v) in iteritems({
            'info': 'detail', 'get': 'download[^"]+magnet', 'tid': r'.*/(\d{5,}).*',
            'verify': '(?:helper|moderator|trusted|vip)', 'size': r'size[^\d]+(\d+(?:[.,]\d+)?\W*[bkmgt]\w+)'})])

        for mode in search_params:
            for search_string in search_params[mode]:
                search_string = unidecode(search_string)

                s_mode = 'browse' if 'Cache' == mode else 'search'
                for i in ('', '2'):
                    search_url = self.urls['%s%s' % (s_mode, i)]
                    if 'Cache' != mode:
                        search_url = search_url % quote(search_string)

                    html = self.get_url(search_url)
                    if self.should_skip():
                        return results

                    if html and not self._has_no_results(html):
                        break
                        
                cnt = len(items[mode])
                try:
                    if not html or self._has_no_results(html):
                        self._url = None
                        raise generic.HaltParseException

                    with BS4Parser(html, parse_only=dict(table={'id': 'searchResult'})) as tbl:
                        tbl_rows = [] if not tbl else tbl.find_all('tr')

                        if 2 > len(tbl_rows):
                            raise generic.HaltParseException

                        head = None
                        for tr in tbl.find_all('tr')[1:]:
                            cells = tr.find_all('td')
                            if 3 > len(cells):
                                continue
                            try:
                                head = head if None is not head else self._header_row(tr)
                                seeders, leechers = [try_int(cells[head[x]].get_text().strip())
                                                     for x in ('seed', 'leech')]
                                if self._reject_item(seeders, leechers):
                                    continue

                                info = tr.find('a', title=rc['info'])
                                title = info.get_text().strip().replace('_', '.')
                                tid = rc['tid'].sub(r'\1', str(info['href']))
                                download_magnet = tr.find('a', title=rc['get'])['href']
                            except (AttributeError, TypeError, ValueError):
                                continue

                            if self.confirmed and not tr.find('img', title=rc['verify']):
                                logger.log(u'Skipping untrusted non-verified result: ' + title, logger.DEBUG)
                                continue

                            # check that the number of video files matches the episodes in the season and
                            # find the real quality of a full season torrent by analysing the files in the torrent
                            if 'Season' == mode and 'sponly' == search_mode:
                                ep_number = int(epcount // len(set(show_name_helpers.allPossibleShowNames(
                                    self.show_obj))))
                                title = self._find_season_quality(title, tid, ep_number)

                            if title and download_magnet:
                                size = None
                                try:
                                    size = rc['size'].findall(tr.find_all(class_='detDesc')[0].get_text())[0]
                                except (BaseException, Exception):
                                    pass

                                items[mode].append((title, download_magnet, seeders, self._bytesizer(size)))

                except generic.HaltParseException:
                    pass
                except (BaseException, Exception):
                    logger.log(u'Failed to parse. Traceback: %s' % traceback.format_exc(), logger.ERROR)
                self._log_search(mode, len(items[mode]) - cnt, search_url)

            results = self._sort_seeding(mode, results + items[mode])

        return results
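As a hedged aside, the 'tid' regex compiled above can be exercised on its own; the href below is made up.

    import re

    tid_re = re.compile(r'(?i).*/(\d{5,}).*')
    href = '/torrent/1234567/Some.Show.S01E02.720p'
    tid = tid_re.sub(r'\1', href)  # -> '1234567'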
Example #3
    def _authorised(self, **kwargs):
        result = False
        if self.digest:
            # rebuild the stored 'name=value' digest: split at its first '=' and re-encode the value
            digest = [x[::-1] for x in self.digest[::-1].rpartition('=')]
            self.digest = digest[2] + digest[1] + quote(unquote(digest[0]))
            params = dict(
                logged_in=(lambda y='': all(
                    [self.url and self.session.cookies.get_dict(domain='.' + urlparse(self.url).netloc)
                     and self.session.cookies.clear('.' + urlparse(self.url).netloc) is None or True]
                    + ['RSS' in y, 'type="password"' not in y, self.has_all_cookies(['speedian'], 'inSpeed_')]
                    + [(self.session.cookies.get('inSpeed_' + c) or 'sg!no!pw') in self.digest
                       for c in ['speedian']])),
                failed_msg=(lambda y=None: None),
                post_params={'login': False})
            result = super(SpeedCDProvider, self)._authorised(**params)

        if not result and not self.failure_count:
            if self.url and self.digest:
                self.get_url('%slogout.php' % self.url,
                             skip_auth=True,
                             post_data={
                                 'submit.x': 24,
                                 'submit.y': 11
                             })
            self.digest = ''
            params = dict(
                logged_in=(lambda y='': all(
                    [self.session.cookies.get_dict(domain='.speed.cd')
                     and self.session.cookies.clear('.speed.cd') is None or True]
                    + [bool(y), not re.search('(?i)type="password"', y)]
                    + [re.search('(?i)Logout', y) or not self.digest
                       or (self.session.cookies.get('inSpeed_speedian') or 'sg!no!pw') in self.digest])),
                failed_msg=(lambda y='': (
                    re.search(r'(?i)(username|password)((<[^>]+>)|\W)*' +
                              r'(or|and|/|\s)((<[^>]+>)|\W)*(password|incorrect)', y) and
                    u'Invalid username or password for %s. Check settings' or
                    u'Failed to authenticate or parse a response from %s, abort provider')),
                post_params={'form_tmpl': True})
            self.urls['login_action'] = self.urls.get('do_login')
            session = super(SpeedCDProvider, self)._authorised(session=None,
                                                               resp_sess=True,
                                                               **params)
            self.urls['login_action'] = ''
            if session:
                self.digest = 'inSpeed_speedian=%s' % session.cookies.get(
                    'inSpeed_speedian')
                sickbeard.save_config()
                result = True
                logger.log('Cookie details for %s updated.' % self.name,
                           logger.DEBUG)
            elif not self.failure_count:
                logger.log(
                    'Invalid cookie details for %s and login failed. Check settings'
                    % self.name, logger.ERROR)
        return result
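The digest handling at the top of this example is easier to follow with concrete, made-up values: the stored 'name=value' cookie string is split at its first '=' and the value is re-percent-encoded.

    from urllib.parse import quote, unquote

    stored = 'inSpeed_speedian=abc 123=='
    parts = [x[::-1] for x in stored[::-1].rpartition('=')]
    rebuilt = parts[2] + parts[1] + quote(unquote(parts[0]))
    # rebuilt == 'inSpeed_speedian=abc%20123%3D%3D'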
Example #4
    def _update(self, host=None, show_name=None):
        """ Handle updating Kodi host via HTTP API

        Update the video library for a specific tv show if passed, otherwise update the whole library if option enabled.

        Args:
            show_name: Name of a TV show to target for a library update

        Return:
            True or False
        """

        if not host:
            self._log_warning(u'No host specified, aborting update')
            return False

        self._log_debug(u'Updating library via HTTP method for host: %s' %
                        host)

        # if we're doing per-show
        if show_name:
            self._log_debug(u'Updating library via HTTP method for show %s' %
                            show_name)

            # noinspection SqlResolve
            path_sql = 'SELECT path.strPath' \
                       ' FROM path, tvshow, tvshowlinkpath' \
                       ' WHERE tvshow.c00 = "%s"' % show_name \
                       + ' AND tvshowlinkpath.idShow = tvshow.idShow' \
                         ' AND tvshowlinkpath.idPath = path.idPath'

            # set xml response format, if this fails then don't bother with the rest
            if not self._send(
                    host,
                {
                    'command':
                    'SetResponseFormat(webheader;false;webfooter;false;header;<xml>;footer;</xml>;'
                    'opentag;<tag>;closetag;</tag>;closefinaltag;false)'
                }):
                return False

            # sql used to grab path(s)
            response = self._send(
                host, {'command': 'QueryVideoDatabase(%s)' % path_sql})
            if not response:
                self._log_debug(u'Invalid response for %s on %s' %
                                (show_name, host))
                return False

            try:
                et = XmlEtree.fromstring(quote(response, ':\\/<>'))
            except SyntaxError as e:
                self._log_error(u'Unable to parse XML in response: %s' % ex(e))
                return False

            paths = et.findall('.//field')
            if not paths:
                self._log_debug(u'No valid path found for %s on %s' %
                                (show_name, host))
                return False

            for path in paths:
                # we do not need it double-encoded, gawd this is dumb
                un_enc_path = decode_str(unquote(path.text),
                                         sickbeard.SYS_ENCODING)
                self._log_debug(u'Updating %s on %s at %s' %
                                (show_name, host, un_enc_path))

                if not self._send(
                        host,
                        dict(command='ExecBuiltIn',
                             parameter='Kodi.updatelibrary(video, %s)' %
                             un_enc_path)):
                    self._log_error(
                        u'Update of show directory failed for %s on %s at %s' %
                        (show_name, host, un_enc_path))
                    return False

                # sleep for a few seconds just to be sure kodi has a chance to finish each directory
                if 1 < len(paths):
                    time.sleep(5)
        # do a full update if requested
        else:
            self._log_debug(u'Full library update on host: %s' % host)

            if not self._send(
                    host,
                    dict(command='ExecBuiltIn',
                         parameter='Kodi.updatelibrary(video)')):
                self._log_error(u'Failed full library update on: %s' % host)
                return False

        return True
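A small sketch of the quote-before-parse step used above, with a made-up response: percent-encoding everything except ':\/<>' leaves the markup intact while neutralising characters that could upset ElementTree, and unquote() later restores the raw path.

    import xml.etree.ElementTree as XmlEtree
    from urllib.parse import quote, unquote

    response = '<xml><record><field>C:\\TV\\Show Name\\</field></record></xml>'
    et = XmlEtree.fromstring(quote(response, ':\\/<>'))
    path = unquote(et.findall('.//field')[0].text)  # -> 'C:\\TV\\Show Name\\'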
Example #5
    def _search_provider(self, search_params, **kwargs):

        results = []
        if not self.url:
            return results

        items = {'Cache': [], 'Season': [], 'Episode': [], 'Propers': []}

        # percent-encode like JavaScript's encodeURIComponent (alphanumerics and ~()*!.'_- stay unescaped)
        quote_fx = (lambda t: quote(t, safe='~()*!.\''))
        for mode in search_params:
            for search_string in search_params[mode]:
                search_url = self.url
                cnt = len(items[mode])
                try:
                    for token in self._get_tokens():
                        if self.should_skip():
                            return results
                        if not token:
                            continue

                        params = dict(token=token[0], ent=token[1])
                        if 'Cache' != mode:
                            params.update(
                                {'ss': quote_fx(unidecode(search_string))})

                        data_json = None
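                        # up to three attempts, sleeping a random 3-7 seconds before each request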
                        vals = [i for i in range(3, 8)]
                        random.SystemRandom().shuffle(vals)
                        for x in vals[0], vals[2], vals[4]:
                            time.sleep(x)
                            params.update(dict(ts=self.ts()))
                            search_url = self.urls[
                                ('search', 'browse')['Cache' == mode]] % params
                            # decode the json here rather than in get_url, since an empty search result would otherwise look like a false negative ('no data')
                            html_json = self.get_url(search_url)
                            if None is not html_json:
                                data_json = json.loads(html_json)
                                if data_json or 'Cache' != mode:
                                    break
                            if self.should_skip():
                                return results

                        for item in filter_iter(
                                lambda di: re.match('(?i).*?(tv|television)',
                                                    di.get('type', '') or di.get('category', ''))
                                and (not self.confirmed or di.get('trusted') or di.get('verified')),
                                data_json or {}):
                            # prefer the primary key, else its alias; try_int(v, v) keeps the raw
                            # value when it is not an integer (e.g. a size string)
                            seeders, leechers, size = map_list(
                                lambda arg: try_int(*([item.get(arg[0]) if None is not item.get(arg[0])
                                                       else item.get(arg[1])]) * 2),
                                (('seeder', 'seed'), ('leecher', 'leech'), ('size', 'size')))
                            if self._reject_item(seeders, leechers):
                                continue
                            title = item.get('name') or item.get('title')
                            download_url = item.get('magnet') or item.get(
                                'magnetLink')
                            if not download_url:
                                source = item.get('site') or item.get('source')
                                link = self._link(
                                    item.get('url') or item.get('pageLink'))
                                if not source or not link:
                                    continue
                                download_url = self.urls['get'] % dict(
                                    token=token[0],
                                    src=quote_fx(source),
                                    url=b64encodestring(quote_fx(link)),
                                    ts='%(ts)s')
                            if title and download_url:
                                items[mode].append(
                                    (title, download_url, seeders, size))

                except generic.HaltParseException:
                    pass
                except (BaseException, Exception):
                    logger.log(
                        u'Failed to parse. Traceback: %s' %
                        traceback.format_exc(), logger.ERROR)

                self._log_search(mode, len(items[mode]) - cnt, search_url)

            results = self._sort_seeding(mode, results + items[mode])

        return results
Example #6
    def _authorised(self, **kwargs):
        result = False
        if self.digest and 'None' not in self.digest and 'login_chk' in self.urls:
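            # rebuild the stored 'name=value' digest: split at its first '=' and re-encode the value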
            digest = [x[::-1] for x in self.digest[::-1].rpartition('=')]
            self.digest = digest[2] + digest[1] + quote(unquote(digest[0]))
            self.session.cookies = cookiejar_from_dict(
                dict({digest[2]: quote(unquote(digest[0]))}))
            html = self.get_url(self.urls['login_chk'], skip_auth=True)
            result = html and 'RSS' in html and 'type="password"' not in html

        if not result and not self.failure_count:
            if self.url and self.digest:
                self.get_url(self.urls['logout'],
                             skip_auth=True,
                             post_data={
                                 'submit.x': 24,
                                 'submit.y': 11
                             })
            self.digest = ''
            self.session.cookies.clear()
            json = self.get_url(self.urls['login_1'],
                                skip_auth=True,
                                post_data={'username': self.username},
                                parse_json=True)
            resp = filter_list(lambda l: isinstance(l, list),
                               json.get('Fs', []))

            def get_html(_resp):
                for cur_item in _resp:
                    if isinstance(cur_item, list):
                        _html = filter_list(
                            lambda s: isinstance(s, string_types) and
                            'password' in s, cur_item)
                        if not _html:
                            _html = get_html(cur_item)
                        if _html:
                            return _html

            params = {}
            html = get_html(resp)
            if html:
                tags = re.findall(r'(?is)(<input[^>]*?name=[\'"][^\'"]+[^>]*)',
                                  html[0])
                attrs = [[(re.findall(r'(?is)%s=[\'"]([^\'"]+)' % attr, x)
                           or [''])[0] for attr in ['type', 'name', 'value']]
                         for x in tags]
                for itype, name, value in attrs:
                    if 'password' in [itype, name]:
                        params[name] = self.password
                    if name not in ('username',
                                    'password') and 'password' != itype:
                        params.setdefault(name, value)

            if params:
                html = self.get_url(self.urls['login_2'],
                                    skip_auth=True,
                                    post_data=params)
                if html and 'RSS' in html:
                    self.digest = None
                    if self.session.cookies.get('inSpeed_speedian'):
                        self.digest = 'inSpeed_speedian=%s' % self.session.cookies.get(
                            'inSpeed_speedian')
                    sickbeard.save_config()
                    result = True
                    logger.log('Cookie details for %s updated.' % self.name,
                               logger.DEBUG)
            elif not self.failure_count:
                logger.log(
                    'Invalid cookie details for %s and login failed. Check settings'
                    % self.name, logger.ERROR)
        return result
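The login-form scraping near the end of this example can also be tried standalone; the html string below is hypothetical, the regexes are the ones used in the code.

    import re

    html = '<input type="password" name="pwd" value="">' \
           '<input type="hidden" name="token" value="abc123">'
    tags = re.findall(r'(?is)(<input[^>]*?name=[\'"][^\'"]+[^>]*)', html)
    attrs = [[(re.findall(r'(?is)%s=[\'"]([^\'"]+)' % attr, x) or [''])[0]
              for attr in ['type', 'name', 'value']] for x in tags]
    # attrs == [['password', 'pwd', ''], ['hidden', 'token', 'abc123']]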
Example #7
 def query(self,
           filepath,
           languages,
           keywords=None,
           series=None,
           season=None,
           episode=None,
           movie=None,
           year=None):
     if series and season and episode:
         request_series = series.lower().replace(' ', '_')
         if PY2 and isinstance(request_series, text_type):
             request_series = request_series.encode('utf-8')
         logger.debug(
             u'Getting subtitles for %s season %d episode %d with languages %r'
             % (series, season, episode, languages))
         r = self.session.get(
             '%s/serie/%s/%s/%s/' %
             (self.server_url, quote(request_series), season, episode))
         if r.status_code == 404:
             logger.debug(
                 u'Could not find subtitles for %s season %d episode %d with languages %r'
                 % (series, season, episode, languages))
             return []
     elif movie and year:
         request_movie = movie.title().replace(' ', '_')
         if PY2 and isinstance(request_movie, text_type):
             request_movie = request_movie.encode('utf-8')
         logger.debug(u'Getting subtitles for %s (%d) with languages %r' %
                      (movie, year, languages))
         r = self.session.get('%s/film/%s_(%d)' %
                              (self.server_url, quote(request_movie), year))
         if r.status_code == 404:
             logger.debug(
                 u'Could not find subtitles for %s (%d) with languages %r' %
                 (movie, year, languages))
             return []
     else:
          raise ServiceError('One or more parameters missing')
     if r.status_code != 200:
         logger.error(u'Request %s returned status code %d' %
                      (r.url, r.status_code))
         return []
     soup = BeautifulSoup(r.content, self.required_features)
     subtitles = []
     for sub in soup('td', {'class': 'NewsTitle'}):
         sub_keywords = split_keyword(sub.b.string.lower())
         if keywords and not keywords & sub_keywords:
             logger.debug(u'None of subtitle keywords %r in %r' %
                          (sub_keywords, keywords))
             continue
         for html_language in sub.parent.parent.find_all(
                 'td', {'class': 'language'}):
             language = self.get_language(html_language.string.strip())
             if language not in languages:
                 logger.debug(u'Language %r not in wanted languages %r' %
                              (language, languages))
                 continue
             html_status = html_language.find_next_sibling('td')
             status = html_status.strong.string.strip()
             if status != 'Completado':
                 logger.debug(u'Wrong subtitle status %s' % status)
                 continue
             path = get_subtitle_path(filepath, language, self.config.multi)
             subtitle = ResultSubtitle(
                 path, language, self.__class__.__name__.lower(),
                 '%s%s' % (self.server_url,
                           html_status.find_next('td').find('a')['href']))
             subtitles.append(subtitle)
     return subtitles
Example #8
    def _update_library_http(self, host=None, show_name=None):
        """Handles updating XBMC host via HTTP API

        Attempts to update the XBMC video library for a specific tv show if passed,
        otherwise updates the whole library if that option is enabled.

        Args:
            host: XBMC webserver host:port
            show_name: Name of a TV show to specifically target the library update for

        Returns:
            True or False

        """

        if not host:
            self._log_debug(u'No host passed, aborting update')
            return False

        self._log_debug(u'Updating XBMC library via HTTP method for host: ' +
                        host)

        # if we're doing per-show
        if show_name:
            self._log_debug(u'Updating library via HTTP method for show ' +
                            show_name)

            # noinspection SqlResolve
            path_sql = 'select path.strPath' \
                       ' from path, tvshow, tvshowlinkpath' \
                       ' where tvshow.c00 = "%s"' \
                       ' and tvshowlinkpath.idShow = tvshow.idShow' \
                       ' and tvshowlinkpath.idPath = path.idPath' % show_name

            # use this to get xml back for the path lookups
            xml_command = dict(
                command=
                'SetResponseFormat(webheader;false;webfooter;false;header;<xml>;footer;</xml>;'
                'opentag;<tag>;closetag;</tag>;closefinaltag;false)')
            # sql used to grab path(s)
            sql_command = dict(command='QueryVideoDatabase(%s)' % path_sql)
            # set output back to default
            reset_command = dict(command='SetResponseFormat()')

            # set xml response format, if this fails then don't bother with the rest
            request = self._send_to_xbmc(xml_command, host)
            if not request:
                return False

            sql_xml = self._send_to_xbmc(sql_command, host)
            self._send_to_xbmc(reset_command, host)

            if not sql_xml:
                self._log_debug(u'Invalid response for ' + show_name + ' on ' +
                                host)
                return False

            enc_sql_xml = quote(sql_xml, ':\\/<>')
            try:
                et = XmlEtree.fromstring(enc_sql_xml)
            except SyntaxError as e:
                self._log_error(u'Unable to parse XML response: ' + ex(e))
                return False

            paths = et.findall('.//field')

            if not paths:
                self._log_debug(u'No valid paths found for ' + show_name +
                                ' on ' + host)
                return False

            for path in paths:
                # we do not need it double-encoded, gawd this is dumb
                un_enc_path = decode_str(unquote(path.text),
                                         sickbeard.SYS_ENCODING)
                self._log_debug(u'Updating ' + show_name + ' on ' + host +
                                ' at ' + un_enc_path)
                update_command = dict(
                    command='ExecBuiltIn',
                    parameter='XBMC.updatelibrary(video, %s)' % un_enc_path)
                request = self._send_to_xbmc(update_command, host)
                if not request:
                    self._log_error(u'Update of show directory failed on ' +
                                    show_name + ' on ' + host + ' at ' +
                                    un_enc_path)
                    return False
                # sleep for a few seconds just to be sure xbmc has a chance to finish each directory
                if 1 < len(paths):
                    time.sleep(5)
        # do a full update if requested
        else:
            self._log(u'Doing full library update on host: ' + host)
            update_command = {
                'command': 'ExecBuiltIn',
                'parameter': 'XBMC.updatelibrary(video)'
            }
            request = self._send_to_xbmc(update_command, host)

            if not request:
                self._log_error(u'Full Library update failed on: ' + host)
                return False

        return True