Example #1
    def parse(self, data, mode):
        """
        Parse search results for items.

        :param data: The raw response from a search
        :param mode: The current mode used to search, e.g. RSS

        :return: A list of items found
        """
        # Units
        units = ['B', 'KIB', 'MIB', 'GIB', 'TIB', 'PIB']

        items = []

        for item in data:
            try:
                title = item['title']
                download_url = item['link']
                if not all([title, download_url]):
                    continue

                seeders = try_int(item['nyaa_seeders'])
                leechers = try_int(item['nyaa_leechers'])

                # Filter unseeded torrent
                if seeders < self.minseed:
                    if mode != 'RSS':
                        log.debug(
                            "Discarding torrent because it doesn't meet the"
                            ' minimum seeders: {0}. Seeders: {1}', title,
                            seeders)
                    continue

                size = convert_size(item['nyaa_size'], default=-1, units=units)

                pubdate = self.parse_pubdate(item['published'])

                item = {
                    'title': title,
                    'link': download_url,
                    'size': size,
                    'seeders': seeders,
                    'leechers': leechers,
                    'pubdate': pubdate,
                }
                if mode != 'RSS':
                    log.debug(
                        'Found result: {0} with {1} seeders and {2} leechers',
                        title, seeders, leechers)

                items.append(item)
            except (AttributeError, TypeError, KeyError, ValueError,
                    IndexError):
                log.exception('Failed parsing provider.')

        return items
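
Every snippet on this page leans on the same try_int helper. Its implementation is not shown here, but the tests in Examples 9 and 10 further down pin its contract: digit strings parse, floats truncate toward zero, and everything else falls back to the default. A minimal sketch consistent with those tests (the real helper in the source project may differ in detail):

def try_int(candidate, default_value=0):
    """Try to convert ``candidate`` to an int; return ``default_value`` on failure."""
    try:
        return int(candidate)
    except (TypeError, ValueError):
        return default_value
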
Example #2
    def parse(self, data, mode):
        """
        Parse search results for items.

        :param data: The raw response from a search
        :param mode: The current mode used to search, e.g. RSS

        :return: A list of items found
        """
        items = []
        torrent_rows = data.pop('torrents', {})

        # Iterate over the torrent rows
        for row in torrent_rows:
            try:
                title = row.pop('title', '')
                info_hash = row.pop('infoHash', '')
                download_url = 'magnet:?xt=urn:btih:' + info_hash
                if not all([title, download_url, info_hash]):
                    continue

                swarm = row.pop('swarm', {})
                seeders = try_int(swarm.pop('seeders', 0))
                leechers = try_int(swarm.pop('leechers', 0))

                # Filter unseeded torrent
                if seeders < self.minseed:
                    if mode != 'RSS':
                        log.debug(
                            "Discarding torrent because it doesn't meet the"
                            ' minimum seeders: {0}. Seeders: {1}', title,
                            seeders)
                    continue

                size = convert_size(row.pop('size', -1)) or -1

                item = {
                    'title': title,
                    'link': download_url,
                    'size': size,
                    'seeders': seeders,
                    'leechers': leechers,
                    'pubdate': None,
                }
                if mode != 'RSS':
                    log.debug(
                        'Found result: {0} with {1} seeders and {2} leechers',
                        title, seeders, leechers)

                items.append(item)
            except (AttributeError, TypeError, KeyError, ValueError,
                    IndexError):
                log.exception('Failed parsing provider.')

        return items
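
Example 2 builds a bare magnet URI from the info hash alone. Purely for illustration (not part of the source project), a fuller URI usually also carries a display name and tracker list:

from six.moves.urllib.parse import quote  # six, as used elsewhere in these snippets

def build_magnet(info_hash, title=None, trackers=()):
    """Build a magnet URI with an optional display name and trackers (sketch)."""
    uri = 'magnet:?xt=urn:btih:' + info_hash
    if title:
        uri += '&dn=' + quote(title)
    for tracker in trackers:
        uri += '&tr=' + quote(tracker)
    return uri
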
Example #3
    def parse(self, data, mode):
        """
        Parse search results for items.

        :param data: The raw response from a search
        :param mode: The current mode used to search, e.g. RSS

        :return: A list of items found
        """
        items = []
        torrent_rows = data.get('torrents', [])

        # Iterate over the torrent rows
        for row in torrent_rows:
            try:
                title = row.pop('name', '')
                download_url = '{0}{1}'.format(
                    self.urls['download'],
                    urlencode({'id': row.pop('id', ''), 'passkey': self.passkey}))

                if not all([title, download_url]):
                    continue

                seeders = try_int(row.pop('seeders', 0))
                leechers = try_int(row.pop('leechers', 0))

                # Filter unseeded torrent
                if seeders < self.minseed:
                    if mode != 'RSS':
                        log.debug("Discarding torrent because it doesn't meet the"
                                  " minimum seeders: {0}. Seeders: {1}",
                                  title, seeders)
                    continue

                size = convert_size(row.pop('size', -1), -1)

                item = {
                    'title': title,
                    'link': download_url,
                    'size': size,
                    'seeders': seeders,
                    'leechers': leechers,
                    'pubdate': None,
                }
                if mode != 'RSS':
                    log.debug('Found result: {0} with {1} seeders and {2} leechers',
                              title, seeders, leechers)

                items.append(item)
            except (AttributeError, TypeError, KeyError, ValueError, IndexError):
                log.error('Failed parsing provider. Traceback: {0!r}',
                          traceback.format_exc())

        return items
Example #4
    def parse(self, data, mode):
        """
        Parse search results for items.

        :param data: The raw response from a search
        :param mode: The current mode used to search, e.g. RSS

        :return: A list of items found
        """
        # Units
        units = ['B', 'KIB', 'MIB', 'GIB', 'TIB', 'PIB']

        items = []

        for item in data:
            try:
                title = item['title']
                download_url = item['link']
                if not all([title, download_url]):
                    continue

                seeders = try_int(item['nyaa_seeders'])
                leechers = try_int(item['nyaa_leechers'])

                # Filter unseeded torrent
                if seeders < self.minseed:
                    if mode != 'RSS':
                        log.debug("Discarding torrent because it doesn't meet the"
                                  ' minimum seeders: {0}. Seeders: {1}',
                                  title, seeders)
                    continue

                size = convert_size(item['nyaa_size'], default=-1, units=units)

                pubdate = self.parse_pubdate(item['published'])

                item = {
                    'title': title,
                    'link': download_url,
                    'size': size,
                    'seeders': seeders,
                    'leechers': leechers,
                    'pubdate': pubdate,
                }
                if mode != 'RSS':
                    log.debug('Found result: {0} with {1} seeders and {2} leechers',
                              title, seeders, leechers)

                items.append(item)
            except (AttributeError, TypeError, KeyError, ValueError, IndexError):
                log.exception('Failed parsing provider.')

        return items
Example #5
    def parse(self, data, mode):
        """
        Parse search results for items.

        :param data: The raw response from a search
        :param mode: The current mode used to search, e.g. RSS

        :return: A list of items found
        """
        items = []
        json_data = data.get('data', {})
        torrent_rows = json_data.get('torrents', [])

        for row in torrent_rows:
            try:
                title = row.pop('name', '')
                download_url = '{0}?{1}'.format(
                    self.urls['download'],
                    urlencode({'id': row.pop('id', ''), 'passkey': self.passkey}))

                if not all([title, download_url]):
                    continue

                seeders = try_int(row.pop('seeders', 0))
                leechers = try_int(row.pop('leechers', 0))

                # Filter unseeded torrent
                if seeders < self.minseed:
                    if mode != 'RSS':
                        log.debug("Discarding torrent because it doesn't meet the"
                                  ' minimum seeders: {0}. Seeders: {1}',
                                  title, seeders)
                    continue

                size = convert_size(row.pop('size', -1), -1)

                item = {
                    'title': title,
                    'link': download_url,
                    'size': size,
                    'seeders': seeders,
                    'leechers': leechers,
                    'pubdate': None,
                }
                if mode != 'RSS':
                    log.debug('Found result: {0} with {1} seeders and {2} leechers',
                              title, seeders, leechers)

                items.append(item)
            except (AttributeError, TypeError, KeyError, ValueError, IndexError):
                log.exception('Failed parsing provider.')

        return items
Example #6
    def parse(self, data, mode):
        """
        Parse search results for items.

        :param data: The raw response from a search
        :param mode: The current mode used to search, e.g. RSS

        :return: A list of items found
        """
        items = []
        torrent_rows = data.pop('torrents', {})

        # Iterate over the torrent rows
        for row in torrent_rows:
            try:
                title = row.pop('title', '')
                info_hash = row.pop('infoHash', '')
                download_url = 'magnet:?xt=urn:btih:' + info_hash
                if not all([title, download_url, info_hash]):
                    continue

                swarm = row.pop('swarm', {})
                seeders = try_int(swarm.pop('seeders', 0))
                leechers = try_int(swarm.pop('leechers', 0))

                # Filter unseeded torrent
                if seeders < self.minseed:
                    if mode != 'RSS':
                        log.debug("Discarding torrent because it doesn't meet the"
                                  ' minimum seeders: {0}. Seeders: {1}',
                                  title, seeders)
                    continue

                size = convert_size(row.pop('size', -1)) or -1

                item = {
                    'title': title,
                    'link': download_url,
                    'size': size,
                    'seeders': seeders,
                    'leechers': leechers,
                    'pubdate': None,
                }
                if mode != 'RSS':
                    log.debug('Found result: {0} with {1} seeders and {2} leechers',
                              title, seeders, leechers)

                items.append(item)
            except (AttributeError, TypeError, KeyError, ValueError, IndexError):
                log.exception('Failed parsing provider.')

        return items
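
Examples 1-6 all normalize sizes through convert_size, sometimes passing an explicit binary units list. The helper itself never appears on this page; the following is a minimal sketch consistent with the call sites above, and only with those (the project's real helper is likely richer):

import re

def convert_size(size, default=None, units=None):
    """Convert a numeric value or a human-readable size string to bytes (sketch)."""
    units = units or ['B', 'KB', 'MB', 'GB', 'TB', 'PB']
    try:
        # Already a number, or a plain numeric string such as '734003200'.
        return int(float(size))
    except (TypeError, ValueError):
        pass
    match = re.match(r'\s*([\d.]+)\s*([A-Za-z]+)', str(size))
    if not match:
        return default
    value, unit = float(match.group(1)), match.group(2).upper()
    if unit not in units:
        return default
    return int(value * 1024 ** units.index(unit))

With the binary list from Example 1, convert_size('1.2 GiB', default=-1, units=['B', 'KIB', 'MIB', 'GIB', 'TIB', 'PIB']) scales by 1024 ** 3; anything unparsable yields the default, which the callers then coerce to -1.
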
Example #7
    def _get_size(self, item):
        """
        Get size info from a result item.

        Returns int size or -1
        """
        return try_int(item.get('size', -1), -1)
Example #8
 def _get_size(self, item):
     """Get result size."""
     try:
         size = item.get('links')[1].get('length', -1)
     except (AttributeError, IndexError, TypeError):
         size = -1
     return try_int(size, -1)
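
For context, Example 8 reads the ``length`` attribute of the second link of a feed result. The data below is hypothetical, shaped only to satisfy that lookup:

item = {
    'title': 'Some.Show.S01E01.720p',  # hypothetical result
    'links': [
        {'href': 'https://indexer.example/details/123'},
        {'href': 'https://indexer.example/nzb/123', 'length': '734003200'},
    ],
}
# item.get('links')[1].get('length', -1) -> '734003200'; try_int(...) -> 734003200
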
Example #9
    def test_try_int_with_default(self):
        default_value = 42
        test_cases = {
            None: default_value,
            '': default_value,
            '123': 123,
            '-123': -123,
            '12.3': default_value,
            '-12.3': default_value,
            0: 0,
            123: 123,
            -123: -123,
            12.3: 12,
            -12.3: -12,
        }

        unicode_test_cases = {
            u'': default_value,
            u'123': 123,
            u'-123': -123,
            u'12.3': default_value,
            u'-12.3': default_value,
        }

        for test in test_cases, unicode_test_cases:
            for (candidate, result) in iteritems(test):
                self.assertEqual(try_int(candidate, default_value), result)
Example #10
    def test_try_int(self):
        test_cases = {
            None: 0,
            '': 0,
            '123': 123,
            '-123': -123,
            '12.3': 0,
            '-12.3': 0,
            0: 0,
            123: 123,
            -123: -123,
            12.3: 12,
            -12.3: -12,
        }

        unicode_test_cases = {
            u'': 0,
            u'123': 123,
            u'-123': -123,
            u'12.3': 0,
            u'-12.3': 0,
        }

        for test in test_cases, unicode_test_cases:
            for (candidate, result) in iteritems(test):
                self.assertEqual(try_int(candidate), result)
Example #11
    def _get_size(self, item):
        """
        Get size info from a result item.

        Returns int size or -1
        """
        return try_int(item.get('size', -1), -1)
Example #12
    def search(self, search_strings, age=0, ep_obj=None, **kwargs):
        """Start searching for anime using the provided search_strings. Used for backlog and daily."""
        results = []

        for mode in search_strings:
            items = []
            log.debug('Search mode: {0}', mode)

            for search_string in search_strings[mode]:

                if mode != 'RSS':
                    log.debug('Search string: {search}',
                              {'search': search_string})

                search_url = (self.urls['rss'], self.urls['api'] +
                              search_string)[mode != 'RSS']
                response = self.session.get(search_url)
                if not response or not response.text:
                    log.debug('No data returned from provider')
                    continue

                if not response.text.startswith('<?xml'):
                    log.info(
                        'Expected xml but got something else, is your mirror failing?'
                    )
                    continue

                with BS4Parser(response.text, 'html5lib') as html:
                    entries = html('item')
                    if not entries:
                        log.info('Returned xml contained no results')
                        continue

                    for item in entries:
                        try:
                            title = item.title.get_text(strip=True)
                            download_url = item.enclosure.get(
                                'url').strip()
                            if not (title and download_url):
                                continue

                            # description = item.find('description')
                            size = try_int(item.enclosure.get(
                                'length', -1))

                            item = {
                                'title': title,
                                'link': download_url,
                                'size': size,
                            }

                            items.append(item)
                        except (AttributeError, TypeError, KeyError,
                                ValueError, IndexError):
                            log.exception('Failed parsing provider.')
                            continue

            results += items

        return results
Example #13
def test_try_int(value, expected):
    # Given

    # When
    actual = sut.try_int(value)

    # Then
    assert expected == actual
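
Example 13 is clearly the body of a parametrized test; the decorator and the ``sut`` import are cropped out. A plausible (hypothetical) pytest harness around it:

import pytest

from medusa.helper import common as sut  # hypothetical import path

@pytest.mark.parametrize('value,expected', [
    (None, 0),
    ('', 0),
    ('123', 123),
    ('12.3', 0),
    (12.3, 12),
])
def test_try_int(value, expected):
    # Given

    # When
    actual = sut.try_int(value)

    # Then
    assert expected == actual
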
Example #14
    def _create_recommended_show(self, show_obj):
        """Create the RecommendedShow object from the returned showobj."""
        rec_show = RecommendedShow(
            self,
            show_obj['show']['ids'],
            show_obj['show']['title'],
            INDEXER_TVDBV2,  # indexer
            show_obj['show']['ids']['tvdb'],
            **{
                'rating':
                show_obj['show']['rating'],
                'votes':
                try_int(show_obj['show']['votes'], '0'),
                'image_href':
                'http://www.trakt.tv/shows/{0}'.format(
                    show_obj['show']['ids']['slug']),
                # Adds like: {'tmdb': 62126, 'tvdb': 304219, 'trakt': 79382, 'imdb': 'tt3322314',
                # 'tvrage': None, 'slug': 'marvel-s-luke-cage'}
                'ids':
                show_obj['show']['ids']
            })

        use_default = None
        image = None
        try:
            if not missing_posters.has(show_obj['show']['ids']['tvdb']):
                image = self.check_cache_for_poster(show_obj['show']['ids']['tvdb']) or \
                    self.tvdb_api_v2.config['session'].series_api.series_id_images_query_get(show_obj['show']['ids']['tvdb'],
                                                                                             key_type='poster').data[0].file_name
            else:
                log.info('CACHE: Missing poster on TVDB for show {0}',
                         show_obj['show']['title'])
                use_default = self.default_img_src
        except ApiException as error:
            use_default = self.default_img_src
            if getattr(error, 'status', None) == 404:
                log.info('Missing poster on TheTVDB for show {0}',
                         show_obj['show']['title'])
                missing_posters.append(show_obj['show']['ids']['tvdb'])
        except Exception as error:
            use_default = self.default_img_src
            log.debug('Missing poster on TheTVDB, cause: {0!r}', error)

        if image:
            rec_show.cache_image(
                'http://thetvdb.com/banners/{0}'.format(image),
                default=use_default)
        else:
            rec_show.cache_image('', default=use_default)

        # As the method below requires a lot of resources, I've only enabled it when
        # the show's language or country is 'jp' (Japanese). It looks a little awkward,
        # but the alternative is a lot of wasted resources.
        if 'jp' in [show_obj['show']['country'], show_obj['show']['language']]:
            rec_show.check_if_anime(self.anidb,
                                    show_obj['show']['ids']['tvdb'])

        return rec_show
Example #15
    def vres(self):
        """
        The vertical resolution found in the name.

        :returns: ``None`` if not found
        """
        attr = 'res'
        match = self._get_match_obj(attr)
        return None if not match else try_int(match.group('vres'))
Example #16
    def vres(self):
        """
        The vertical resolution found in the name.

        :returns: ``None`` if not found
        """
        attr = 'res'
        match = self._get_match_obj(attr)
        return None if not match else try_int(match.group('vres'))
Example #17
def change_SUBTITLES_FINDER_FREQUENCY(subtitles_finder_frequency):
    """
    Change frequency of subtitle thread

    :param subtitles_finder_frequency: New frequency
    """
    if subtitles_finder_frequency == '' or subtitles_finder_frequency is None:
        subtitles_finder_frequency = 1

    app.SUBTITLES_FINDER_FREQUENCY = try_int(subtitles_finder_frequency, 1)
Example #18
    def _get_size(self, item):
        """Get result size."""
        if isinstance(item, dict):
            size = item.get('size', -1)
        elif isinstance(item, (list, tuple)) and len(item) > 2:
            size = item[2]
        else:
            size = -1

        return try_int(size, -1)
Example #19
def minimax(val, default, low, high):
    """ Return value forced within range """

    val = try_int(val, default)

    if val < low:
        return low
    if val > high:
        return high

    return val
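
A quick check of minimax against the try_int sketch shown after Example 1 (values chosen purely for illustration):

assert minimax('7', 5, 1, 10) == 7     # parses and is within range
assert minimax('abc', 5, 1, 10) == 5   # unparsable -> default, which is in range
assert minimax(42, 5, 1, 10) == 10     # above high -> clamped to high
assert minimax(-3, 5, 1, 10) == 1      # below low -> clamped to low
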
Example #20
    def saveProviders(self, provider_order, **kwargs):
        """Save Provider related settings."""
        newznab_string = kwargs.pop('newznab_string', '')
        torrentrss_string = kwargs.pop('torrentrss_string', '')
        torznab_string = kwargs.pop('torznab_string', '')

        self._save_newznab_providers(newznab_string)
        self._save_rsstorrent_providers(torrentrss_string)
        self._save_torznab_providers(torznab_string)

        def ordered_providers(names, providers):
            reminder = {}
            for name in names:
                for provider in providers:
                    reminder[provider.get_id()] = provider
                    if provider.get_id() == name:
                        yield provider
            else:
                rest = set(reminder).difference(set(names))
                for provider in rest:
                    yield reminder[provider]

        ordered_names = OrderedDict()
        provider_order_list = provider_order.split()
        for provider_setting in provider_order_list:
            cur_provider, cur_setting = provider_setting.split(':')
            enabled = try_int(cur_setting)
            ordered_names[cur_provider] = enabled

        providers_enabled = []
        providers_disabled = []
        all_providers = providers.sorted_provider_list()

        for provider in ordered_providers(ordered_names, all_providers):
            name = provider.get_id()
            if ordered_names.get(name):
                provider.enabled = True
                providers_enabled.append(name)
            else:
                provider.enabled = False
                providers_disabled.append(name)

            self._set_common_settings(provider, **kwargs)
            if isinstance(provider, TorrentProvider):
                self._set_torrent_settings(provider, **kwargs)

        app.PROVIDER_ORDER = providers_enabled + providers_disabled

        app.instance.save_config()

        ui.notifications.message('Configuration Saved',
                                 os.path.join(app.CONFIG_FILE))

        return self.redirect('/config/providers/')
Example #21
def change_UPDATE_FREQUENCY(freq):
    """
    Change frequency of daily updater thread

    :param freq: New frequency
    """
    app.UPDATE_FREQUENCY = try_int(freq, app.DEFAULT_UPDATE_FREQUENCY)

    if app.UPDATE_FREQUENCY < app.MIN_UPDATE_FREQUENCY:
        app.UPDATE_FREQUENCY = app.MIN_UPDATE_FREQUENCY

    app.version_check_scheduler.cycleTime = datetime.timedelta(hours=app.UPDATE_FREQUENCY)
Example #22
def change_TORRENT_CHECKER_FREQUENCY(freq):
    """
    Change frequency of Torrent Checker thread

    :param freq: New frequency
    """
    app.TORRENT_CHECKER_FREQUENCY = try_int(freq, app.DEFAULT_TORRENT_CHECKER_FREQUENCY)

    if app.TORRENT_CHECKER_FREQUENCY < app.MIN_TORRENT_CHECKER_FREQUENCY:
        app.TORRENT_CHECKER_FREQUENCY = app.MIN_TORRENT_CHECKER_FREQUENCY

    app.torrent_checker_scheduler.cycleTime = datetime.timedelta(minutes=app.TORRENT_CHECKER_FREQUENCY)
Example #23
def change_DAILYSEARCH_FREQUENCY(freq):
    """
    Change frequency of daily search thread

    :param freq: New frequency
    """
    app.DAILYSEARCH_FREQUENCY = try_int(freq, app.DEFAULT_DAILYSEARCH_FREQUENCY)

    if app.DAILYSEARCH_FREQUENCY < app.MIN_DAILYSEARCH_FREQUENCY:
        app.DAILYSEARCH_FREQUENCY = app.MIN_DAILYSEARCH_FREQUENCY

    app.daily_search_scheduler.cycleTime = datetime.timedelta(minutes=app.DAILYSEARCH_FREQUENCY)
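
Examples 21-23 (and 26-27 below) repeat a single pattern: coerce the new value with try_int, clamp it to a minimum, then reschedule the thread. A generic helper capturing the numeric half of that pattern might look like this (hypothetical, not in the source project; assumes the try_int sketch from Example 1):

def clamped_frequency(freq, default, minimum):
    """Coerce ``freq`` to an int and clamp it to at least ``minimum`` (sketch).

    The real changers additionally reschedule their thread, e.g.
    scheduler.cycleTime = datetime.timedelta(minutes=value).
    """
    return max(try_int(freq, default), minimum)
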
Example #24
    def saveProviders(self, provider_order, **kwargs):
        """Save Provider related settings."""
        newznab_string = kwargs.pop('newznab_string', '')
        torrentrss_string = kwargs.pop('torrentrss_string', '')
        torznab_string = kwargs.pop('torznab_string', '')

        self._save_newznab_providers(newznab_string)
        self._save_rsstorrent_providers(torrentrss_string)
        self._save_torznab_providers(torznab_string)

        def ordered_providers(names, providers):
            reminder = {}
            for name in names:
                for provider in providers:
                    reminder[provider.get_id()] = provider
                    if provider.get_id() == name:
                        yield provider
            else:
                rest = set(reminder).difference(set(names))
                for provider in rest:
                    yield reminder[provider]

        ordered_names = OrderedDict()
        provider_order_list = provider_order.split()
        for provider_setting in provider_order_list:
            cur_provider, cur_setting = provider_setting.split(':')
            enabled = try_int(cur_setting)
            ordered_names[cur_provider] = enabled

        providers_enabled = []
        providers_disabled = []
        all_providers = providers.sorted_provider_list()

        for provider in ordered_providers(ordered_names, all_providers):
            name = provider.get_id()
            if ordered_names.get(name):
                provider.enabled = True
                providers_enabled.append(name)
            else:
                provider.enabled = False
                providers_disabled.append(name)

            self._set_common_settings(provider, **kwargs)
            if isinstance(provider, TorrentProvider):
                self._set_torrent_settings(provider, **kwargs)

        app.PROVIDER_ORDER = providers_enabled + providers_disabled

        app.instance.save_config()

        ui.notifications.message('Configuration Saved', os.path.join(app.CONFIG_FILE))

        return self.redirect('/config/providers/')
Example #25
def test_try_int_with_default(value, expected):
    # Given
    default_value = 42

    if callable(expected):
        expected = eval(expected())

    # When
    actual = sut.try_int(value, default_value)

    # Then
    assert expected == actual
Example #26
def change_BACKLOG_FREQUENCY(freq):
    """
    Change frequency of backlog thread

    :param freq: New frequency
    """
    app.BACKLOG_FREQUENCY = try_int(freq, app.DEFAULT_BACKLOG_FREQUENCY)

    app.MIN_BACKLOG_FREQUENCY = app.instance.get_backlog_cycle_time()
    if app.BACKLOG_FREQUENCY < app.MIN_BACKLOG_FREQUENCY:
        app.BACKLOG_FREQUENCY = app.MIN_BACKLOG_FREQUENCY

    app.backlog_search_scheduler.cycleTime = datetime.timedelta(minutes=app.BACKLOG_FREQUENCY)
Example #27
def change_AUTOPOSTPROCESSOR_FREQUENCY(freq):
    """
    Change frequency of automatic postprocessing thread
    TODO: Make all thread frequency changers in config.py return True/False status

    :param freq: New frequency
    """
    app.AUTOPOSTPROCESSOR_FREQUENCY = try_int(freq, app.DEFAULT_AUTOPOSTPROCESSOR_FREQUENCY)

    if app.AUTOPOSTPROCESSOR_FREQUENCY < app.MIN_AUTOPOSTPROCESSOR_FREQUENCY:
        app.AUTOPOSTPROCESSOR_FREQUENCY = app.MIN_AUTOPOSTPROCESSOR_FREQUENCY

    app.auto_post_processor_scheduler.cycleTime = datetime.timedelta(minutes=app.AUTOPOSTPROCESSOR_FREQUENCY)
Example #28
    def get(self, limit=100, action=None, show_obj=None):
        """
        :param limit: The maximum number of elements to return
        :param action: The type of action to filter in the history. Either 'downloaded' or 'snatched'. Anything else or
                        no value will return everything (up to ``limit``)
        :param show_obj: Optional show object; when given, only history for that show is returned
        :return: The last ``limit`` elements of type ``action`` in the history
        """

        # TODO: Make this a generator instead
        # TODO: Split compact and detailed into separate methods
        # TODO: Add a date limit as well
        # TODO: Clean up history.mako

        parsed_action = History._get_action(action)
        limit = max(try_int(limit), 0)

        common_sql = (
            'SELECT show_name, h.indexer_id, showid AS show_id, season, episode, action, h.quality, '
            'provider, resource, date, h.proper_tags, h.manually_searched '
            'FROM history h, tv_shows s '
            'WHERE h.showid = s.indexer_id AND h.indexer_id = s.indexer ')
        filter_sql = 'AND action = ? '
        order_sql = 'ORDER BY date DESC '

        show_params = []
        if show_obj:
            filter_sql += 'AND s.showid = ? AND s.indexer_id = ?'
            show_params += [show_obj.series_id, show_obj.indexer]

        if parsed_action:
            sql_results = self.db.select(common_sql + filter_sql + order_sql,
                                         [parsed_action] + show_params)
        else:
            sql_results = self.db.select(common_sql + order_sql, show_params)

        detailed = []
        compact = dict()

        # TODO: Convert to a defaultdict and compact items as needed
        # TODO: Convert to using operators to combine items
        for row in sql_results:
            row = History.Item(**row)
            if not limit or len(detailed) < limit:
                detailed.append(row)
            if row.index in compact:
                compact[row.index].actions.append(row.cur_action)
            elif not limit or len(compact) < limit:
                compact[row.index] = row.compacted()

        results = namedtuple('results', ['detailed', 'compact'])
        return results(detailed, list(itervalues(compact)))
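
Hypothetical usage of Example 28's get(); here ``history`` stands in for an instance of the owning class:

results = history.get(limit=50, action='downloaded')
detailed, compact = results.detailed, results.compact  # namedtuple fields
# ``detailed`` holds up to 50 raw rows (newest first); ``compact`` merges
# all actions for the same episode into a single entry.
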
Example #29
    def __init__(self, indexer_id, media_format='normal'):
        """
        Initialize media for a series.

        :param indexer_id: The indexer id of the show
        :param media_format: The format of the media to get. Must be either 'normal' or 'thumb'
        """

        self.indexer_id = try_int(indexer_id, 0)

        if media_format in ('normal', 'thumb'):
            self.media_format = media_format
        else:
            self.media_format = 'normal'
Example #30
    def _get_size(self, item):
        """Get result size."""
        if isinstance(item, dict):
            size = item.get('size', -1)
        elif isinstance(item, (list, tuple)) and len(item) > 2:
            size = item[2]
        else:
            size = -1

        # Make sure we didn't select seeds/leechers by accident
        if not size or size < 1024 * 1024:
            size = -1

        return try_int(size, -1)
Example #31
    def _create_recommended_show(self, show, subcat=None):
        """Create the RecommendedShow object from the returned showobj."""
        rec_show = RecommendedShow(
            self,
            show.trakt,
            show.title,
            **{'rating': show.ratings['rating'],
                'votes': try_int(show.ratings['votes'], '0'),
                'image_href': 'http://www.trakt.tv/shows/{0}'.format(show.ids['ids']['slug']),
                # Adds like: {'tmdb': 62126, 'tvdb': 304219, 'trakt': 79382, 'imdb': 'tt3322314',
                # 'tvrage': None, 'slug': 'marvel-s-luke-cage'}
                'ids': {f'{k}_id': v for k, v in iteritems(show.ids['ids']) if TRAKT_INDEXERS.get(k)},
                'subcat': subcat,
                'genres': [genre.lower() for genre in show.genres],
                'plot': show.overview
               }
        )

        use_default = None
        image = None
        try:
            if not missing_posters.has(show.tvdb):
                image = self.check_cache_for_poster(show.tvdb) or \
                    self.tvdb_api_v2.config['session'].series_api.series_id_images_query_get(
                        show.tvdb, key_type='poster').data[0].file_name
            else:
                log.info('CACHE: Missing poster on TVDB for show {0}', show.title)
                use_default = self.default_img_src
        except ApiException as error:
            use_default = self.default_img_src
            if getattr(error, 'status', None) == 404:
                log.info('Missing poster on TheTVDB for show {0}', show.title)
                missing_posters.append(show.tvdb)
        except Exception as error:
            use_default = self.default_img_src
            log.debug('Missing poster on TheTVDB, cause: {0!r}', error)

        image_url = ''
        if image:
            image_url = self.tvdb_api_v2.config['artwork_prefix'].format(image=image)

        rec_show.cache_image(image_url, default=use_default)

        # As the method below requires a lot of resources, I've only enabled it when
        # the show's language or country is 'jp' (Japanese). It looks a little awkward,
        # but the alternative is a lot of wasted resources.
        if 'jp' in [show.country, show.language]:
            rec_show.flag_as_anime(show.tvdb)

        return rec_show
Example #32
def change_SHOWUPDATE_HOUR(freq):
    """
    Change frequency of show updater thread

    :param freq: New frequency
    """
    app.SHOWUPDATE_HOUR = try_int(freq, app.DEFAULT_SHOWUPDATE_HOUR)

    if app.SHOWUPDATE_HOUR > 23:
        app.SHOWUPDATE_HOUR = 0
    elif app.SHOWUPDATE_HOUR < 0:
        app.SHOWUPDATE_HOUR = 0

    app.show_update_scheduler.start_time = datetime.time(hour=app.SHOWUPDATE_HOUR)
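
Note that Example 32 resets an out-of-range hour to 0 rather than clamping it to the nearest bound. An equivalent, slightly more compact form of the same check (behavior unchanged):

app.SHOWUPDATE_HOUR = try_int(freq, app.DEFAULT_SHOWUPDATE_HOUR)
if not 0 <= app.SHOWUPDATE_HOUR <= 23:
    app.SHOWUPDATE_HOUR = 0
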
Example #33
def parse_date_time(d, t, network):
    """
    Parse date and time string into local time

    :param d: date string
    :param t: time string
    :param network: network to use as base
    :return: datetime object containing local time
    """

    if not network_dict:
        load_network_dict()

    parsed_time = time_regex.search(t)
    network_tz = get_network_timezone(network, network_dict)

    hr = 0
    m = 0

    if parsed_time:
        hr = try_int(parsed_time.group('hour'))
        m = try_int(parsed_time.group('minute'))

        ap = parsed_time.group('meridiem')
        ap = ap[0].lower() if ap else ''

        if ap == 'a' and hr == 12:
            hr -= 12
        elif ap == 'p' and hr != 12:
            hr += 12

        hr = hr if 0 <= hr <= 23 else 0
        m = m if 0 <= m <= 59 else 0

    result = datetime.datetime.fromordinal(max(try_int(d), 1))

    return result.replace(hour=hr, minute=m, tzinfo=network_tz)
Example #34
def parse_date_time(d, t, network):
    """
    Parse date and time string into local time.

    :param d: date string
    :param t: time string
    :param network: network to use as base
    :return: datetime object containing local time
    """
    if not network_dict:
        load_network_dict()

    parsed_time = time_regex.search(t)
    network_tz = get_network_timezone(network, network_dict)

    hr = 0
    m = 0

    if parsed_time:
        hr = try_int(parsed_time.group('hour'))
        m = try_int(parsed_time.group('minute'))

        ap = parsed_time.group('meridiem')
        ap = ap[0].lower() if ap else ''

        if ap == 'a' and hr == 12:
            hr -= 12
        elif ap == 'p' and hr != 12:
            hr += 12

        hr = hr if 0 <= hr <= 23 else 0
        m = m if 0 <= m <= 59 else 0

    result = datetime.datetime.fromordinal(max(try_int(d), 1))

    return result.replace(hour=hr, minute=m, tzinfo=network_tz)
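
Worked examples of the meridiem handling in Examples 33-34, assuming time_regex captures hour, minute, and meridiem (illustrative inputs):

# '8:15 pm'  -> hr=8,  ap='p', hr != 12 -> hr=20, m=15
# '12:30 am' -> hr=12, ap='a', hr == 12 -> hr=0,  m=30
# '12:05 pm' -> hr=12, ap='p', hr == 12 -> hr stays 12, m=5
# '99:99'    -> both values fall outside their ranges, so hr=0, m=0
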
Example #35
    def _create_recommended_show(self, storage_key, series):
        """Create the RecommendedShow object from the returned showobj."""
        rec_show = RecommendedShow(
            self,
            series['show']['ids'], series['show']['title'],
            INDEXER_TVDBV2,  # indexer
            series['show']['ids']['tvdb'],
            **{'rating': series['show']['rating'],
                'votes': try_int(series['show']['votes'], '0'),
                'image_href': 'http://www.trakt.tv/shows/{0}'.format(series['show']['ids']['slug']),
                # Adds like: {'tmdb': 62126, 'tvdb': 304219, 'trakt': 79382, 'imdb': 'tt3322314',
                # 'tvrage': None, 'slug': 'marvel-s-luke-cage'}
                'ids': series['show']['ids']
               }
        )

        use_default = None
        image = None
        try:
            if not missing_posters.has(series['show']['ids']['tvdb']):
                image = self.check_cache_for_poster(series['show']['ids']['tvdb']) or \
                    self.tvdb_api_v2.config['session'].series_api.series_id_images_query_get(
                        series['show']['ids']['tvdb'], key_type='poster').data[0].file_name
            else:
                log.info('CACHE: Missing poster on TVDB for show {0}', series['show']['title'])
                use_default = self.default_img_src
        except ApiException as error:
            use_default = self.default_img_src
            if getattr(error, 'status', None) == 404:
                log.info('Missing poster on TheTVDB for show {0}', series['show']['title'])
                missing_posters.append(series['show']['ids']['tvdb'])
        except Exception as error:
            use_default = self.default_img_src
            log.debug('Missing poster on TheTVDB, cause: {0!r}', error)

        image_url = ''
        if image:
            image_url = self.tvdb_api_v2.config['artwork_prefix'].format(image=image)

        rec_show.cache_image(image_url, default=use_default)

        # As the method below requires a lot of resources, I've only enabled it when
        # the show's language or country is 'jp' (Japanese). It looks a little awkward,
        # but the alternative is a lot of wasted resources.
        if 'jp' in [series['show']['country'], series['show']['language']]:
            rec_show.flag_as_anime(series['show']['ids']['tvdb'])

        return rec_show
Example #36
def change_DOWNLOAD_HANDLER_FREQUENCY(freq):
    """
    Change frequency of Download Handler thread.

    :param freq: New frequency
    """
    if app._DOWNLOAD_HANDLER_FREQUENCY == freq:
        return

    app._DOWNLOAD_HANDLER_FREQUENCY = try_int(freq, app.DEFAULT_DOWNLOAD_HANDLER_FREQUENCY)

    if app._DOWNLOAD_HANDLER_FREQUENCY < app.MIN_DOWNLOAD_HANDLER_FREQUENCY:
        app._DOWNLOAD_HANDLER_FREQUENCY = app.MIN_DOWNLOAD_HANDLER_FREQUENCY

    if app.download_handler_scheduler:
        app.download_handler_scheduler.cycleTime = datetime.timedelta(minutes=app._DOWNLOAD_HANDLER_FREQUENCY)
Example #37
def which_type(path):
    """
    Analyze image and attempt to determine its type.

    :param path: full path to the image
    :return: artwork type if detected, or None
    """
    if not os.path.isfile(path):
        log.warning('Could not check type, file does not exist: {0}', path)
        return

    if not try_int(os.path.getsize(path)):
        log.warning('Deleting 0 byte image: {0}', path)
        try:
            os.remove(path)
        except OSError as error:
            log.warning(
                'Failed to delete file: {path}. Please delete it manually.'
                ' Error: {msg}', {
                    'path': path,
                    'msg': error
                })
        return

    image_dimension = get_image_size(path)
    if not image_dimension:
        log.debug('Skipping image. Unable to get metadata from {0}', path)
        return

    height, width = image_dimension
    if not width or not height:
        log.debug('Skipping image. zero width or height {0}', path)
        return

    aspect_ratio = width / height
    log.debug('Image aspect ratio: {0}', aspect_ratio)

    for img_type in ASPECT_RATIOS:
        min_ratio, median_ratio, max_ratio = ASPECT_RATIOS[img_type]
        if min_ratio < aspect_ratio < max_ratio:
            log.debug('{image} detected based on aspect ratio.',
                      {'image': IMAGE_TYPES[img_type]})
            return img_type
    else:
        log.warning('Aspect ratio ({0}) does not match any known types.',
                    aspect_ratio)
        return
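
ASPECT_RATIOS and IMAGE_TYPES are defined outside the snippet. A hypothetical shape that would satisfy the loop above (all values invented for illustration):

# Hypothetical constants; the real tables live in the snippet's parent module.
BANNER, POSTER, FANART = 1, 2, 3
IMAGE_TYPES = {BANNER: 'Banner', POSTER: 'Poster', FANART: 'Fanart'}
ASPECT_RATIOS = {
    POSTER: (0.55, 0.68, 0.8),  # (min_ratio, median_ratio, max_ratio)
    FANART: (1.7, 1.78, 1.9),
    BANNER: (5.0, 5.4, 6.0),
}
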
Example #38
    def index(self, limit=None):
        if limit is None:
            if app.HISTORY_LIMIT:
                limit = int(app.HISTORY_LIMIT)
            else:
                limit = 100
        else:
            limit = try_int(limit, 100)

        app.HISTORY_LIMIT = limit

        app.instance.save_config()

        history = self.history.get(limit)

        t = PageTemplate(rh=self, filename='history.mako')
        return t.render(historyResults=history.detailed, compactResults=history.compact, limit=limit,
                        controller='history', action='index')
Example #39
def which_type(path):
    """
    Analyze image and attempt to determine its type.

    :param path: full path to the image
    :return: artwork type if detected, or None
    """
    if not os.path.isfile(path):
        log.warning('Could not check type, file does not exist: {0}', path)
        return

    if not try_int(os.path.getsize(path)):
        log.warning('Deleting 0 byte image: {0}', path)
        try:
            os.remove(path)
        except OSError as error:
            log.warning(
                'Failed to delete file: {path}. Please delete it manually.'
                ' Error: {msg}', {'path': path, 'msg': error})
        return

    image_dimension = get_image_size(path)
    if not image_dimension:
        log.debug('Skipping image. Unable to get metadata from {0}', path)
        return

    height, width = image_dimension
    aspect_ratio = width / height
    log.debug('Image aspect ratio: {0}', aspect_ratio)

    for img_type in ASPECT_RATIOS:
        min_ratio, median_ratio, max_ratio = ASPECT_RATIOS[img_type]
        if min_ratio < aspect_ratio < max_ratio:
            log.debug('{image} detected based on aspect ratio.',
                      {'image': IMAGE_TYPES[img_type]})
            return img_type
    else:
        log.warning('Aspect ratio ({0}) does not match any known types.',
                    aspect_ratio)
        return
Example #40
    def index(self, limit=None):
        if limit is None:
            if app.HISTORY_LIMIT:
                limit = int(app.HISTORY_LIMIT)
            else:
                limit = 100
        else:
            limit = try_int(limit, 100)

        app.HISTORY_LIMIT = limit

        app.instance.save_config()

        history = self.history.get(limit)

        t = PageTemplate(rh=self, filename='history.mako')
        submenu = [
            {'title': 'Clear History', 'path': 'history/clearHistory', 'icon': 'ui-icon ui-icon-trash', 'class': 'clearhistory', 'confirm': True},
            {'title': 'Trim History', 'path': 'history/trimHistory', 'icon': 'menu-icon-cut', 'class': 'trimhistory', 'confirm': True},
        ]

        return t.render(historyResults=history.detailed, compactResults=history.compact, limit=limit,
                        submenu=submenu[::-1], controller='history', action='index')
Example #41
    def parse(self, data, mode):
        """
        Parse search results for items.

        :param data: The raw response from a search
        :param mode: The current mode used to search, e.g. RSS

        :return: A list of items found
        """
        items = []

        with BS4Parser(data, 'html5lib') as html:

            rows = html('item')
            if not rows:
                log.debug(
                    'No results returned from provider. Check chosen Newznab search categories'
                    ' in provider settings and/or usenet retention')
                return items

            try:
                self.torznab = 'xmlns:torznab' in html.rss.attrs
            except AttributeError:
                self.torznab = False

            for item in rows:
                try:
                    title = item.title.get_text(strip=True)
                    download_url = None

                    if item.enclosure:
                        url = item.enclosure.get('url', '').strip()
                        if url.startswith('magnet:'):
                            download_url = url
                        elif validators.url(url):
                            download_url = url
                            # Jackett needs extension added (since v0.8.396)
                            if not url.endswith('.torrent'):
                                content_type = item.enclosure.get('type', '')
                                if content_type == 'application/x-bittorrent':
                                    download_url = '{0}{1}'.format(url, '.torrent')

                    if not download_url and item.link:
                        url = item.link.get_text(strip=True)
                        if validators.url(url) or url.startswith('magnet:'):
                            download_url = url

                        if not download_url:
                            url = item.link.next.strip()
                            if validators.url(url) or url.startswith('magnet:'):
                                download_url = url

                    if not (title and download_url):
                        continue

                    seeders = leechers = -1
                    if 'gingadaddy' in self.url:
                        size_regex = re.search(r'\d*\.?\d* [KMGT]B', str(item.description))
                        item_size = size_regex.group() if size_regex else -1
                    else:
                        item_size = item.size.get_text(strip=True) if item.size else -1
                        # Use regex to find name-spaced tags
                        # see BeautifulSoup4 bug 1720605
                        # https://bugs.launchpad.net/beautifulsoup/+bug/1720605
                        newznab_attrs = item(re.compile('newznab:attr'))
                        torznab_attrs = item(re.compile('torznab:attr'))
                        for attr in newznab_attrs + torznab_attrs:
                            item_size = attr['value'] if attr['name'] == 'size' else item_size
                            seeders = try_int(attr['value']) if attr['name'] == 'seeders' else seeders
                            peers = try_int(attr['value']) if attr['name'] == 'peers' else None
                            leechers = peers - seeders if peers else leechers

                    if not item_size or (self.torznab and (seeders == -1 or leechers == -1)):
                        continue

                    size = convert_size(item_size) or -1

                    pubdate_raw = item.pubdate.get_text(strip=True)
                    pubdate = self.parse_pubdate(pubdate_raw)

                    item = {
                        'title': title,
                        'link': download_url,
                        'size': size,
                        'seeders': seeders,
                        'leechers': leechers,
                        'pubdate': pubdate,
                    }
                    if mode != 'RSS':
                        if seeders == -1:
                            log.debug('Found result: {0}', title)
                        else:
                            log.debug('Found result: {0} with {1} seeders and {2} leechers',
                                      title, seeders, leechers)

                    items.append(item)
                except (AttributeError, TypeError, KeyError, ValueError, IndexError):
                    log.exception('Failed parsing provider.')

        return items
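
The namespaced-attribute loop in Example 41 is easiest to follow with concrete numbers (illustrative feed values, not taken from any real indexer):

# <torznab:attr name="size"    value="734003200" />  -> item_size = '734003200'
# <torznab:attr name="seeders" value="12" />         -> seeders = 12
# <torznab:attr name="peers"   value="15" />         -> peers = 15, so leechers = 15 - 12 = 3
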
Example #42
    def massEditSubmit(self, paused=None, default_ep_status=None, dvd_order=None,
                       anime=None, sports=None, scene=None, season_folders=None, quality_preset=None,
                       subtitles=None, air_by_date=None, allowed_qualities=None, preferred_qualities=None, toEdit=None, *args,
                       **kwargs):
        allowed_qualities = allowed_qualities or []
        preferred_qualities = preferred_qualities or []

        dir_map = {}
        for cur_arg in kwargs:
            if not cur_arg.startswith('orig_root_dir_'):
                continue
            which_index = cur_arg.replace('orig_root_dir_', '')
            end_dir = kwargs['new_root_dir_{index}'.format(index=which_index)]
            dir_map[kwargs[cur_arg]] = end_dir

        series_slugs = toEdit.split('|') if toEdit else []
        errors = 0
        for series_slug in series_slugs:
            identifier = SeriesIdentifier.from_slug(series_slug)
            series_obj = Series.find_by_identifier(identifier)

            if not series_obj:
                continue

            cur_root_dir = os.path.dirname(series_obj._location)
            cur_show_dir = os.path.basename(series_obj._location)
            if cur_root_dir in dir_map and cur_root_dir != dir_map[cur_root_dir]:
                new_show_dir = os.path.join(dir_map[cur_root_dir], cur_show_dir)
                logger.log(u'For show {show.name} changing dir from {show._location} to {location}'.format
                           (show=series_obj, location=new_show_dir))
            else:
                new_show_dir = series_obj._location

            if paused == 'keep':
                new_paused = series_obj.paused
            else:
                new_paused = paused == 'enable'
            new_paused = 'on' if new_paused else 'off'

            if default_ep_status == 'keep':
                new_default_ep_status = series_obj.default_ep_status
            else:
                new_default_ep_status = default_ep_status

            if anime == 'keep':
                new_anime = series_obj.anime
            else:
                new_anime = anime == 'enable'
            new_anime = 'on' if new_anime else 'off'

            if sports == 'keep':
                new_sports = series_obj.sports
            else:
                new_sports = sports == 'enable'
            new_sports = 'on' if new_sports else 'off'

            if scene == 'keep':
                new_scene = series_obj.is_scene
            else:
                new_scene = scene == 'enable'
            new_scene = 'on' if new_scene else 'off'

            if air_by_date == 'keep':
                new_air_by_date = series_obj.air_by_date
            else:
                new_air_by_date = air_by_date == 'enable'
            new_air_by_date = 'on' if new_air_by_date else 'off'

            if dvd_order == 'keep':
                new_dvd_order = series_obj.dvd_order
            else:
                new_dvd_order = dvd_order == 'enable'
            new_dvd_order = 'on' if new_dvd_order else 'off'

            if season_folders == 'keep':
                new_season_folders = series_obj.season_folders
            else:
                new_season_folders = season_folders == 'enable'
            new_season_folders = 'on' if new_season_folders else 'off'

            if subtitles == 'keep':
                new_subtitles = series_obj.subtitles
            else:
                new_subtitles = subtitles == 'enable'
            new_subtitles = 'on' if new_subtitles else 'off'

            if quality_preset == 'keep':
                allowed_qualities, preferred_qualities = series_obj.current_qualities
            elif try_int(quality_preset, None):
                preferred_qualities = []

            exceptions_list = []

            errors += self.editShow(identifier.indexer.slug, identifier.id, new_show_dir, allowed_qualities,
                                    preferred_qualities, exceptions_list,
                                    defaultEpStatus=new_default_ep_status,
                                    season_folders=new_season_folders,
                                    paused=new_paused, sports=new_sports, dvd_order=new_dvd_order,
                                    subtitles=new_subtitles, anime=new_anime,
                                    scene=new_scene, air_by_date=new_air_by_date,
                                    directCall=True)

        if errors:
            ui.notifications.error('Errors', '{num} error{s} while saving changes. Please check logs'.format
                                   (num=errors, s='s' if errors > 1 else ''))

        return self.redirect('/manage/')
Example #43
    def parse(self, data, mode):
        """
        Parse search results for items.

        :param data: The raw response from a search
        :param mode: The current mode used to search, e.g. RSS

        :return: A list of items found
        """
        items = []

        with BS4Parser(data, 'html5lib') as html:
            if not html:
                log.debug('No html data parsed from provider')
                return items

            torrents = html('tr')
            if not torrents or len(torrents) < 2:
                log.debug('Data returned from provider does not contain any torrents')
                return items

            # Skip column headers
            for row in torrents[1:]:
                # Skip extraneous rows at the end
                if len(row.contents) < 10:
                    continue

                try:
                    comments_counter = row.find_all('td', class_='lista', attrs={'align': 'center'})[4].find('a')
                    if comments_counter:
                        title = comments_counter['title'][10:]
                    else:
                        title = row.find('td', class_='lista', attrs={'align': 'left'}).find('a').get_text()
                    dl_href = row.find('td', class_='lista', attrs={'width': '20',
                                                                    'style': 'text-align: center;'}).find('a').get('href')
                    download_url = urljoin(self.url, dl_href)
                    if not all([title, dl_href]):
                        continue

                    seeders = try_int(row.find('span', class_='seedy').find('a').get_text(), 1)
                    leechers = try_int(row.find('span', class_='leechy').find('a').get_text())

                    # Filter unseeded torrent
                    if seeders < self.minseed:
                        if mode != 'RSS':
                            log.debug("Discarding torrent because it doesn't meet the"
                                      ' minimum seeders: {0}. Seeders: {1}',
                                      title, seeders)
                        continue

                    torrent_size = row.find('td', class_='lista222', attrs={'width': '100%'}).get_text()
                    size = convert_size(torrent_size) or -1

                    pubdate_td = row.find_all('td', class_='lista', attrs={'align': 'center'})[3]
                    pubdate_human_offset = pubdate_td.find('b')
                    if pubdate_human_offset:
                        time_search = re.search('([0-9:]+)', pubdate_td.get_text())
                        pubdate_raw = pubdate_human_offset.get_text() + ' at ' + time_search.group(1)
                    else:
                        pubdate_raw = pubdate_td.get_text()

                    pubdate = self.parse_pubdate(pubdate_raw)

                    item = {
                        'title': title,
                        'link': download_url,
                        'size': size,
                        'seeders': seeders,
                        'leechers': leechers,
                        'pubdate': pubdate,
                    }
                    log.debug('Found result: {0} with {1} seeders and {2} leechers',
                              title, seeders, leechers)

                    items.append(item)
                except (AttributeError, TypeError, KeyError, ValueError, IndexError):
                    log.exception('Failed parsing provider.')

        return items
Example #44
    def addNewShow(self, whichSeries=None, indexer_lang=None, rootDir=None, defaultStatus=None, quality_preset=None,
                   allowed_qualities=None, preferred_qualities=None, season_folders=None, subtitles=None,
                   fullShowPath=None, other_shows=None, skipShow=None, providedIndexer=None, anime=None,
                   scene=None, blacklist=None, whitelist=None, defaultStatusAfter=None):
        """
        Receive tvdb id, dir, and other options and create a show from them. If extra show dirs are
        provided, it forwards back to newShow; if not, it goes to /home.
        """
        provided_indexer = providedIndexer

        indexer_lang = app.INDEXER_DEFAULT_LANGUAGE if not indexer_lang else indexer_lang

        # grab our list of other dirs if given
        if not other_shows:
            other_shows = []
        elif not isinstance(other_shows, list):
            other_shows = [other_shows]

        other_shows = decode_shows(other_shows)

        def finishAddShow():
            # if there are no extra shows then go home
            if not other_shows:
                return json_response(redirect='/home/')

            # go to add the next show
            return json_response(
                redirect='/addShows/newShow/',
                params=[
                    ('show_to_add' if not i else 'other_shows', cur_dir)
                    for i, cur_dir in enumerate(other_shows)
                ]
            )

        # if we're skipping then behave accordingly
        if skipShow:
            return finishAddShow()

        # sanity check on our inputs
        if (not rootDir and not fullShowPath) or not whichSeries:
            error_msg = 'Missing params, no Indexer ID or folder: {series!r} and {root!r}/{path!r}'.format(
                series=whichSeries, root=rootDir, path=fullShowPath)
            log.error(error_msg)
            return json_response(
                result=False,
                message=error_msg,
                redirect='/home/'
            )

        # figure out what show we're adding and where
        series_pieces = whichSeries.split('|')
        if (whichSeries and rootDir) or (whichSeries and fullShowPath and len(series_pieces) > 1):
            if len(series_pieces) < 6:
                log.error('Unable to add show due to show selection. Not enough arguments: {pieces!r}',
                          {'pieces': series_pieces})
                ui.notifications.error('Unknown error. Unable to add show due to problem with show selection.')
                return json_response(
                    result=False,
                    message='Unable to add show due to show selection. Not enough arguments: {0!r}'.format(series_pieces),
                    redirect='/addShows/existingShows/'
                )

            indexer = int(series_pieces[1])
            indexer_id = int(series_pieces[3])
            show_name = series_pieces[4]
        else:
            # if no indexer was provided use the default indexer set in General settings
            if not provided_indexer:
                provided_indexer = app.INDEXER_DEFAULT

            indexer = int(provided_indexer)
            indexer_id = int(whichSeries)
            show_name = os.path.basename(os.path.normpath(fullShowPath))

        # use the whole path if it's given, or else append the show name to the root dir to get the full show path
        if fullShowPath:
            show_dir = os.path.normpath(fullShowPath)
        else:
            show_dir = os.path.join(rootDir, sanitize_filename(show_name))

        # blanket policy - if the dir already exists, 'add existing show' should have been used instead
        if os.path.isdir(show_dir) and not fullShowPath:
            ui.notifications.error('Unable to add show', 'Folder {path} exists already'.format(path=show_dir))
            return json_response(
                result=False,
                message='Unable to add show: Folder {path} exists already'.format(path=show_dir),
                redirect='/addShows/existingShows/'
            )

        # don't create show dir if config says not to
        if app.ADD_SHOWS_WO_DIR:
            log.info('Skipping initial creation of {path} due to config.ini setting',
                     {'path': show_dir})
        else:
            dir_exists = helpers.make_dir(show_dir)
            if not dir_exists:
                log.error("Unable to create the folder {path}, can't add the show",
                          {'path': show_dir})
                ui.notifications.error('Unable to add show',
                                       'Unable to create the folder {path}, can\'t add the show'.format(path=show_dir))
                # Don't redirect to default page because user wants to see the new show
                return json_response(
                    result=False,
                    message='Unable to add show: Unable to create the folder {path}'.format(path=show_dir),
                    redirect='/home/'
                )
            else:
                helpers.chmod_as_parent(show_dir)

        # prepare the inputs for passing along
        scene = config.checkbox_to_value(scene)
        anime = config.checkbox_to_value(anime)
        season_folders = config.checkbox_to_value(season_folders)
        subtitles = config.checkbox_to_value(subtitles)

        if whitelist:
            if not isinstance(whitelist, list):
                whitelist = [whitelist]
            whitelist = short_group_names(whitelist)
        if blacklist:
            if not isinstance(blacklist, list):
                blacklist = [blacklist]
            blacklist = short_group_names(blacklist)

        if not allowed_qualities:
            allowed_qualities = []
        if not preferred_qualities or try_int(quality_preset, None):
            preferred_qualities = []
        if not isinstance(allowed_qualities, list):
            allowed_qualities = [allowed_qualities]
        if not isinstance(preferred_qualities, list):
            preferred_qualities = [preferred_qualities]
        new_quality = Quality.combine_qualities([int(q) for q in allowed_qualities], [int(q) for q in preferred_qualities])

        # add the show
        app.show_queue_scheduler.action.addShow(indexer, indexer_id, show_dir, int(defaultStatus), new_quality,
                                                season_folders, indexer_lang, subtitles, anime,
                                                scene, None, blacklist, whitelist, int(defaultStatusAfter))
        ui.notifications.message('Show added', 'Adding the specified show into {path}'.format(path=show_dir))

        return finishAddShow()
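json_response is only ever invoked in this example, never defined. Assuming it simply packages its keyword arguments into the payload the frontend expects, a sketch could be:

def json_response(result=True, message=None, redirect=None, params=None):
    """Hypothetical helper mirroring the calls in addNewShow (a sketch)."""
    return {
        'result': result,
        'message': message,
        'redirect': redirect,  # the UI is assumed to resolve this path
        'params': params or [],
    }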
Exemplo n.º 45
0
    def parse(self, data, mode):
        """
        Parse search results for items.

        :param data: The raw response from a search
        :param mode: The current mode used to search, e.g. RSS

        :return: A list of items found
        """
        items = []

        with BS4Parser(data, 'html5lib') as html:
            # Continue only if at least one release is found
            empty = html.find('h2', text='No .torrents fit this filter criteria')
            if empty:
                log.debug('Data returned from provider does not contain any torrents')
                return items

            torrent_table = html.find('table', attrs={'style': 'border: none; width: 100%;'})
            torrent_rows = torrent_table('tr', class_='browse') if torrent_table else []

            for row in torrent_rows:
                cells = row('td')

                try:
                    title = cells[1].find('a').get('title')
                    torrent_url = cells[2].find('a').get('href')
                    download_url = urljoin(self.url, torrent_url)
                    if not all([title, torrent_url]):
                        continue

                    seeders = try_int(cells[9].get_text(), 1)
                    leechers = try_int(cells[10].get_text())

                    # Filter unseeded torrent
                    if seeders < self.minseed:
                        if mode != 'RSS':
                            log.debug("Discarding torrent because it doesn't meet the"
                                      ' minimum seeders: {0}. Seeders: {1}',
                                      title, seeders)
                        continue

                    torrent_size = self._norm_size(cells[7].get_text(strip=True))
                    size = convert_size(torrent_size) or -1

                    item = {
                        'title': title,
                        'link': download_url,
                        'size': size,
                        'seeders': seeders,
                        'leechers': leechers,
                        'pubdate': None,
                    }
                    if mode != 'RSS':
                        log.debug('Found result: {0} with {1} seeders and {2} leechers',
                                  title, seeders, leechers)

                    items.append(item)
                except (AttributeError, TypeError, KeyError, ValueError, IndexError):
                    log.exception('Failed parsing provider.')

        return items
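convert_size is another helper these parsers rely on without showing it. A rough sketch of the behavior the call sites assume (a byte count on success, a default or falsy value on failure, and an optional custom unit ladder as passed in Exemplo n.º 54) might be:

import re


def convert_size(size_string, default=None, units=None):
    """Parse a human-readable size such as '1.2 GB' into bytes (a sketch)."""
    units = units or ['B', 'KB', 'MB', 'GB', 'TB', 'PB']
    match = re.match(r'([\d.]+)\s*([A-Za-z]+)', str(size_string).strip())
    if not match:
        return default
    value, unit = float(match.group(1)), match.group(2).upper()
    if unit not in units:
        return default
    return int(value * 1024 ** units.index(unit))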
Exemplo n.º 46
0
    def parse(self, data, mode):
        """
        Parse search results for items.

        :param data: The raw response from a search
        :param mode: The current mode used to search, e.g. RSS

        :return: A list of items found
        """
        items = []

        with BS4Parser(data, 'html5lib') as html:
            torrent_table = html.find('table', border='1')
            torrent_rows = torrent_table('tr') if torrent_table else []

            # Continue only if at least one release is found
            if len(torrent_rows) < 2:
                log.debug('Data returned from provider does not contain any torrents')
                return items

            # "Type", "Name", Files", "Comm.", "Added", "TTL", "Size", "Snatched", "Seeders", "Leechers"
            labels = [label.get_text(strip=True) for label in torrent_rows[0]('td')]

            # Skip column headers
            for row in torrent_rows[1:]:
                cells = row('td')

                if len(cells) < len(labels):
                    continue

                try:
                    download_url = urljoin(self.url, cells[labels.index('Name')].find('a',
                                           href=re.compile(r'download.php\?id='))['href'])
                    title_element = cells[labels.index('Name')].find('a', href=re.compile(r'details.php\?id='))
                    title = title_element.get('title', '') or title_element.get_text(strip=True)
                    if not all([title, download_url]):
                        continue

                    # Free leech torrents are marked with green [F L] in the title
                    # (i.e. <font color=green>[F&nbsp;L]</font>)
                    freeleech = cells[labels.index('Name')].find('font', color='green')
                    if freeleech:
                        # \xa0 is a non-breaking space in Latin1 (ISO 8859-1)
                        freeleech_tag = '[F\xa0L]'
                        title = title.replace(freeleech_tag, '')
                        if self.freeleech and freeleech.get_text(strip=True) != freeleech_tag:
                            continue

                    seeders = try_int(cells[labels.index('Seeders')].get_text(strip=True), 1)
                    leechers = try_int(cells[labels.index('Leechers')].get_text(strip=True))

                    # Filter unseeded torrent
                    if seeders < self.minseed:
                        if mode != 'RSS':
                            log.debug("Discarding torrent because it doesn't meet the"
                                      ' minimum seeders: {0}. Seeders: {1}',
                                      title, seeders)
                        continue

                    torrent_size = cells[labels.index('Size')].get_text(' ', strip=True)
                    size = convert_size(torrent_size) or -1

                    pubdate_raw = cells[labels.index('Added')].get_text(' ', strip=True)
                    pubdate = self.parse_pubdate(pubdate_raw)

                    item = {
                        'title': title,
                        'link': download_url,
                        'size': size,
                        'seeders': seeders,
                        'leechers': leechers,
                        'pubdate': pubdate,
                    }
                    if mode != 'RSS':
                        log.debug('Found result: {0} with {1} seeders and {2} leechers',
                                  title, seeders, leechers)

                    items.append(item)
                except (AttributeError, TypeError, KeyError, ValueError, IndexError):
                    log.exception('Failed parsing provider.')

        return items
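The header-label lookup above is worth isolating: resolving column positions by header text instead of hard-coding indices keeps the parser working when the site reorders its table. A self-contained illustration with hypothetical row data:

# Hypothetical header and data row scraped from a results table
labels = ['Type', 'Name', 'Files', 'Comm.', 'Added', 'TTL',
          'Size', 'Snatched', 'Seeders', 'Leechers']
cells = ['TV', 'Some.Show.S01E01.720p', '3', '0', '2019-01-01', '-',
         '1.1 GB', '10', '42', '7']

# cells[labels.index('Seeders')] survives column reordering,
# unlike a hard-coded cells[8]
seeders = int(cells[labels.index('Seeders')])
leechers = int(cells[labels.index('Leechers')])
print(seeders, leechers)  # 42 7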
Exemplo n.º 47
0
    def parse(self, data, mode):
        """
        Parse search results for items.

        :param data: The raw response from a search
        :param mode: The current mode used to search, e.g. RSS

        :return: A list of items found
        """
        items = []

        with BS4Parser(data, 'html5lib') as html:
            torrent_table = html.find('table', {'id': 'torrentsTable'})
            # Guard against a missing table so torrent_rows is always defined
            torrent_rows = torrent_table.find_all('tr') if torrent_table else []

            # Continue only if at least one release is found
            if len(torrent_rows) < 2:
                log.debug('Data returned from provider does not contain any torrents')
                return items

            # Skip column headers
            for row in torrent_rows[1:]:
                try:
                    torrent_items = row.find_all('td')
                    title = torrent_items[1].find('a').get_text(strip=True)
                    download_url = torrent_items[2].find('a')['href']
                    if not all([title, download_url]):
                        continue
                    download_url = urljoin(self.url, download_url)

                    seeders = try_int(torrent_items[5].get_text(strip=True))
                    leechers = try_int(torrent_items[6].get_text(strip=True))

                    # Filter unseeded torrent
                    if seeders < self.minseed:
                        if mode != 'RSS':
                            log.debug("Discarding torrent because it doesn't meet the"
                                      ' minimum seeders: {0}. Seeders: {1}',
                                      title, seeders)
                        continue

                    torrent_size = torrent_items[4].get_text()
                    size = convert_size(torrent_size) or -1

                    pubdate_raw = torrent_items[1].find('div').get_text()
                    pubdate = self.parse_pubdate(pubdate_raw, human_time=True)

                    item = {
                        'title': title,
                        'link': download_url,
                        'size': size,
                        'seeders': seeders,
                        'leechers': leechers,
                        'pubdate': pubdate,
                    }
                    if mode != 'RSS':
                        log.debug('Found result: {0} with {1} seeders and {2} leechers',
                                  title, seeders, leechers)

                    items.append(item)
                except (AttributeError, TypeError, KeyError, ValueError, IndexError):
                    log.exception('Failed parsing provider')

        return items
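The human_time=True call above implies parse_pubdate can turn relative strings such as '3 hours' into a timestamp. A sketch covering only that relative case (the real helper presumably also handles absolute dates):

import re
from datetime import datetime, timedelta


def parse_pubdate_sketch(pubdate, human_time=False):
    """Normalize a relative publish date such as '3 hours' (a sketch)."""
    if not pubdate:
        return None
    if human_time:
        match = re.match(r'(\d+)\s+(minute|hour|day)s?', pubdate)
        if not match:
            return None
        value, unit = int(match.group(1)), match.group(2)
        return datetime.now() - timedelta(**{unit + 's': value})
    return None  # absolute-date parsing elided from this sketch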
Exemplo n.º 48
0
    def parse(self, data, mode):
        """
        Parse search results for items.

        :param data: The raw response from a search
        :param mode: The current mode used to search, e.g. RSS

        :return: A list of items found
        """
        items = []

        with BS4Parser(data, 'html5lib') as html:
            torrent_table = html('div', class_='panel-body', limit=2)
            if mode != 'RSS':
                torrent_rows = torrent_table[1]('tr') if len(torrent_table) > 1 else []
            else:
                torrent_rows = torrent_table[0]('tr') if torrent_table else []

            # Continue only if at least one release is found
            if len(torrent_rows) < 2:
                log.debug('Data returned from provider does not contain any torrents')
                return items

            # Skip column headers
            for row in torrent_rows[1:]:
                cells = row('td')

                try:
                    title = cells[1].find('a').get_text()
                    magnet = cells[2].find('a', title='Magnet link')['href']
                    download_url = '{magnet}{trackers}'.format(magnet=magnet,
                                                               trackers=self._custom_trackers)
                    if not all([title, download_url]):
                        continue

                    seeders = 1
                    leechers = 0
                    if len(cells) > 5:
                        peers = cells[5].find('div')
                        if peers and peers.get('title'):
                            peers = peers['title'].replace(',', '').split(' | ', 1)
                            # Removes 'Seeders: '
                            seeders = try_int(peers[0][9:])
                            # Removes 'Leechers: '
                            leechers = try_int(peers[1][10:])

                    # Filter unseeded torrent
                    if seeders < self.minseed:
                        if mode != 'RSS':
                            log.debug("Discarding torrent because it doesn't meet the"
                                      ' minimum seeders: {0}. Seeders: {1}',
                                      title, seeders)
                        continue

                    torrent_size = cells[3].get_text().replace(',', '')
                    size = convert_size(torrent_size) or -1

                    pubdate_raw = cells[4].get_text().replace('yesterday', '24 hours')
                    # "long ago" can't be translated to a date
                    if pubdate_raw == 'long ago':
                        pubdate_raw = None
                    pubdate = self.parse_pubdate(pubdate_raw, human_time=True)

                    item = {
                        'title': title,
                        'link': download_url,
                        'size': size,
                        'seeders': seeders,
                        'leechers': leechers,
                        'pubdate': pubdate,
                    }
                    if mode != 'RSS':
                        log.debug('Found result: {0} with {1} seeders and {2} leechers',
                                  title, seeders, leechers)

                    items.append(item)
                except (AttributeError, TypeError, KeyError, ValueError, IndexError):
                    log.exception('Failed parsing provider.')

        return items
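self._custom_trackers is appended verbatim to the magnet link above, so it is presumably a pre-encoded string of &tr= parameters. Building such a string from a hypothetical tracker list could look like:

from urllib.parse import quote

# Hypothetical tracker list; real providers keep this in configuration
trackers = ['udp://tracker.example:1337/announce',
            'udp://open.example:6969/announce']
custom_trackers = ''.join('&tr=' + quote(tracker) for tracker in trackers)

magnet = 'magnet:?xt=urn:btih:0123456789abcdef0123456789abcdef01234567'
download_url = '{magnet}{trackers}'.format(magnet=magnet,
                                           trackers=custom_trackers)
print(download_url)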
Exemplo n.º 49
0
    def run(self, force=False):
        """
        Run the daily searcher, queuing selected episodes for search.

        :param force: Force search
        """
        if self.amActive:
            log.debug('Daily search is still running, not starting it again')
            return
        elif app.forced_search_queue_scheduler.action.is_forced_search_in_progress() and not force:
            log.warning('Manual search is running. Unable to start Daily search')
            return

        self.amActive = True
        # Let's keep track of the exact time the scheduler kicked in,
        # as we need to compare to this time for each provider.
        scheduler_start_time = int(time())

        if not network_dict:
            update_network_dict()

        # The tvshows airdate_offset field is used to configure a search offset for specific shows.
        # This way we can search/accept results early or late, depending on the value.
        main_db_con = DBConnection()
        min_offset_show = main_db_con.select(
            'SELECT COUNT(*) as offsets, MIN(airdate_offset) AS min_offset '
            'FROM tv_shows '
            'WHERE paused = 0 AND airdate_offset < 0'
        )
        additional_search_offset = 0
        if min_offset_show and min_offset_show[0]['offsets'] > 0:
            additional_search_offset = int(ceil(abs(min_offset_show[0]['min_offset']) / 24.0))
            log.debug('Using an airdate offset of {min_offset_show} as we found show(s) with an airdate'
                      ' offset configured.', {'min_offset_show': min_offset_show[0]['min_offset']})

        cur_time = datetime.now(app_timezone)

        cur_date = (
            date.today() + timedelta(days=1 if network_dict else 2) + timedelta(days=additional_search_offset)
        ).toordinal()

        episodes_from_db = main_db_con.select(
            'SELECT indexer, showid, airdate, season, episode '
            'FROM tv_episodes '
            'WHERE status = ? AND (airdate <= ? and airdate > 1)',
            [common.UNAIRED, cur_date]
        )

        new_releases = []
        series_obj = None

        for db_episode in episodes_from_db:
            indexer_id = db_episode['indexer']
            series_id = db_episode['showid']
            try:
                if not series_obj or series_id != series_obj.indexerid:
                    series_obj = Show.find_by_id(app.showList, indexer_id, series_id)

                # for when there are orphaned series in the database that are not loaded into our show list
                if not series_obj or series_obj.paused:
                    continue

            except MultipleShowObjectsException:
                log.info('ERROR: expected to find a single show matching {id}',
                         {'id': series_id})
                continue

            cur_ep = series_obj.get_episode(db_episode['season'], db_episode['episode'])

            if series_obj.airs and series_obj.network:
                # This ensures the air time is always converted to local time
                show_air_time = parse_date_time(db_episode['airdate'], series_obj.airs, series_obj.network)
                end_time = show_air_time.astimezone(app_timezone) + timedelta(minutes=try_int(series_obj.runtime, 60))

                if series_obj.airdate_offset != 0:
                    log.debug(
                        '{show}: Applying an airdate offset for the episode: {episode} of {offset} hours',
                        {'show': series_obj.name, 'episode': cur_ep.pretty_name(), 'offset': series_obj.airdate_offset})

                # filter out any episodes that haven't finished airing yet
                if end_time + timedelta(hours=series_obj.airdate_offset) > cur_time:
                    continue

            with cur_ep.lock:
                cur_ep.status = series_obj.default_ep_status if cur_ep.season else common.SKIPPED
                log.info(
                    'Setting status ({status}) for show airing today: {name} {special}', {
                        'name': cur_ep.pretty_name(),
                        'status': common.statusStrings[cur_ep.status],
                        'special': '(specials are not supported)' if not cur_ep.season else '',
                    }
                )
                new_releases.append(cur_ep.get_sql())

        if new_releases:
            main_db_con = DBConnection()
            main_db_con.mass_action(new_releases)

        # queue a daily search
        app.search_queue_scheduler.action.add_item(
            DailySearchQueueItem(scheduler_start_time, force=force)
        )

        self.amActive = False
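The offset arithmetic above is compact enough to warrant a worked example: a per-show airdate_offset of -30 hours widens the airdate window by two whole days.

from math import ceil

# Hypothetical row mirroring the SELECT on tv_shows above
min_offset_show = [{'offsets': 2, 'min_offset': -30}]

# ceil(30 / 24) = 2 extra days of lookahead for shows airing early
additional_search_offset = int(ceil(abs(min_offset_show[0]['min_offset']) / 24.0))
print(additional_search_offset)  # 2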
Exemplo n.º 50
0
    def parse(self, data, mode):
        """
        Parse search results for items.

        :param data: The raw response from a search
        :param mode: The current mode used to search, e.g. RSS

        :return: A dict with the list of items found and whether there is a next page to search
        """
        def process_column_header(td):
            ret = ''
            if td.a and td.a.img:
                ret = td.a.img.get('title', td.a.get_text(strip=True))
            if not ret:
                ret = td.get_text(strip=True)
            return ret

        items = []
        has_next_page = False
        with BS4Parser(data, 'html5lib') as html:
            torrent_table = html.find('table', id='torrent_table')
            torrent_rows = torrent_table('tr') if torrent_table else []

            # ignore next page in RSS mode
            has_next_page = mode != 'RSS' and html.find('a', class_='pager_next') is not None
            log.debug('Are there more pages? {0}', has_next_page)

            # Continue only if at least one Release is found
            if len(torrent_rows) < 2:
                log.debug('Data returned from provider does not contain any torrents')
                return {'has_next_page': has_next_page, 'items': []}

            # '', '', 'Name /Year', 'Files', 'Time', 'Size', 'Snatches', 'Seeders', 'Leechers'
            labels = [process_column_header(label) for label in torrent_rows[0]('td')]
            group_title = ''

            # Skip column headers
            for result in torrent_rows[1:]:
                cells = result('td')
                result_class = result.get('class')
                # When "Grouping Torrents" is enabled, the structure of table change
                group_index = -2 if 'group_torrent' in result_class else 0
                try:
                    title = result.select('a[href^="torrents.php?id="]')[0].get_text()
                    title = re.sub(r'\s+', ' ', title).strip()  # clean empty lines and multiple spaces

                    if 'group' in result_class or 'torrent' in result_class:
                        # get international title if available
                        title = re.sub(r'.* \[(.*?)\](.*)', r'\1\2', title)

                    if 'group' in result_class:
                        group_title = title
                        continue

                    for serie in self.absolute_numbering:
                        if serie in title:
                            # remove the season from the title when it's in absolute format
                            title = re.sub(r'S\d{2}E(\d{2,4})', r'\1', title)
                            break

                    download_url = urljoin(self.url, result.select('a[href^="torrents.php?action=download"]')[0]['href'])
                    if not all([title, download_url]):
                        continue

                    seeders = try_int(cells[labels.index('Seeders') + group_index].get_text(strip=True))
                    leechers = try_int(cells[labels.index('Leechers') + group_index].get_text(strip=True))

                    # Filter unseeded torrent
                    if seeders < self.minseed:
                        if mode != 'RSS':
                            log.debug("Discarding torrent because it doesn't meet the"
                                      ' minimum seeders: {0}. Seeders: {1}',
                                      title, seeders)
                        continue

                    torrent_details = None
                    if 'group_torrent' in result_class:
                        # torrents belonging to a group
                        torrent_details = title
                        title = group_title
                    elif 'torrent' in result_class:
                        # standalone/ungrouped torrents
                        torrent_details = cells[labels.index('Nome/Ano')].find('div', class_='torrent_info').get_text()

                    torrent_details = torrent_details.replace('[', ' ').replace(']', ' ').replace('/', ' ')
                    torrent_details = torrent_details.replace('Full HD ', '1080p').replace('HD ', '720p')

                    torrent_size = cells[labels.index('Tamanho') + group_index].get_text(strip=True)
                    size = convert_size(torrent_size) or -1

                    torrent_name = '{0} {1}'.format(title, torrent_details.strip()).strip()
                    torrent_name = re.sub(r'\s+', ' ', torrent_name)

                    items.append({
                        'title': torrent_name,
                        'link': download_url,
                        'size': size,
                        'seeders': seeders,
                        'leechers': leechers,
                        'pubdate': None
                    })

                    if mode != 'RSS':
                        log.debug('Found result: {0} with {1} seeders and {2} leechers',
                                  torrent_name, seeders, leechers)

                except (AttributeError, TypeError, KeyError, ValueError, IndexError):
                    log.exception('Failed parsing provider.')

        return {'has_next_page': has_next_page, 'items': items}
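The group_index trick above shifts header-based lookups by -2 for rows inside a torrent group, which drop their two leading cells. A standalone trace with hypothetical rows:

# Header labels as scraped from the table ('' for the two icon columns)
labels = ['', '', 'Nome/Ano', 'Files', 'Time', 'Tamanho', 'Snatches',
          'Seeders', 'Leechers']

# An ungrouped row keeps all columns; a grouped row drops the first two
ungrouped = ['cat', 'x', 'Show 2019', '2', '1h', '700 MB', '5', '12', '3']
grouped = ['Episode details', '2', '1h', '700 MB', '5', '12', '3']

for cells, group_index in ((ungrouped, 0), (grouped, -2)):
    print(cells[labels.index('Seeders') + group_index])  # 12 both times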
Exemplo n.º 51
0
    def search(self, search_strings, age=0, ep_obj=None, **kwargs):
        """
        Search a provider and parse the results.

        :param search_strings: A dict with mode (key) and the search value (value)
        :param age: Not used
        :param ep_obj: Not used
        :returns: A list of search results (structure)
        """
        results = []
        if not self.login():
            return results

        # Search Params
        search_params = {
            'app_id': app.RARBG_APPID,
            'category': 'tv',
            'min_seeders': self.minseed,
            'min_leechers': self.minleech,
            'limit': 100,
            'format': 'json_extended',
            'ranked': try_int(self.ranked),
            'token': self.token,
            'sort': 'last',
            'mode': 'list',
        }

        for mode in search_strings:
            log.debug('Search mode: {0}', mode)

            if mode == 'RSS':
                search_params['search_string'] = None
                search_params['search_tvdb'] = None
            else:
                search_params['sort'] = self.sorting if self.sorting else 'seeders'
                search_params['mode'] = 'search'
                search_params['search_tvdb'] = self._get_tvdb_id()

            for search_string in search_strings[mode]:
                if mode != 'RSS':
                    log.debug('Search string: {search}',
                              {'search': search_string})
                    if self.ranked:
                        log.debug('Searching only ranked torrents')

                search_params['search_string'] = search_string

                # Check if token is still valid before search
                if not self.login():
                    continue

                # Maximum requests allowed are 1req/2sec
                # Changing to 5 because of server clock desync
                time.sleep(5)

                search_url = self.urls['api']
                response = self.session.get(search_url, params=search_params)
                if not response or not response.content:
                    log.debug('No data returned from provider')
                    continue

                try:
                    jdata = response.json()
                except ValueError:
                    log.debug('No data returned from provider')
                    continue

                error = jdata.get('error')
                error_code = jdata.get('error_code')
                if error:
                    # List of errors: https://github.com/rarbg/torrentapi/issues/1#issuecomment-114763312
                    if error_code == 5:
                        # 5 = Too many requests per second
                        log_level = logging.INFO
                    elif error_code not in (4, 8, 10, 12, 14, 20):
                        # 4 = Invalid token. Use get_token for a new one!
                        # 8, 10, 12, 14 = Can't find * in database. Are you sure this * exists?
                        # 20 = No results found
                        log_level = logging.WARNING
                    else:
                        log_level = logging.DEBUG

                    log.log(log_level, '{msg} Code: {code}', {'msg': error, 'code': error_code})
                    continue

                results += self.parse(jdata, mode)

        return results
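The error handling above maps torrentapi error codes to log severities inline. Extracted as a small hypothetical helper, the mapping reads:

import logging


def rarbg_log_level(error_code):
    """Map torrentapi error codes to log levels, as in the search above."""
    if error_code == 5:
        # Too many requests per second
        return logging.INFO
    if error_code in (4, 8, 10, 12, 14, 20):
        # Expired token, "can't find * in database", or no results
        return logging.DEBUG
    return logging.WARNING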
Exemplo n.º 52
0
    def addShowByID(self, indexername=None, seriesid=None, show_name=None, which_series=None,
                    indexer_lang=None, root_dir=None, default_status=None,
                    quality_preset=None, any_qualities=None, best_qualities=None,
                    season_folders=None, subtitles=None, full_show_path=None,
                    other_shows=None, skip_show=None, provided_indexer=None,
                    anime=None, scene=None, blacklist=None, whitelist=None,
                    default_status_after=None, configure_show_options=False):
        """
        Add a new show with the provided show options by indexer_id.
        Currently only TVDB and IMDB ids are supported.
        """
        series_id = seriesid
        if indexername != 'tvdb':
            series_id = helpers.get_tvdb_from_id(seriesid, indexername.upper())
            if not series_id:
                log.info('Unable to find tvdb ID to add {name}', {'name': show_name})
                ui.notifications.error(
                    'Unable to add {0}'.format(show_name),
                    'Could not add {0}. We were unable to locate the tvdb id at this time.'.format(show_name)
                )
                return json_response(
                    result=False,
                    message='Unable to find tvdb ID to add {show}'.format(show=show_name)
                )

        if Show.find_by_id(app.showList, INDEXER_TVDBV2, series_id):
            return json_response(
                result=False,
                message='Show already exists'
            )

        # Sanitize the allowed_qualities and preferred_qualities parameters, as these would normally be passed as lists
        if any_qualities:
            any_qualities = any_qualities.split(',')
        else:
            any_qualities = []

        if best_qualities:
            best_qualities = best_qualities.split(',')
        else:
            best_qualities = []

        # If configure_show_options is enabled let's use the provided settings
        configure_show_options = config.checkbox_to_value(configure_show_options)

        if configure_show_options:
            # prepare the inputs for passing along
            scene = config.checkbox_to_value(scene)
            anime = config.checkbox_to_value(anime)
            season_folders = config.checkbox_to_value(season_folders)
            subtitles = config.checkbox_to_value(subtitles)

            if whitelist:
                whitelist = short_group_names(whitelist)
            if blacklist:
                blacklist = short_group_names(blacklist)

            if not any_qualities:
                any_qualities = []

            if not best_qualities or try_int(quality_preset, None):
                best_qualities = []

            if not isinstance(any_qualities, list):
                any_qualities = [any_qualities]

            if not isinstance(best_qualities, list):
                best_qualities = [best_qualities]

            quality = Quality.combine_qualities([int(q) for q in any_qualities], [int(q) for q in best_qualities])

            location = root_dir

        else:
            default_status = app.STATUS_DEFAULT
            quality = app.QUALITY_DEFAULT
            season_folders = app.SEASON_FOLDERS_DEFAULT
            subtitles = app.SUBTITLES_DEFAULT
            anime = app.ANIME_DEFAULT
            scene = app.SCENE_DEFAULT
            default_status_after = app.STATUS_DEFAULT_AFTER

            if app.ROOT_DIRS:
                root_dirs = app.ROOT_DIRS
                location = root_dirs[int(root_dirs[0]) + 1]
            else:
                location = None

        if not location:
            log.warning('There was an error creating the show, no root directory setting found')
            return json_response(
                result=False,
                message='No root directories set up, please go back and add one.'
            )

        show_name = get_showname_from_indexer(INDEXER_TVDBV2, series_id)
        show_dir = None

        # add the show
        app.show_queue_scheduler.action.addShow(INDEXER_TVDBV2, int(series_id), show_dir, int(default_status), quality,
                                                season_folders, indexer_lang, subtitles, anime, scene, None, blacklist,
                                                whitelist, int(default_status_after), root_dir=location)

        ui.notifications.message('Show added', 'Adding the specified show {0}'.format(show_name))

        # done adding show
        return json_response(
            message='Adding the specified show {0}'.format(show_name),
            redirect='home'
        )
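The lookup root_dirs[int(root_dirs[0]) + 1] above implies that app.ROOT_DIRS stores the index of the default root directory in its first slot, followed by the directories themselves. A worked example under that assumption:

# Hypothetical ROOT_DIRS: slot 0 is the index of the default directory
# within the entries that follow it
ROOT_DIRS = ['1', '/media/tv', '/media/anime']

default_root = ROOT_DIRS[int(ROOT_DIRS[0]) + 1]
print(default_root)  # /media/anime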
Exemplo n.º 53
0
    def saveGeneral(self, log_dir=None, log_nr=5, log_size=1, web_port=None, notify_on_login=None, web_log=None, encryption_version=None, web_ipv6=None,
                    trash_remove_show=None, trash_rotate_logs=None, update_frequency=None, skip_removed_files=None,
                    indexerDefaultLang='en', ep_default_deleted_status=None, launch_browser=None, showupdate_hour=3, web_username=None,
                    api_key=None, indexer_default=None, timezone_display=None, cpu_preset='NORMAL', layout_wide=None,
                    web_password=None, version_notify=None, enable_https=None, https_cert=None, https_key=None,
                    handle_reverse_proxy=None, sort_article=None, auto_update=None, notify_on_update=None,
                    proxy_setting=None, proxy_indexers=None, anon_redirect=None, git_path=None, git_remote=None,
                    calendar_unprotected=None, calendar_icons=None, debug=None, ssl_verify=None, no_restart=None, coming_eps_missed_range=None,
                    fuzzy_dating=None, trim_zero=None, date_preset=None, date_preset_na=None, time_preset=None,
                    indexer_timeout=None, download_url=None, rootDir=None, theme_name=None, default_page=None,
                    git_reset=None, git_reset_branches=None, git_auth_type=0, git_username=None, git_password=None, git_token=None,
                    display_all_seasons=None, subliminal_log=None, privacy_level='normal', fanart_background=None, fanart_background_opacity=None,
                    dbdebug=None, fallback_plex_enable=1, fallback_plex_notifications=1, fallback_plex_timeout=3, web_root=None, ssl_ca_bundle=None):

        results = []

        # Misc
        app.DOWNLOAD_URL = download_url
        app.INDEXER_DEFAULT_LANGUAGE = indexerDefaultLang
        app.EP_DEFAULT_DELETED_STATUS = int(ep_default_deleted_status)
        app.SKIP_REMOVED_FILES = config.checkbox_to_value(skip_removed_files)
        app.LAUNCH_BROWSER = config.checkbox_to_value(launch_browser)
        config.change_SHOWUPDATE_HOUR(showupdate_hour)
        config.change_VERSION_NOTIFY(config.checkbox_to_value(version_notify))
        app.AUTO_UPDATE = config.checkbox_to_value(auto_update)
        app.NOTIFY_ON_UPDATE = config.checkbox_to_value(notify_on_update)
        # app.LOG_DIR is set in config.change_LOG_DIR()
        app.LOG_NR = log_nr
        app.LOG_SIZE = float(log_size)

        app.TRASH_REMOVE_SHOW = config.checkbox_to_value(trash_remove_show)
        app.TRASH_ROTATE_LOGS = config.checkbox_to_value(trash_rotate_logs)
        config.change_UPDATE_FREQUENCY(update_frequency)
        app.SORT_ARTICLE = config.checkbox_to_value(sort_article)
        app.CPU_PRESET = cpu_preset
        app.ANON_REDIRECT = anon_redirect
        app.PROXY_SETTING = proxy_setting
        app.PROXY_INDEXERS = config.checkbox_to_value(proxy_indexers)
        app.GIT_AUTH_TYPE = int(git_auth_type)
        app.GIT_USERNAME = git_username
        app.GIT_PASSWORD = git_password
        app.GIT_TOKEN = git_token
        app.GIT_RESET = config.checkbox_to_value(git_reset)
        app.GIT_RESET_BRANCHES = [helpers.to_text(branch) for branch in
                                  helpers.ensure_list(git_reset_branches)]
        if app.GIT_PATH != git_path:
            app.GIT_PATH = git_path
            config.change_GIT_PATH()
        app.GIT_REMOTE = git_remote
        app.CALENDAR_UNPROTECTED = config.checkbox_to_value(calendar_unprotected)
        app.CALENDAR_ICONS = config.checkbox_to_value(calendar_icons)
        app.NO_RESTART = config.checkbox_to_value(no_restart)

        app.SSL_VERIFY = config.checkbox_to_value(ssl_verify)
        app.SSL_CA_BUNDLE = ssl_ca_bundle
        app.COMING_EPS_MISSED_RANGE = int(coming_eps_missed_range)
        app.DISPLAY_ALL_SEASONS = config.checkbox_to_value(display_all_seasons)
        app.NOTIFY_ON_LOGIN = config.checkbox_to_value(notify_on_login)
        app.WEB_PORT = int(web_port)
        app.WEB_IPV6 = config.checkbox_to_value(web_ipv6)
        if config.checkbox_to_value(encryption_version) == 1:
            app.ENCRYPTION_VERSION = 2
        else:
            app.ENCRYPTION_VERSION = 0
        app.WEB_USERNAME = web_username
        app.WEB_PASSWORD = web_password
        app.WEB_ROOT = web_root

        app.DEBUG = config.checkbox_to_value(debug)
        app.DBDEBUG = config.checkbox_to_value(dbdebug)
        app.WEB_LOG = config.checkbox_to_value(web_log)
        app.SUBLIMINAL_LOG = config.checkbox_to_value(subliminal_log)

        # Added for tvdb / plex fallback
        app.FALLBACK_PLEX_ENABLE = config.checkbox_to_value(fallback_plex_enable)
        app.FALLBACK_PLEX_NOTIFICATIONS = config.checkbox_to_value(fallback_plex_notifications)
        app.FALLBACK_PLEX_TIMEOUT = try_int(fallback_plex_timeout)

        if not config.change_LOG_DIR(log_dir):
            results += ['Unable to create directory {dir}, '
                        'log directory not changed.'.format(dir=os.path.normpath(log_dir))]

        # Reconfigure the logger
        logger.reconfigure()

        # Validate github credentials
        try:
            if app.GIT_AUTH_TYPE == 0:
                github_client.authenticate(app.GIT_USERNAME, app.GIT_PASSWORD)
            else:
                github = github_client.token_authenticate(app.GIT_TOKEN)
                if app.GIT_USERNAME and app.GIT_USERNAME != github_client.get_user(gh=github):
                    app.GIT_USERNAME = github_client.get_user(gh=github)
        except (GithubException, IOError):
            logger.log('Error while validating your Github credentials.', logger.WARNING)

        app.PRIVACY_LEVEL = privacy_level.lower()

        app.FUZZY_DATING = config.checkbox_to_value(fuzzy_dating)
        app.TRIM_ZERO = config.checkbox_to_value(trim_zero)

        if date_preset:
            app.DATE_PRESET = date_preset

        if indexer_default:
            app.INDEXER_DEFAULT = try_int(indexer_default)

        if indexer_timeout:
            app.INDEXER_TIMEOUT = try_int(indexer_timeout)

        if time_preset:
            app.TIME_PRESET_W_SECONDS = time_preset
            app.TIME_PRESET = app.TIME_PRESET_W_SECONDS.replace(u':%S', u'')

        app.TIMEZONE_DISPLAY = timezone_display

        app.API_KEY = api_key

        app.ENABLE_HTTPS = config.checkbox_to_value(enable_https)

        if not config.change_HTTPS_CERT(https_cert):
            results += ['Unable to create directory {dir}, '
                        'https cert directory not changed.'.format(dir=os.path.normpath(https_cert))]

        if not config.change_HTTPS_KEY(https_key):
            results += ['Unable to create directory {dir}, '
                        'https key directory not changed.'.format(dir=os.path.normpath(https_key))]

        app.HANDLE_REVERSE_PROXY = config.checkbox_to_value(handle_reverse_proxy)

        config.change_theme(theme_name)

        app.LAYOUT_WIDE = config.checkbox_to_value(layout_wide)
        app.FANART_BACKGROUND = config.checkbox_to_value(fanart_background)
        app.FANART_BACKGROUND_OPACITY = fanart_background_opacity

        app.DEFAULT_PAGE = default_page

        app.instance.save_config()

        if results:
            for x in results:
                logger.log(x, logger.ERROR)
            ui.notifications.error('Error(s) Saving Configuration',
                                   '<br>\n'.join(results))
        else:
            ui.notifications.message('Configuration Saved', app.CONFIG_FILE)

        return self.redirect('/config/general/')
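config.checkbox_to_value is called dozens of times in saveGeneral. HTML checkboxes post 'on' when ticked and nothing at all when left unticked, so a plausible sketch of the helper is:

def checkbox_to_value(value, value_on=1, value_off=0):
    """Normalize a posted checkbox value to 1/0 (a sketch)."""
    if isinstance(value, list):
        value = value[-1]  # take the last value if posted multiple times
    if str(value).lower() in ('on', 'true', '1'):
        return value_on
    return value_off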
Exemplo n.º 54
0
    def parse(self, data, mode):
        """
        Parse search results for items.

        :param data: The raw response from a search
        :param mode: The current mode used to search, e.g. RSS

        :return: A list of items found
        """
        # Units
        units = ['B', 'KIB', 'MIB', 'GIB', 'TIB', 'PIB']

        def process_column_header(th):
            result = ''
            if th.a:
                result = th.a.get_text(strip=True)
            if not result:
                result = th.get_text(strip=True)
            return result

        items = []

        with BS4Parser(data, 'html5lib') as html:
            torrent_table = html.find('table', id='searchResult')
            torrent_rows = torrent_table('tr') if torrent_table else []

            # Continue only if at least one release is found
            if len(torrent_rows) < 2:
                log.debug('Data returned from provider does not contain any {0}torrents',
                          'confirmed ' if self.confirmed else '')
                return items

            labels = [process_column_header(label) for label in torrent_rows[0]('th')]

            # Skip column headers
            for row in torrent_rows[1:]:
                cells = row('td')
                if len(cells) < len(labels):
                    continue

                try:
                    title = row.find(class_='detName')
                    title = title.get_text(strip=True) if title else None
                    download_url = row.find(title='Download this torrent using magnet')
                    download_url = download_url['href'] + self._custom_trackers if download_url else None
                    if download_url and 'magnet:?' not in download_url:
                        log.debug('Invalid ThePirateBay proxy, please try another one')
                        continue
                    if not all([title, download_url]):
                        continue

                    seeders = try_int(cells[labels.index('SE')].get_text(strip=True), 1)
                    leechers = try_int(cells[labels.index('LE')].get_text(strip=True))

                    # Filter unseeded torrent
                    if seeders < self.minseed:
                        if mode != 'RSS':
                            log.debug("Discarding torrent because it doesn't meet the"
                                      ' minimum seeders: {0}. Seeders: {1}',
                                      title, seeders)
                        continue

                    # Accept torrents only from trusted uploaders (VIP/Trusted) for every episode search
                    if self.confirmed and not row.find(alt=re.compile(r'VIP|Trusted')):
                        if mode != 'RSS':
                            log.debug("Found result {0} but that doesn't seem like a trusted"
                                      " result so I'm ignoring it", title)
                        continue

                    # Convert size after all possible skip scenarios
                    torrent_size = cells[labels.index('Name')].find(class_='detDesc')
                    torrent_size = torrent_size.get_text(strip=True).split(', ')[1]
                    torrent_size = re.sub(r'Size ([\d.]+).+([KMGT]iB)', r'\1 \2', torrent_size)
                    size = convert_size(torrent_size, units=units) or -1

                    item = {
                        'title': title,
                        'link': download_url,
                        'size': size,
                        'seeders': seeders,
                        'leechers': leechers,
                        'pubdate': None,
                    }
                    if mode != 'RSS':
                        log.debug('Found result: {0} with {1} seeders and {2} leechers',
                                  title, seeders, leechers)

                    items.append(item)
                except (AttributeError, TypeError, KeyError, ValueError, IndexError):
                    log.exception('Failed parsing provider.')

        return items
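The size extraction above depends on the detDesc text having a fixed comma-separated shape. Tracing it with a hypothetical description string:

import re

# Hypothetical detDesc text from a results row
desc = 'Uploaded 03-14 2019, Size 1.37 GiB, ULed by someone'
torrent_size = desc.split(', ')[1]  # 'Size 1.37 GiB'
torrent_size = re.sub(r'Size ([\d.]+).+([KMGT]iB)', r'\1 \2', torrent_size)
print(torrent_size)  # '1.37 GiB'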
Exemplo n.º 55
0
    def _get_result_info(self, item):
        # Get seeders/leechers for Torznab
        seeders = item.get('seeders', -1)
        leechers = item.get('leechers', -1)
        return try_int(seeders, -1), try_int(leechers, -1)
Exemplo n.º 56
0
    def parse(self, data, mode):
        """
        Parse search results for items.

        :param data: The raw response from a search
        :param mode: The current mode used to search, e.g. RSS

        :return: A list of items found
        """
        def get_label_title(label):
            """Get table row header labels."""
            if label.get_text():
                return label.get_text(strip=True)
            if label.a and label.a.get_text(strip=True):
                return label.a.get_text(strip=True)
            if label.img:
                return label.img.get('title')

        items = []
        if '<h2>Nothing found!</h2>' in data:
            log.debug('Data returned from provider does not contain any torrents')
            return items

        with BS4Parser(data, 'html.parser') as html:
            torrent_table = html.find('table', width='100%')
            torrent_rows = torrent_table('tr') if torrent_table else []

            # Continue only if at least one release is found
            if len(torrent_rows) < 1:
                log.debug('Data returned from provider does not contain any torrents')
                return items

            # Cat., Active, Name, Download, Added, Size, Uploader, Seeders, Leechers
            labels = [get_label_title(label) for label in
                      torrent_rows[0]('td')]

            for row in torrent_rows[1:]:
                try:
                    cells = row.findChildren('td')[:len(labels)]
                    if len(cells) < len(labels):
                        continue

                    title = cells[labels.index('Name')].a
                    title = title.get_text(strip=True) if title else None
                    link = cells[labels.index('Download')].a
                    link = link.get('href') if link else None
                    download_url = urljoin(self.url, link) if link else None
                    if not all([title, download_url]):
                        continue

                    seeders = try_int(cells[labels.index('Seeders')].get_text(strip=True))
                    leechers = try_int(cells[labels.index('Leechers')].get_text(strip=True))

                    # Filter unseeded torrent
                    if seeders < self.minseed:
                        if mode != 'RSS':
                            log.debug("Discarding torrent because it doesn't meet the"
                                      ' minimum seeders: {0}. Seeders: {1}',
                                      title, seeders)
                        continue

                    torrent_size, _, unit = cells[labels.index('Size')].contents
                    size = convert_size('{0} {1}'.format(torrent_size, unit)) or -1

                    pubdate_raw = cells[labels.index('Added')].get_text()
                    pubdate = self.parse_pubdate(pubdate_raw)

                    item = {
                        'title': title,
                        'link': download_url,
                        'size': size,
                        'seeders': seeders,
                        'leechers': leechers,
                        'pubdate': pubdate,
                    }
                    if mode != 'RSS':
                        log.debug('Found result: {0} with {1} seeders and {2} leechers',
                                  title, seeders, leechers)

                    items.append(item)
                except (AttributeError, TypeError, KeyError, ValueError, IndexError):
                    log.exception('Failed parsing provider.')

        return items
Exemplo n.º 57
0
    def parse(self, data, mode):
        """
        Parse search results for items.

        :param data: The raw response from a search
        :param mode: The current mode used to search, e.g. RSS

        :return: A list of items found
        """
        items = []

        with BS4Parser(data, 'html5lib') as soup:
            torrent_table = soup.find('table', class_='listing')
            torrent_rows = torrent_table('tr') if torrent_table else []

            # Continue only if at least one release is found
            if len(torrent_rows) < 2:
                log.debug('Data returned from provider does not contain any torrents')
                return items

            # Rows come in top/bottom pairs; skip a leading header row if present
            offset = 1 if len(torrent_rows[0]('td')) < 2 else 0

            for top, bot in zip(torrent_rows[offset::2], torrent_rows[offset + 1::2]):
                try:
                    desc_top = top.find('td', class_='desc-top')
                    title = desc_top.get_text(strip=True) if desc_top else None
                    download_url = desc_top.find('a')['href'] if desc_top else None
                    if not all([title, download_url]):
                        continue

                    stats = bot.find('td', class_='stats').get_text(strip=True)
                    sl = re.match(r'S:(?P<seeders>\d+)L:(?P<leechers>\d+)C:(?:\d+)ID:(?:\d+)', stats.replace(' ', ''))
                    seeders = try_int(sl.group('seeders')) if sl else 0
                    leechers = try_int(sl.group('leechers')) if sl else 0

                    # Filter unseeded torrent
                    if seeders < self.minseed:
                        if mode != 'RSS':
                            log.debug("Discarding torrent because it doesn't meet the"
                                      ' minimum seeders: {0}. Seeders: {1}',
                                      title, seeders)
                        continue

                    desc_bottom = bot.find('td', class_='desc-bot').get_text(strip=True)
                    size_raw = desc_bottom.split('|')[1].replace('Size:', '').strip()
                    size = convert_size(size_raw) or -1

                    item = {
                        'title': title,
                        'link': download_url,
                        'size': size,
                        'seeders': seeders,
                        'leechers': leechers,
                        'pubdate': None,
                    }
                    if mode != 'RSS':
                        log.debug('Found result: {0} with {1} seeders and {2} leechers',
                                  title, seeders, leechers)

                    items.append(item)
                except (AttributeError, TypeError, KeyError, ValueError, IndexError):
                    log.exception('Failed parsing provider.')

        return items
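The stats regex above packs seeders, leechers, completed count, and torrent id into one cell. Tracing it on a hypothetical stats string:

import re

# Hypothetical stats cell from the bottom row of a listing pair
stats = 'S: 12 L: 3 C: 450 ID: 98765'
sl = re.match(r'S:(?P<seeders>\d+)L:(?P<leechers>\d+)C:(?:\d+)ID:(?:\d+)',
              stats.replace(' ', ''))
print(sl.group('seeders'), sl.group('leechers'))  # 12 3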