Example #1
    def __init__(self):
        self.updater = None
        self.install_type = None
        self.amActive = False
        self.install_type = self.find_install_type()
        if self.install_type == 'git':
            self.updater = GitUpdateManager()
        elif self.install_type == 'source':
            self.updater = SourceUpdateManager()

        self.session = MedusaSafeSession()
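
A pattern shared by every example below: each component builds one MedusaSafeSession and reuses it for all of its requests. The following is a minimal illustrative sketch, not repository code; it assumes MedusaSafeSession behaves like a requests.Session whose calls return a falsy value on failure, which is what guards such as "if not changes" and "if not response or not response.text" in the later examples rely on.

# Minimal usage sketch (assumption: MedusaSafeSession wraps requests.Session and
# returns None/empty when a request fails, as the guards in the examples suggest).
from medusa import app
from medusa.session.core import MedusaSafeSession

session = MedusaSafeSession()                      # create once, reuse per component
session.headers.update({'User-Agent': 'Medusa'})   # hypothetical User-Agent value

response = session.get(app.CHANGES_URL, timeout=30)
if not response or not response.text:
    print('Request failed, falling back')
else:
    print(response.text[:80])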
Example #2
    def __init__(self, name):
        """Initialize the class."""
        self.name = name

        self.anime_only = False
        self.bt_cache_urls = [
            'http://reflektor.karmorra.info/torrent/{info_hash}.torrent',
            'https://torrent.cd/torrents/download/{info_hash}/.torrent',
            'https://asnet.pw/download/{info_hash}/',
            'http://p2pdl.com/download/{info_hash}',
            'http://itorrents.org/torrent/{info_hash}.torrent',
            'http://thetorrent.org/torrent/{info_hash}.torrent',
            'https://cache.torrentgalaxy.org/get/{info_hash}',
        ]
        self.cache = tv.Cache(self)
        self.enable_backlog = False
        self.enable_manualsearch = False
        self.enable_daily = False
        self.enabled = False
        self.headers = {'User-Agent': USER_AGENT}
        self.proper_strings = ['PROPER|REPACK|REAL|RERIP']
        self.provider_type = None
        self.public = False
        self.search_fallback = False
        self.search_mode = None
        self.session = MedusaSafeSession(cloudflare=True)
        self.session.headers.update(self.headers)
        self.series = None
        self.supports_absolute_numbering = False
        self.supports_backlog = True
        self.url = ''
        self.urls = {}

        # Ability to override the search separator. For example, anizb uses '*' instead of a space.
        self.search_separator = ' '
        self.season_templates = (
            'S{season:0>2}',  # example: 'Series.Name.S03'
        )

        # Use and configure the attribute enable_cookies to show or hide the cookies input field per provider
        self.enable_cookies = False
        self.cookies = ''

        # Parameters for reducing the daily search results parsing
        self.max_recent_items = 5
        self.stop_at = 3

        # Delay downloads
        self.enable_search_delay = False
        self.search_delay = 480  # minutes
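
The search_separator and season_templates attributes above control how season search strings are built (see _get_season_search_strings in Example #9). A quick standalone illustration of the formatting, using a made-up show name:

# Illustration of the season template and separator configured above.
search_separator = ' '
season_templates = ('S{season:0>2}',)   # example: 'Series.Name.S03'

show_name = 'Series.Name'               # hypothetical show name
for template in season_templates:
    print(show_name + search_separator + template.format(season=3))
# -> Series.Name S03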
Example #3
    def __init__(self, name):
        """Initialize the class."""
        self.name = name

        self.anime_only = False
        self.bt_cache_urls = [
            'https://torrentproject.se/torrent/{info_hash}.torrent',
            'http://reflektor.karmorra.info/torrent/{info_hash}.torrent',
            'http://thetorrent.org/torrent/{info_hash}.torrent',
            'http://piratepublic.com/download.php?id={info_hash}',
            'http://www.legittorrents.info/download.php?id={info_hash}',
            'https://torrent.cd/torrents/download/{info_hash}/.torrent',
            'https://asnet.pw/download/{info_hash}/',
            'https://skytorrents.in/file/{info_hash}/.torrent',
            'http://p2pdl.com/download/{info_hash}',
            'http://itorrents.org/torrent/{info_hash}.torrent',
            'https://torcache.pro/{info_hash}.torrent',
        ]
        self.cache = tv.Cache(self)
        self.enable_backlog = False
        self.enable_manualsearch = False
        self.enable_daily = False
        self.enabled = False
        self.headers = {'User-Agent': USER_AGENT}
        self.proper_strings = ['PROPER|REPACK|REAL|RERIP']
        self.provider_type = None
        self.public = False
        self.search_fallback = False
        self.search_mode = None
        self.session = MedusaSafeSession(hooks=[cloudflare])
        self.session.headers.update(self.headers)
        self.show = None
        self.supports_absolute_numbering = False
        self.supports_backlog = True
        self.url = ''
        self.urls = {}
        # Ability to override the search separator. For example, anizb uses '*' instead of a space.
        self.search_separator = ' '

        # Use and configure the attribute enable_cookies to show or hide the cookies input field per provider
        self.enable_cookies = False
        self.cookies = ''

        # Parameters for reducing the daily search results parsing
        self.max_recent_items = 5
        self.stop_at = 3

        # Police attributes
        self.enable_api_hit_cooldown = False
        self.enable_daily_request_reserve = False
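
The {info_hash} placeholders in bt_cache_urls above are filled in later by _make_url (shown in full in Example #9) once a hash has been extracted from a magnet link. The substitution itself is plain str.format; the hash below is made up for illustration:

# How a bt_cache_urls template is expanded (hypothetical 40-character hash).
info_hash = '0123456789ABCDEF0123456789ABCDEF01234567'
template = 'http://itorrents.org/torrent/{info_hash}.torrent'
print(template.format(info_hash=info_hash))
# -> http://itorrents.org/torrent/0123456789ABCDEF0123456789ABCDEF01234567.torrent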
Example #4
class HomeChangeLog(Home):
    session = MedusaSafeSession()

    def __init__(self, *args, **kwargs):
        super(HomeChangeLog, self).__init__(*args, **kwargs)

    def index(self):
        # TODO: SESSION: Check if this needs some more explicit exception handling.
        changes = HomeChangeLog.session.get_text(app.CHANGES_URL)

        if not changes:
            logger.log('Could not load changes from repo, giving a link!',
                       logger.DEBUG)
            changes = 'Could not load changes from the repo. [Click here for CHANGES.md]({url})'.format(
                url=app.CHANGES_URL)

        t = PageTemplate(rh=self, filename='markdown.mako')
        data = markdown2.markdown(
            changes if changes else
            'There was a problem connecting to GitHub, please refresh and try again',
            extras=['header-ids'])

        return t.render(title='Changelog',
                        header='Changelog',
                        data=data,
                        controller='changes',
                        action='index')
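
For reference, the 'header-ids' extra passed to markdown2 above adds id attributes to the rendered headings, which is what makes changelog sections linkable. A small standalone sketch; the rendered HTML shown in the comment is indicative rather than verbatim:

import markdown2

html = markdown2.markdown('# Changelog\n\nInitial release.', extras=['header-ids'])
print(html)
# Roughly: <h1 id="changelog">Changelog</h1> followed by a paragraph.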
Example #5
    def __init__(self):
        self.github_org = self.get_github_org()
        self.github_repo = self.get_github_repo()

        self.branch = app.BRANCH
        if app.BRANCH == '':
            self.branch = self._find_installed_branch()

        self._cur_commit_hash = app.CUR_COMMIT_HASH
        self._newest_commit_hash = None
        self._num_commits_behind = 0
        self._num_commits_ahead = 0

        self.session = MedusaSafeSession()
Example #6
    def __init__(self, name):
        """Initialize the class."""
        self.name = name

        self.anime_only = False
        self.bt_cache_urls = [
            'http://reflektor.karmorra.info/torrent/{info_hash}.torrent',
            'https://asnet.pw/download/{info_hash}/',
            'http://p2pdl.com/download/{info_hash}',
            'http://itorrents.org/torrent/{info_hash}.torrent',
            'http://thetorrent.org/torrent/{info_hash}.torrent',
            'https://cache.torrentgalaxy.org/get/{info_hash}',
            'https://www.seedpeer.me/torrent/{info_hash}',
        ]
        self.cache = tv.Cache(self)
        self.enable_backlog = False
        self.enable_manualsearch = False
        self.enable_daily = False
        self.enabled = False
        self.headers = {'User-Agent': USER_AGENT}
        self.proper_strings = ['PROPER|REPACK|REAL|RERIP']
        self.provider_type = None
        self.public = False
        self.search_fallback = False
        self.search_mode = None
        self.session = MedusaSafeSession(cloudflare=True)
        self.session.headers.update(self.headers)
        self.series = None
        self.supports_absolute_numbering = False
        self.supports_backlog = True
        self.url = ''
        self.urls = {}

        # Ability to override the search separator. For example, anizb uses '*' instead of a space.
        self.search_separator = ' '
        self.season_templates = (
            'S{season:0>2}',  # example: 'Series.Name.S03'
        )

        # Use and configure the attribute enable_cookies to show or hide the cookies input field per provider
        self.enable_cookies = False
        self.cookies = ''

        # Parameters for reducing the daily search results parsing
        self.max_recent_items = 5
        self.stop_at = 3

        # Delay downloads
        self.enable_search_delay = False
        self.search_delay = 480  # minutes
Example #7
    def resource_get_changelog(self):
        """Retrieve changelog and convert the markdown to html."""
        # TODO: SESSION: Check if this needs some more explicit exception handling.
        from medusa.session.core import MedusaSafeSession
        changes = MedusaSafeSession().get_text(app.CHANGES_URL)

        if not changes:
            changes = 'Could not load changes from the repo. [Click here for CHANGES.md]({url})'.format(
                url=app.CHANGES_URL)

        data = markdown2.markdown(
            changes if changes else
            'There was a problem connecting to GitHub, please refresh and try again',
            extras=['header-ids'])
        return self._ok(data)
Example #8
from __future__ import unicode_literals

import datetime
import logging

from medusa import app
from medusa.helper.common import sanitize_filename
from medusa.logger.adapters.style import BraceAdapter
from medusa.session.core import MedusaSafeSession

from requests.compat import urljoin

log = BraceAdapter(logging.getLogger(__name__))
log.logger.addHandler(logging.NullHandler())

session = MedusaSafeSession()


def send_nzb(nzb):
    """
    Dispatch method for sending an nzb to sabnzbd using its API.

    :param nzb: nzb SearchResult object
    :return: result of the communication with sabnzbd (True/False)
    """
    session.params.update({
        'output': 'json',
        'ma_username': app.SAB_USERNAME,
        'ma_password': app.SAB_PASSWORD,
        'apikey': app.SAB_APIKEY,
    })
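
The excerpt stops right after the session-wide parameters are set; the rest of send_nzb (not shown here) performs the actual API call. The sketch below is a hedged guess at how such a call could look, assuming SABnzbd's standard api endpoint with mode=addurl and an app.SAB_HOST setting; it is not the excerpt's actual continuation.

    # Hypothetical continuation -- not part of the excerpt above. Assumes SABnzbd's
    # 'api' endpoint and 'addurl' mode; session.params already carries the output
    # format and credentials set at the start of send_nzb().
    url = urljoin(app.SAB_HOST, 'api')
    response = session.get(url, params={'mode': 'addurl', 'name': nzb.url})
    if not response:
        log.warning('No response from SABnzbd at {url}', {'url': url})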
Example #9
class GenericProvider(object):
    """Generic provider."""

    NZB = 'nzb'
    TORRENT = 'torrent'

    def __init__(self, name):
        """Initialize the class."""
        self.name = name

        self.anime_only = False
        self.bt_cache_urls = [
            'http://reflektor.karmorra.info/torrent/{info_hash}.torrent',
            'https://torrent.cd/torrents/download/{info_hash}/.torrent',
            'https://asnet.pw/download/{info_hash}/',
            'http://p2pdl.com/download/{info_hash}',
            'http://itorrents.org/torrent/{info_hash}.torrent',
        ]
        self.cache = tv.Cache(self)
        self.enable_backlog = False
        self.enable_manualsearch = False
        self.enable_daily = False
        self.enabled = False
        self.headers = {'User-Agent': USER_AGENT}
        self.proper_strings = ['PROPER|REPACK|REAL|RERIP']
        self.provider_type = None
        self.public = False
        self.search_fallback = False
        self.search_mode = None
        self.session = MedusaSafeSession(hooks=[cloudflare])
        self.session.headers.update(self.headers)
        self.show = None
        self.supports_absolute_numbering = False
        self.supports_backlog = True
        self.url = ''
        self.urls = {}

        # Ability to override the search separator. For example, anizb uses '*' instead of a space.
        self.search_separator = ' '
        self.season_templates = (
            'S{season:0>2}',  # example: 'Series.Name.S03'
        )

        # Use and configure the attribute enable_cookies to show or hide the cookies input field per provider
        self.enable_cookies = False
        self.cookies = ''

        # Parameters for reducing the daily search results parsing
        self.max_recent_items = 5
        self.stop_at = 3

        # Police attributes
        self.enable_api_hit_cooldown = False
        self.enable_daily_request_reserve = False

    def download_result(self, result):
        """Download result from provider."""
        if not self.login():
            return False

        urls, filename = self._make_url(result)

        for url in urls:
            if 'NO_DOWNLOAD_NAME' in url:
                continue

            if url.startswith('http'):
                self.headers.update(
                    {'Referer': '/'.join(url.split('/')[:3]) + '/'})

            log.info('Downloading {result} from {provider} at {url}', {
                'result': result.name,
                'provider': self.name,
                'url': url
            })

            verify = False if self.public else None

            if download_file(url,
                             filename,
                             session=self.session,
                             headers=self.headers,
                             hooks={'response': self.get_url_hook},
                             verify=verify):

                if self._verify_download(filename):
                    log.info('Saved {result} to {location}', {
                        'result': result.name,
                        'location': filename
                    })
                    return True

        if urls:
            log.warning('Failed to download any results for {result}',
                        {'result': result.name})

        return False

    def get_content(self, url, params=None, timeout=30, **kwargs):
        """Retrieve the torrent/nzb content."""
        return self.session.get_content(url,
                                        params=params,
                                        timeout=timeout,
                                        **kwargs)

    def find_propers(self, proper_candidates):
        """Find propers in providers."""
        results = []

        for proper_candidate in proper_candidates:
            show_obj = Show.find(app.showList, int(proper_candidate[b'showid'])
                                 ) if proper_candidate[b'showid'] else None
            if show_obj:
                self.show = show_obj
                episode_obj = show_obj.get_episode(
                    proper_candidate[b'season'], proper_candidate[b'episode'])

                for term in self.proper_strings:
                    search_strings = self._get_episode_search_strings(
                        episode_obj, add_string=term)

                    for item in self.search(search_strings[0],
                                            ep_obj=episode_obj):
                        search_result = self.get_result()
                        results.append(search_result)

                        search_result.name, search_result.url = self._get_title_and_url(
                            item)
                        search_result.seeders, search_result.leechers = self._get_result_info(
                            item)
                        search_result.size = self._get_size(item)
                        search_result.pubdate = self._get_pubdate(item)

                        # This will be retrieved from the parser
                        search_result.proper_tags = ''

                        search_result.search_type = PROPER_SEARCH
                        search_result.date = datetime.today()
                        search_result.show = show_obj

        return results

    def find_search_results(self,
                            show,
                            episodes,
                            search_mode,
                            forced_search=False,
                            download_current_quality=False,
                            manual_search=False,
                            manual_search_type='episode'):
        """Search episodes based on param."""
        self._check_auth()
        self.show = show

        results = {}
        items_list = []

        for episode in episodes:
            if not manual_search:
                cache_result = self.cache.search_cache(
                    episode,
                    forced_search=forced_search,
                    down_cur_quality=download_current_quality)
                if cache_result:
                    if episode.episode not in results:
                        results[episode.episode] = cache_result
                    else:
                        results[episode.episode].extend(cache_result)

                    continue

            search_strings = []
            season_search = (len(episodes) > 1 or manual_search_type
                             == 'season') and search_mode == 'sponly'
            if season_search:
                search_strings = self._get_season_search_strings(episode)
            elif search_mode == 'eponly':
                search_strings = self._get_episode_search_strings(episode)

            for search_string in search_strings:
                # Find results from the provider
                items_list += self.search(search_string,
                                          ep_obj=episode,
                                          manual_search=manual_search)

            # In a season search we don't loop over the episode list, as we only need one episode to get the season string
            if search_mode == 'sponly':
                break

        if len(results) == len(episodes):
            return results

        if items_list:
            # categorize the items into lists by quality
            items = defaultdict(list)
            for item in items_list:
                items[self.get_quality(item, anime=show.is_anime)].append(item)

            # temporarily remove the list of items with unknown quality
            unknown_items = items.pop(Quality.UNKNOWN, [])

            # make a generator to sort the remaining items by descending quality
            items_list = (items[quality]
                          for quality in sorted(items, reverse=True))

            # unpack all of the quality lists into a single sorted list
            items_list = list(chain(*items_list))

            # extend the list with the unknown qualities, now sorted at the bottom of the list
            items_list.extend(unknown_items)

        cl = []

        # Move through each item and parse it into a quality
        search_results = []
        for item in items_list:

            # Make sure we start with a TorrentSearchResult, NZBDataSearchResult or NZBSearchResult search result obj.
            search_result = self.get_result()
            search_results.append(search_result)
            search_result.item = item
            search_result.download_current_quality = download_current_quality
            # FIXME: Should be changed to search_result.search_type
            search_result.forced_search = forced_search

            (search_result.name,
             search_result.url) = self._get_title_and_url(item)
            (search_result.seeders,
             search_result.leechers) = self._get_result_info(item)

            search_result.size = self._get_size(item)
            search_result.pubdate = self._get_pubdate(item)

            search_result.result_wanted = True

            try:
                search_result.parsed_result = NameParser(
                    parse_method=('normal', 'anime')[show.is_anime]).parse(
                        search_result.name)
            except (InvalidNameException, InvalidShowException) as error:
                log.debug(
                    'Error during parsing of release name: {release_name}, with error: {error}',
                    {
                        'release_name': search_result.name,
                        'error': error
                    })
                search_result.add_cache_entry = False
                search_result.result_wanted = False
                continue

            # I don't know why I'm doing this. Maybe remove it altogether later on, now that I've added the parsed_result
            # to the search_result.
            search_result.show = search_result.parsed_result.show
            search_result.quality = search_result.parsed_result.quality
            search_result.release_group = search_result.parsed_result.release_group
            search_result.version = search_result.parsed_result.version
            search_result.actual_season = search_result.parsed_result.season_number
            search_result.actual_episodes = search_result.parsed_result.episode_numbers

            if not manual_search:
                if not (search_result.show.air_by_date
                        or search_result.show.sports):
                    if search_mode == 'sponly':
                        if search_result.parsed_result.episode_numbers:
                            log.debug(
                                'This is supposed to be a season pack search but the result {0} is not a valid '
                                'season pack, skipping it', search_result.name)
                            search_result.result_wanted = False
                            continue
                        elif not [
                                ep for ep in episodes
                                if search_result.parsed_result.season_number ==
                            (ep.season, ep.scene_season)[ep.series.is_scene]
                        ]:
                            log.debug(
                                'This season result {0} is for a season we are not searching for, '
                                'skipping it', search_result.name)
                            search_result.result_wanted = False
                            continue
                    else:
                        # I'm going to split these up for better readability
                        # Check if we at least got a season parsed.
                        if search_result.parsed_result.season_number is None:
                            log.debug(
                                "The result {0} doesn't seem to have a valid season that we are currently trying to "
                                "snatch, skipping it", search_result.name)
                            search_result.result_wanted = False
                            continue

                        # Check if we at least got some episode numbers parsed.
                        if not search_result.parsed_result.episode_numbers:
                            log.debug(
                                "The result {0} doesn't seem to match an episode that we are currently trying to "
                                "snatch, skipping it", search_result.name)
                            search_result.result_wanted = False
                            continue

                        # Compare the episodes and season from the result with what was searched.
                        if not [
                                searched_episode
                                for searched_episode in episodes
                                if searched_episode.season ==
                                search_result.parsed_result.season_number and
                            (searched_episode.episode, searched_episode.
                             scene_episode)[searched_episode.series.is_scene]
                                in search_result.parsed_result.episode_numbers
                        ]:
                            log.debug(
                                "The result {0} doesn't seem to match an episode that we are currently trying to "
                                "snatch, skipping it", search_result.name)
                            search_result.result_wanted = False
                            continue

                    # We've performed some checks to decide if we want to continue with this result.
                    # If we've hit this, that means this is not an air_by_date and not a sports show. And it seems to be
                    # a valid result. Let's store the parsed season and episode number and continue.
                    search_result.actual_season = search_result.parsed_result.season_number
                    search_result.actual_episodes = search_result.parsed_result.episode_numbers
                else:
                    # air_by_date or sports show.
                    search_result.same_day_special = False

                    if not search_result.parsed_result.is_air_by_date:
                        log.debug(
                            "This is supposed to be a date search but the result {0} didn't parse as one, "
                            "skipping it", search_result.name)
                        search_result.result_wanted = False
                        continue
                    else:
                        # Query the tv_episodes table to match against the parsed air_date.
                        air_date = search_result.parsed_result.air_date.toordinal(
                        )
                        db = DBConnection()
                        sql_results = db.select(
                            'SELECT season, episode FROM tv_episodes WHERE showid = ? AND airdate = ?',
                            [search_result.show.indexerid, air_date])

                        if len(sql_results) == 2:
                            if int(sql_results[0][b'season']) == 0 and int(
                                    sql_results[1][b'season']) != 0:
                                search_result.actual_season = int(
                                    sql_results[1][b'season'])
                                search_result.actual_episodes = [
                                    int(sql_results[1][b'episode'])
                                ]
                                search_result.same_day_special = True
                            elif int(sql_results[1][b'season']) == 0 and int(
                                    sql_results[0][b'season']) != 0:
                                search_result.actual_season = int(
                                    sql_results[0][b'season'])
                                search_result.actual_episodes = [
                                    int(sql_results[0][b'episode'])
                                ]
                                search_result.same_day_special = True
                        elif len(sql_results) != 1:
                            log.warning(
                                "Tried to look up the date for the episode {0} but the database didn't return proper "
                                "results, skipping it", search_result.name)
                            search_result.result_wanted = False
                            continue

                        # @TODO: Need to verify and test this.
                        if search_result.result_wanted and not search_result.same_day_special:
                            search_result.actual_season = int(
                                sql_results[0][b'season'])
                            search_result.actual_episodes = [
                                int(sql_results[0][b'episode'])
                            ]

        # Iterate again over the search results, and see if there is anything we want.
        for search_result in search_results:

            # Try to cache the item if we want to.
            cache_result = search_result.add_result_to_cache(self.cache)
            if cache_result is not None:
                cl.append(cache_result)

            if not search_result.result_wanted:
                log.debug(
                    "We aren't interested in this result: {0} with url: {1}",
                    search_result.name, search_result.url)
                continue

            log.debug('Found result {0} at {1}', search_result.name,
                      search_result.url)

            episode_object = search_result.create_episode_object()
            # result = self.get_result(episode_object, search_result)
            search_result.finish_search_result(self)

            if not episode_object:
                episode_number = SEASON_RESULT
                log.debug('Found season pack result {0} at {1}',
                          search_result.name, search_result.url)
            elif len(episode_object) == 1:
                episode_number = episode_object[0].episode
                log.debug('Found single episode result {0} at {1}',
                          search_result.name, search_result.url)
            else:
                episode_number = MULTI_EP_RESULT
                log.debug(
                    'Found multi-episode ({0}) result {1} at {2}', ', '.join(
                        map(str, search_result.parsed_result.episode_numbers)),
                    search_result.name, search_result.url)
            if episode_number not in results:
                results[episode_number] = [search_result]
            else:
                results[episode_number].append(search_result)

        if cl:
            # Access to a protected member of a client class
            db = self.cache._get_db()
            db.mass_action(cl)

        return results

    def get_id(self):
        """Get ID of the provider."""
        return GenericProvider.make_id(self.name)

    def get_quality(self, item, anime=False):
        """Get scene quality of the result."""
        (title, _) = self._get_title_and_url(item)
        quality = Quality.scene_quality(title, anime)

        return quality

    def get_result(self, episodes=None):
        """Get result."""
        return self._get_result(episodes)

    @staticmethod
    def get_url_hook(response, **kwargs):
        """Get URL hook."""
        request = response.request
        log.debug(
            '{method} URL: {url} [Status: {status}]', {
                'method': request.method,
                'url': request.url,
                'status': response.status_code,
            })
        log.debug('User-Agent: {}'.format(request.headers['User-Agent']))

        if request.method.upper() == 'POST':
            body = request.body
            # try to log post data using various codecs to decode
            if isinstance(body, unicode):
                log.debug('With post data: {0}', body)
                return

            codecs = ('utf-8', 'latin1', 'cp1252')
            for codec in codecs:
                try:
                    data = body.decode(codec)
                except UnicodeError as error:
                    log.debug('Failed to decode post data as {codec}: {msg}', {
                        'codec': codec,
                        'msg': error
                    })
                else:
                    log.debug('With post data: {0}', data)
                    break
            else:
                log.warning('Failed to decode post data with {codecs}',
                            {'codecs': codecs})

    def get_url(self, url, post_data=None, params=None, timeout=30, **kwargs):
        """Load the given URL."""
        log.info(
            'providers.generic_provider.get_url() is deprecated, '
            'please rewrite your provider to make use of the MedusaSession session class.'
        )
        kwargs['hooks'] = {'response': self.get_url_hook}

        if not post_data:
            return self.session.get(url,
                                    params=params,
                                    headers=self.headers,
                                    timeout=timeout,
                                    **kwargs)
        else:
            return self.session.post(url,
                                     post_data=post_data,
                                     params=params,
                                     headers=self.headers,
                                     timeout=timeout,
                                     **kwargs)

    def image_name(self):
        """Return provider image name."""
        return self.get_id() + '.png'

    def is_active(self):
        """Check if provider is active."""
        return False

    def is_enabled(self):
        """Check if provider is enabled."""
        return bool(self.enabled)

    @staticmethod
    def make_id(name):
        """Make ID of the provider."""
        if not name:
            return ''

        return re.sub(r'[^\w\d_]', '_', str(name).strip().lower())

    def search_rss(self, episodes):
        """Find cached needed episodes."""
        return self.cache.find_needed_episodes(episodes)

    def seed_ratio(self):
        """Return ratio."""
        return ''

    def _check_auth(self):
        """Check if we are autenticated."""
        return True

    def login(self):
        """Login to provider."""
        return True

    def search(self, search_strings, age=0, ep_obj=None, **kwargs):
        """Search the provider."""
        return []

    @staticmethod
    def parse_pubdate(pubdate, human_time=False, timezone=None, **kwargs):
        """
        Parse publishing date into a datetime object.

        :param pubdate: date and time string
        :param human_time: string uses human slang ("4 hours ago")
        :param timezone: use a different timezone ("US/Eastern")

        :keyword dayfirst: Interpret the first value as the day
        :keyword yearfirst: Interpret the first value as the year

        :returns: a datetime object or None
        """
        now_alias = ('right now', 'just now', 'now')

        df = kwargs.pop('dayfirst', False)
        yf = kwargs.pop('yearfirst', False)

        # This can happen from time to time
        if pubdate is None:
            log.debug('Skipping invalid publishing date.')
            return

        try:
            if human_time:
                if pubdate.lower() in now_alias:
                    seconds = 0
                else:
                    match = re.search(r'(?P<time>\d+\W*\w+)', pubdate)
                    seconds = parse(match.group('time'))
                return datetime.now(tz.tzlocal()) - timedelta(seconds=seconds)

            dt = parser.parse(pubdate, dayfirst=df, yearfirst=yf, fuzzy=True)
            # Always make UTC aware if naive
            if dt.tzinfo is None or dt.tzinfo.utcoffset(dt) is None:
                dt = dt.replace(tzinfo=tz.gettz('UTC'))
            if timezone:
                dt = dt.astimezone(tz.gettz(timezone))
            return dt

        except (AttributeError, TypeError, ValueError):
            log.exception('Failed parsing publishing date: {0}', pubdate)

    def _get_result(self, episodes=None):
        """Get result."""
        return SearchResult(episodes)

    def _get_episode_search_strings(self, episode, add_string=''):
        """Get episode search strings."""
        if not episode:
            return []

        search_string = {'Episode': []}

        all_possible_show_names = episode.series.get_all_possible_names()
        if episode.scene_season:
            all_possible_show_names = all_possible_show_names.union(
                episode.series.get_all_possible_names(
                    season=episode.scene_season))

        for show_name in all_possible_show_names:
            episode_string = show_name + self.search_separator
            episode_string_fallback = None

            if episode.series.air_by_date:
                episode_string += str(episode.airdate).replace('-', ' ')
            elif episode.series.sports:
                episode_string += str(episode.airdate).replace('-', ' ')
                episode_string += ('|', ' ')[len(self.proper_strings) > 1]
                episode_string += episode.airdate.strftime('%b')
            elif episode.series.anime:
                # If the showname is a season scene exception, we want to use the indexer episode number.
                if (episode.scene_season > 1
                        and show_name in get_scene_exceptions(
                            episode.series.indexerid, episode.series.indexer,
                            episode.scene_season)):
                    # This is apparently a season exception, let's use the scene_episode instead of absolute
                    ep = episode.scene_episode
                else:
                    ep = episode.scene_absolute_number
                episode_string_fallback = episode_string + '{episode:0>3}'.format(
                    episode=ep)
                episode_string += '{episode:0>2}'.format(episode=ep)
            else:
                episode_string += config.naming_ep_type[2] % {
                    'seasonnumber': episode.scene_season,
                    'episodenumber': episode.scene_episode,
                }

            if add_string:
                episode_string += self.search_separator + add_string
                if episode_string_fallback:
                    episode_string_fallback += self.search_separator + add_string

            search_string['Episode'].append(episode_string.strip())
            if episode_string_fallback:
                search_string['Episode'].append(
                    episode_string_fallback.strip())

        return [search_string]

    def _get_tvdb_id(self):
        """Return the tvdb id if the shows indexer is tvdb. If not, try to use the externals to get it."""
        if not self.show:
            return None
        return self.show.indexerid if self.show.indexer == INDEXER_TVDBV2 else self.show.externals.get(
            'tvdb_id')

    def _get_season_search_strings(self, episode):
        search_string = {'Season': []}

        for show_name in episode.series.get_all_possible_names(
                season=episode.scene_season):
            episode_string = show_name + self.search_separator

            if episode.series.air_by_date or episode.series.sports:
                search_string['Season'].append(
                    episode_string + str(episode.airdate).split('-')[0])
            elif episode.series.anime:
                search_string['Season'].append(episode_string + 'Season')
            else:
                for season_template in self.season_templates:
                    templated_episode_string = episode_string + season_template.format(
                        season=episode.scene_season)
                    search_string['Season'].append(
                        templated_episode_string.strip())

        return [search_string]

    def _get_size(self, item):
        """Return default size."""
        return -1

    def _get_storage_dir(self):
        """Return storage dir."""
        return ''

    def _get_result_info(self, item):
        """Return default seeders and leechers."""
        return -1, -1

    def _get_pubdate(self, item):
        """Return publish date of the item.

        If the provider doesn't have a _get_pubdate function, this will be used.
        """
        return None

    def _get_title_and_url(self, item):
        """Return title and url from result."""
        if not item:
            return '', ''

        title = item.get('title', '')
        url = item.get('link', '')

        if title:
            title = title.replace(' ', '.')
        else:
            title = ''

        if url:
            url = url.replace('&amp;', '&').replace('%26tr%3D', '&tr=')
        else:
            url = ''

        return title, url

    def _make_url(self, result):
        """Return url if result is a magnet link."""
        if not result:
            return '', ''

        urls = []
        filename = ''

        if result.url.startswith('magnet:'):
            try:
                info_hash = re.findall(r'urn:btih:([\w]{32,40})',
                                       result.url)[0].upper()

                try:
                    torrent_name = re.findall('dn=([^&]+)', result.url)[0]
                except Exception:
                    torrent_name = 'NO_DOWNLOAD_NAME'

                if len(info_hash) == 32:
                    info_hash = b16encode(b32decode(info_hash)).upper()

                if not info_hash:
                    log.error(
                        'Unable to extract torrent hash from magnet: {0}',
                        result.url)
                    return urls, filename

                urls = [
                    x.format(info_hash=info_hash, torrent_name=torrent_name)
                    for x in self.bt_cache_urls
                ]
                shuffle(urls)
            except Exception:
                log.error(
                    'Unable to extract torrent hash or name from magnet: {0}',
                    result.url)
                return urls, filename
        else:
            urls = [result.url]

        result_name = sanitize_filename(result.name)

        # Some NZB providers (e.g. Jackett) can also download torrents
        if (result.url.endswith(GenericProvider.TORRENT)
                or result.url.startswith('magnet:')
            ) and self.provider_type == GenericProvider.NZB:
            filename = join(app.TORRENT_DIR, result_name + '.torrent')
        else:
            filename = join(self._get_storage_dir(),
                            result_name + '.' + self.provider_type)

        return urls, filename

    def _verify_download(self, file_name=None):
        return True

    @property
    def recent_results(self):
        """Return recent RSS results from provier."""
        return recent_results.get(self.get_id(), [])

    @recent_results.setter
    def recent_results(self, items):
        """Set recent results from provider."""
        if not recent_results.get(self.get_id()):
            recent_results.update({self.get_id(): []})
        if items:
            add_to_list = []
            for item in items:
                if item['link'] not in {
                        cache_item['link']
                        for cache_item in recent_results[self.get_id()]
                }:
                    add_to_list += [item]
            results = add_to_list + recent_results[self.get_id()]
            recent_results[self.get_id()] = results[:self.max_recent_items]

    def add_cookies_from_ui(self):
        """
        Add the cookies configured from UI to the providers requests session.

        :return: A dict with the keys 'result' (bool) and 'message' (string)
        """
        # Added an exception for rss torrent providers, as for them adding cookies should initially be optional.
        from medusa.providers.torrent.rss.rsstorrent import TorrentRssProvider
        if isinstance(self, TorrentRssProvider) and not self.cookies:
            return {
                'result':
                True,
                'message':
                'This is a TorrentRss provider without any cookies provided. '
                'Cookies for this provider are considered optional.'
            }

        # This is the generic attribute used to manually add cookies for provider authentication
        if not self.enable_cookies:
            return {
                'result':
                False,
                'message':
                'Adding cookies is not supported for provider: {0}'.format(
                    self.name)
            }

        if not self.cookies:
            return {
                'result':
                False,
                'message':
                'No Cookies added from ui for provider: {0}'.format(self.name)
            }

        cookie_validator = re.compile(r'^([\w%]+=[\w%]+)(;[\w%]+=[\w%]+)*$')
        if not cookie_validator.match(self.cookies):
            ui.notifications.message(
                'Failed to validate cookie for provider {provider}'.format(
                    provider=self.name),
                'Cookie is not correctly formatted: {0}'.format(self.cookies))
            return {
                'result':
                False,
                'message':
                'Cookie is not correctly formatted: {0}'.format(self.cookies)
            }

        if not all(req_cookie in
                   [x.rsplit('=', 1)[0] for x in self.cookies.split(';')]
                   for req_cookie in self.required_cookies):
            return {
                'result':
                False,
                'message':
                "You haven't configured the requied cookies. Please login at {provider_url}, "
                "and make sure you have copied the following cookies: {required_cookies!r}"
                .format(provider_url=self.name,
                        required_cookies=self.required_cookies)
            }

        # cookie_validator got at least one cookie key/value pair, let's return success
        add_dict_to_cookiejar(
            self.session.cookies,
            dict(x.rsplit('=', 1) for x in self.cookies.split(';')))
        return {'result': True, 'message': ''}

    def check_required_cookies(self):
        """
        Check if we have the required cookies in the requests sessions object.

        Meaning that we've already successfully authenticated once, and we don't need to go through this again.
        Note! This doesn't mean the cookies are correct!
        """
        if not hasattr(self, 'required_cookies'):
            # A reminder for the developer implementing cookie-based authentication.
            log.error(
                'You need to configure the required_cookies attribute, for the provider: {provider}',
                {'provider': self.name})
            return False
        return all(
            dict_from_cookiejar(self.session.cookies).get(cookie)
            for cookie in self.required_cookies)

    def cookie_login(self, check_login_text, check_url=None):
        """
        Check the response for text that indicates a login prompt.

        In that case, the cookie authentication was not successful.
        :param check_login_text: A string that's visible when the authentication failed.
        :param check_url: The url to use to test the login with cookies. By default the providers home page is used.

        :return: False when authentication was not successful. True if successful.
        """
        check_url = check_url or self.url

        if self.check_required_cookies():
            # All required cookies have been found within the current session, we don't need to go through this again.
            return True

        if self.cookies:
            result = self.add_cookies_from_ui()
            if not result['result']:
                ui.notifications.message(result['message'])
                log.warning(result['message'])
                return False
        else:
            log.warning(
                'Failed to login, you will need to add your cookies in the provider settings'
            )
            ui.notifications.error(
                'Failed to auth with {provider}'.format(provider=self.name),
                'You will need to add your cookies in the provider settings')
            return False

        response = self.session.get(check_url)
        if not response or any([
                not (response.text and response.status_code == 200),
                check_login_text.lower() in response.text.lower()
        ]):
            log.warning(
                'Please configure the required cookies for this provider. Check your provider settings'
            )
            ui.notifications.error(
                'Wrong cookies for {provider}'.format(provider=self.name),
                'Check your provider settings')
            self.session.cookies.clear()
            return False
        else:
            return True

    def __str__(self):
        """Return provider name and provider type."""
        return '{provider_name} ({provider_type})'.format(
            provider_name=self.name, provider_type=self.provider_type)

    def __unicode__(self):
        """Return provider name and provider type."""
        return '{provider_name} ({provider_type})'.format(
            provider_name=self.name, provider_type=self.provider_type)
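
Two of the helpers defined above are easy to exercise on their own. The sketch below assumes the class is importable from medusa.providers.generic_provider, matching the excerpt's package layout; the expected output in the comments is indicative.

# Standalone sketch of two helpers from the GenericProvider excerpt above.
from medusa.providers.generic_provider import GenericProvider

# make_id() lowercases the name and replaces anything that is not a word character.
print(GenericProvider.make_id('Torrent Galaxy'))    # -> torrent_galaxy

# parse_pubdate() returns a timezone-aware datetime (UTC is assumed when the input is naive).
dt = GenericProvider.parse_pubdate('2018-01-02 10:00:00')
print(dt.isoformat())                               # -> 2018-01-02T10:00:00+00:00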
Example #10
    def __init__(self):
        self.amActive = False
        self.updater = self.find_install_type()
        self.session = MedusaSafeSession()
Example #11
class CheckVersion(object):
    """Version check class meant to run as a thread object with the sr scheduler."""
    def __init__(self):
        self.amActive = False
        self.updater = self.find_install_type()
        self.session = MedusaSafeSession()

    def run(self, force=False):

        self.amActive = True

        # Update remote branches and store in app.GIT_REMOTE_BRANCHES
        self.list_remote_branches()

        if self.updater:
            # set current branch version
            app.BRANCH = self.get_branch()

            if self.check_for_new_version(force):
                if app.AUTO_UPDATE:
                    log.info(u'New update found, starting auto-updater ...')
                    ui.notifications.message(
                        'New update found, starting auto-updater')
                    if self.run_backup_if_safe():
                        if self.update():
                            log.info(u'Update was successful!')
                            ui.notifications.message('Update was successful')
                            app.events.put(app.events.SystemEvent.RESTART)
                        else:
                            log.info(u'Update failed!')
                            ui.notifications.message('Update failed!')

            self.check_for_new_news(force)

        self.amActive = False

    def run_backup_if_safe(self):
        return self.safe_to_update() is True and self._runbackup() is True

    def _runbackup(self):
        # Do a system backup before update
        log.info(u'Config backup in progress...')
        ui.notifications.message('Backup', 'Config backup in progress...')
        try:
            backupDir = os.path.join(app.DATA_DIR, app.BACKUP_DIR)
            if not os.path.isdir(backupDir):
                os.mkdir(backupDir)

            if self._keeplatestbackup(backupDir) and self._backup(backupDir):
                log.info(u'Config backup successful')
                ui.notifications.message('Backup', 'Config backup successful')
                return True
            else:
                log.warning(u'Config backup failed')
                ui.notifications.message('Backup', 'Config backup failed')
                return False
        except Exception as e:
            log.error(u'Update: Config backup failed. Error: {0!r}', e)
            ui.notifications.message('Backup', 'Config backup failed')
            return False

    @staticmethod
    def _keeplatestbackup(backupDir=None):
        if not backupDir:
            return False

        import glob
        files = glob.glob(os.path.join(backupDir, '*.zip'))
        if not files:
            return True

        now = time.time()
        newest = files[0], now - os.path.getctime(files[0])
        for f in files[1:]:
            age = now - os.path.getctime(f)
            if age < newest[1]:
                newest = f, age
        files.remove(newest[0])

        for f in files:
            os.remove(f)

        return True

    # TODO: Merge with backup in helpers
    @staticmethod
    def _backup(backupDir=None):
        if not backupDir:
            return False
        source = [
            os.path.join(app.DATA_DIR, app.APPLICATION_DB), app.CONFIG_FILE,
            os.path.join(app.DATA_DIR, app.FAILED_DB),
            os.path.join(app.DATA_DIR, app.CACHE_DB)
        ]
        target = os.path.join(
            backupDir,
            app.BACKUP_FILENAME.format(
                timestamp=time.strftime('%Y%m%d%H%M%S')))

        for (path, dirs, files) in os.walk(app.CACHE_DIR, topdown=True):
            for dirname in dirs:
                if path == app.CACHE_DIR and dirname not in ['images']:
                    dirs.remove(dirname)
            for filename in files:
                source.append(os.path.join(path, filename))

        return helpers.backup_config_zip(source, target, app.DATA_DIR)

    def safe_to_update(self):
        def db_safe(self):
            message = {
                'equal': {
                    'type':
                    DEBUG,
                    'text':
                    u'We can proceed with the update. New update has same DB version'
                },
                'upgrade': {
                    'type':
                    DEBUG,
                    'text':
                    u'We can proceed with the update. New update has a new DB version'
                },
                'downgrade': {
                    'type':
                    WARNING,
                    'text':
                    u"We can't proceed with the update. New update has an old DB version. It's not possible to downgrade"
                },
            }
            try:
                result = self.getDBcompare()
                if result in message:
                    log.log(
                        message[result]['type'], message[result]
                        ['text'])  # unpack the result message into a log entry
                else:
                    log.warning(
                        u"We can't proceed with the update. Unable to check remote DB version. Error: {0}",
                        result)
                return result in ['equal', 'upgrade'
                                  ]  # add future True results to the list
            except Exception as error:
                log.error(
                    u"We can't proceed with the update. Unable to compare DB version. Error: {0!r}",
                    error)
                return False

        def postprocessor_safe():
            if not app.post_processor_scheduler.action.amActive:
                log.debug(
                    u'We can proceed with the update. Post-Processor is not running'
                )
                return True
            else:
                log.debug(
                    u"We can't proceed with the update. Post-Processor is running"
                )
                return False

        def showupdate_safe():
            if app.show_update_scheduler.action.amActive:
                log.debug(
                    u"We can't proceed with the update. Shows are being updated"
                )
                return False

            if app.episode_update_scheduler.action.amActive:
                log.debug(
                    u"We can't proceed with the update. Episodes are being updated"
                )
                return False

            log.debug(
                u'We can proceed with the update. Shows or episodes are not being updated'
            )
            return True

        db_safe = db_safe(self)
        postprocessor_safe = postprocessor_safe()
        showupdate_safe = showupdate_safe()

        if db_safe and postprocessor_safe and showupdate_safe:
            log.debug(u'Proceeding with auto update')
            return True
        else:
            log.debug(u'Auto update aborted')
            return False

    def getDBcompare(self):
        """
        Compare the current DB version with the new branch version.

        :return: 'upgrade', 'equal', or 'downgrade'
        """
        try:
            self.updater.need_update()
            cur_hash = str(self.updater.newest_commit_hash)
            assert len(
                cur_hash
            ) == 40, 'Commit hash wrong length: {length} hash: {hash}'.format(
                length=len(cur_hash), hash=cur_hash)

            check_url = 'http://rawcdn.githack.com/{org}/{repo}/{commit}/medusa/databases/main_db.py'.format(
                org=app.GIT_ORG, repo=app.GIT_REPO, commit=cur_hash)
            response = self.session.get(check_url)

            # Get remote DB version
            match_max_db = re.search(
                r'MAX_DB_VERSION\s*=\s*(?P<version>\d{2,3})', response.text)
            new_branch_major_db_version = int(
                match_max_db.group('version')) if match_max_db else None

            # Check local DB version
            main_db_con = db.DBConnection()
            cur_branch_major_db_version, cur_branch_minor_db_version = main_db_con.checkDBVersion(
            )

            if any([
                    cur_branch_major_db_version is None,
                    cur_branch_minor_db_version is None,
                    new_branch_major_db_version is None
            ]):
                return 'Could not compare database versions, aborting'

            if new_branch_major_db_version > cur_branch_major_db_version:
                return 'upgrade'
            elif new_branch_major_db_version == cur_branch_major_db_version:
                return 'equal'
            else:
                return 'downgrade'
        except Exception as e:
            return repr(e)

    def find_install_type(self):
        """
        Determine how this copy of Medusa was installed.

        :return: the update manager matching the installation:
            DockerUpdateManager: any docker build
            GitUpdateManager: running from source using git
            SourceUpdateManager: running from source without git
        """
        if self.runs_in_docker():
            return DockerUpdateManager()
        elif os.path.isdir(os.path.join(app.PROG_DIR, u'.git')):
            return GitUpdateManager()

        return SourceUpdateManager()

    def check_for_new_version(self, force=False):
        """
        Check the internet for a newer version.

        :param force: if true, the VERSION_NOTIFY setting will be ignored and a check will be forced
        :return: bool, True for new version or False for no new version.
        """
        if not self.updater or (not app.VERSION_NOTIFY and not app.AUTO_UPDATE
                                and not force):
            log.info(
                u'Version checking is disabled, not checking for the newest version'
            )
            app.NEWEST_VERSION_STRING = None
            return False

        # checking for updates
        if not app.AUTO_UPDATE:
            log.info(u'Checking for updates using {0}', self.updater)

        if not self.updater.need_update():
            app.NEWEST_VERSION_STRING = None

            if force:
                ui.notifications.message('No update needed')
                log.info(u'No update needed')

            # no updates needed
            return False

        # found updates
        return self.updater.can_update()

    def check_for_new_news(self, force=False):
        """
        Check GitHub for the latest news.

        :param force: ignored
        :return: unicode, a copy of the news
        """
        # Grab a copy of the news
        log.debug(u'Checking GitHub for latest news.')
        response = self.session.get(app.NEWS_URL)
        if not response or not response.text:
            log.debug(u'Could not load news from URL: {0}', app.NEWS_URL)
            return

        try:
            last_read = datetime.datetime.strptime(app.NEWS_LAST_READ,
                                                   '%Y-%m-%d')
        except ValueError:
            log.warning(u'Invalid news last read date: {0}',
                        app.NEWS_LAST_READ)
            last_read = 0

        news = response.text
        app.NEWS_UNREAD = 0
        got_latest = False
        for match in re.finditer(r'^####\s*(\d{4}-\d{2}-\d{2})\s*####', news,
                                 re.M):
            if not got_latest:
                got_latest = True
                app.NEWS_LATEST = match.group(1)

            try:
                if datetime.datetime.strptime(match.group(1),
                                              '%Y-%m-%d') > last_read:
                    app.NEWS_UNREAD += 1
            except ValueError:
                log.warning(
                    u'Unable to match latest news date. Repository news date: {0}',
                    match.group(1))
                pass

        return news

    def need_update(self):
        if self.updater:
            return self.updater.need_update()

    def update(self):
        if self.updater:
            # update branch with current config branch value
            self.updater.branch = app.BRANCH

            # check for updates
            if self.updater.need_update() and self.updater.can_update():
                return self.updater.update()

        return False

    def list_remote_branches(self):
        if self.updater:
            app.GIT_REMOTE_BRANCHES = self.updater.list_remote_branches()
        return app.GIT_REMOTE_BRANCHES

    def get_branch(self):
        if self.updater:
            return self.updater.branch

    @staticmethod
    def runs_in_docker():
        """
        Check if Medusa is run in a docker container.

        If run in a container, we don't want to use the auto update feature, but just want to inform the user
        there is an update available. The user can update by pulling the latest docker tag.
        """
        if app.RUNS_IN_DOCKER is not None:
            return app.RUNS_IN_DOCKER

        path = '/proc/{pid}/cgroup'.format(pid=os.getpid())
        try:
            if not os.path.isfile(path):
                return False

            with open(path) as f:
                for line in f:
                    if re.match(r'\d+:[\w=]+:/docker(-[ce]e)?/\w+', line):
                        log.debug(u'Running in a docker container')
                        app.RUNS_IN_DOCKER = True
                        return True
                return False
        except (EnvironmentError, OSError) as error:
            log.info(
                u'Tried to check the path {path} to determine whether we are running in a docker container, '
                u'but an error occurred: {error}', {
                    'path': path,
                    'error': error
                })
            return False
Example #12
0
    def __init__(self):
        self.amActive = False
        self.updater = self.find_install_type()
        self.session = MedusaSafeSession()
Example #13
0
class CheckVersion(object):
    """Version check class meant to run as a thread object with the sr scheduler."""

    def __init__(self):
        self.amActive = False
        self.updater = self.find_install_type()
        self.session = MedusaSafeSession()

    def run(self, force=False):

        self.amActive = True

        # Update remote branches and store in app.GIT_REMOTE_BRANCHES
        self.list_remote_branches()

        if self.updater:
            # set current branch version
            app.BRANCH = self.get_branch()

            if self.check_for_new_version(force):
                if app.AUTO_UPDATE:
                    log.info(u'New update found, starting auto-updater ...')
                    ui.notifications.message('New update found, starting auto-updater')
                    if self.run_backup_if_safe():
                        if self.update():
                            log.info(u'Update was successful!')
                            ui.notifications.message('Update was successful')
                            app.events.put(app.events.SystemEvent.RESTART)
                        else:
                            log.info(u'Update failed!')
                            ui.notifications.message('Update failed!')

            self.check_for_new_news(force)

        self.amActive = False

    def run_backup_if_safe(self):
        return self.safe_to_update() is True and self._runbackup() is True

    def _runbackup(self):
        # Do a system backup before update
        log.info(u'Config backup in progress...')
        ui.notifications.message('Backup', 'Config backup in progress...')
        try:
            backupDir = os.path.join(app.DATA_DIR, app.BACKUP_DIR)
            if not os.path.isdir(backupDir):
                os.mkdir(backupDir)

            if self._keeplatestbackup(backupDir) and self._backup(backupDir):
                log.info(u'Config backup successful, updating...')
                ui.notifications.message('Backup', 'Config backup successful, updating...')
                return True
            else:
                log.warning(u'Config backup failed, aborting update')
                ui.notifications.message('Backup', 'Config backup failed, aborting update')
                return False
        except Exception as e:
            log.error(u'Update: Config backup failed. Error: {0!r}', e)
            ui.notifications.message('Backup', 'Config backup failed, aborting update')
            return False

    @staticmethod
    def _keeplatestbackup(backupDir=None):
        if not backupDir:
            return False

        import glob
        files = glob.glob(os.path.join(backupDir, '*.zip'))
        if not files:
            return True

        now = time.time()
        newest = files[0], now - os.path.getctime(files[0])
        for f in files[1:]:
            age = now - os.path.getctime(f)
            if age < newest[1]:
                newest = f, age
        files.remove(newest[0])

        for f in files:
            os.remove(f)

        return True
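
    # Editor's note: the loop above keeps the most recently created zip and deletes
    # the rest. A shorter equivalent (illustrative only, not part of the original
    # source) would be:
    #
    #   newest = max(files, key=os.path.getctime)
    #   for stale in files:
    #       if stale != newest:
    #           os.remove(stale)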

    # TODO: Merge with backup in helpers
    @staticmethod
    def _backup(backupDir=None):
        if not backupDir:
            return False
        source = [
            os.path.join(app.DATA_DIR, app.APPLICATION_DB),
            app.CONFIG_FILE,
            os.path.join(app.DATA_DIR, app.FAILED_DB),
            os.path.join(app.DATA_DIR, app.CACHE_DB)
        ]
        target = os.path.join(backupDir, app.BACKUP_FILENAME.format(timestamp=time.strftime('%Y%m%d%H%M%S')))

        for (path, dirs, files) in os.walk(app.CACHE_DIR, topdown=True):
            for dirname in dirs:
                if path == app.CACHE_DIR and dirname not in ['images']:
                    dirs.remove(dirname)
            for filename in files:
                source.append(os.path.join(path, filename))

        return helpers.backup_config_zip(source, target, app.DATA_DIR)

    def safe_to_update(self):

        def db_safe(self):
            message = {
                'equal': {
                    'type': DEBUG,
                    'text': u'We can proceed with the update. New update has same DB version'},
                'upgrade': {
                    'type': DEBUG,
                    'text': u'We can proceed with the update. New update has a new DB version'},
                'downgrade': {
                    'type': WARNING,
                    'text': u"We can't proceed with the update. New update has an old DB version. It's not possible to downgrade"},
            }
            try:
                result = self.getDBcompare()
                if result in message:
                    log.log(message[result]['type'], message[result]['text'])  # unpack the result message into a log entry
                else:
                    log.warning(u"We can't proceed with the update. Unable to check remote DB version. Error: {0}", result)
                return result in ['equal', 'upgrade']  # add future True results to the list
            except Exception as error:
                log.error(u"We can't proceed with the update. Unable to compare DB version. Error: {0!r}", error)
                return False

        def postprocessor_safe():
            if not app.auto_post_processor_scheduler.action.amActive:
                log.debug(u'We can proceed with the update. Post-Processor is not running')
                return True
            else:
                log.debug(u"We can't proceed with the update. Post-Processor is running")
                return False

        def showupdate_safe():
            if not app.show_update_scheduler.action.amActive:
                log.debug(u'We can proceed with the update. Shows are not being updated')
                return True
            else:
                log.debug(u"We can't proceed with the update. Shows are being updated")
                return False

        db_safe = db_safe(self)
        postprocessor_safe = postprocessor_safe()
        showupdate_safe = showupdate_safe()

        if db_safe and postprocessor_safe and showupdate_safe:
            log.debug(u'Proceeding with auto update')
            return True
        else:
            log.debug(u'Auto update aborted')
            return False

    def getDBcompare(self):
        """
        Compare the current DB version with the new branch version.

        :return: 'upgrade', 'equal', or 'downgrade'; any other string describes an error
        """
        try:
            self.updater.need_update()
            cur_hash = str(self.updater.newest_commit_hash)
            assert len(cur_hash) == 40, 'Commit hash wrong length: {length} hash: {hash}'.format(
                length=len(cur_hash), hash=cur_hash)

            check_url = 'http://rawcdn.githack.com/{org}/{repo}/{commit}/medusa/databases/main_db.py'.format(
                org=app.GIT_ORG, repo=app.GIT_REPO, commit=cur_hash)
            response = self.session.get(check_url)

            # Get remote DB version
            match_max_db = re.search(r'MAX_DB_VERSION\s*=\s*(?P<version>\d{2,3})', response.text)
            new_branch_major_db_version = int(match_max_db.group('version')) if match_max_db else None

            # Check local DB version
            main_db_con = db.DBConnection()
            cur_branch_major_db_version, cur_branch_minor_db_version = main_db_con.checkDBVersion()

            if any([cur_branch_major_db_version is None, cur_branch_minor_db_version is None,
                    new_branch_major_db_version is None]):
                return 'Could not compare database versions, aborting'

            if new_branch_major_db_version > cur_branch_major_db_version:
                return 'upgrade'
            elif new_branch_major_db_version == cur_branch_major_db_version:
                return 'equal'
            else:
                return 'downgrade'
        except Exception as e:
            return repr(e)

    def find_install_type(self):
        """
        Determine how this copy of Medusa was installed.

        :return: the update manager matching the installation:
            DockerUpdateManager: any docker build
            GitUpdateManager: running from source using git
            SourceUpdateManager: running from source without git
        """
        if self.runs_in_docker():
            return DockerUpdateManager()
        elif os.path.isdir(os.path.join(app.PROG_DIR, u'.git')):
            return GitUpdateManager()

        return SourceUpdateManager()

    def check_for_new_version(self, force=False):
        """
        Check the internet for a newer version.

        :param force: if true, the VERSION_NOTIFY setting is ignored and a check is forced
        :return: bool, True for new version or False for no new version.
        """
        if not self.updater or (not app.VERSION_NOTIFY and not app.AUTO_UPDATE and not force):
            log.info(u'Version checking is disabled, not checking for the newest version')
            app.NEWEST_VERSION_STRING = None
            return False

        # checking for updates
        if not app.AUTO_UPDATE:
            log.info(u'Checking for updates using {0}', self.updater)

        if not self.updater.need_update():
            app.NEWEST_VERSION_STRING = None

            if force:
                ui.notifications.message('No update needed')
                log.info(u'No update needed')

            # no updates needed
            return False

        # found updates
        return self.updater.can_update()

    def check_for_new_news(self, force=False):
        """
        Check GitHub for the latest news.

        :param force: ignored
        :return: unicode, a copy of the news
        """
        # Grab a copy of the news
        log.debug(u'Checking GitHub for latest news.')
        response = self.session.get(app.NEWS_URL)
        if not response or not response.text:
            log.debug(u'Could not load news from URL: {0}', app.NEWS_URL)
            return

        try:
            last_read = datetime.datetime.strptime(app.NEWS_LAST_READ, '%Y-%m-%d')
        except ValueError:
            log.warning(u'Invalid news last read date: {0}', app.NEWS_LAST_READ)
            last_read = 0

        news = response.text
        app.NEWS_UNREAD = 0
        got_latest = False
        for match in re.finditer(r'^####\s*(\d{4}-\d{2}-\d{2})\s*####', news, re.M):
            if not got_latest:
                got_latest = True
                app.NEWS_LATEST = match.group(1)

            try:
                if datetime.datetime.strptime(match.group(1), '%Y-%m-%d') > last_read:
                    app.NEWS_UNREAD += 1
            except ValueError:
                log.warning(u'Unable to match latest news date. Repository news date: {0}', match.group(1))
                pass

        return news

    def need_update(self):
        if self.updater:
            return self.updater.need_update()

    def update(self):
        if self.updater:
            # update branch with current config branch value
            self.updater.branch = app.BRANCH

            # check for updates
            if self.updater.need_update() and self.updater.can_update():
                return self.updater.update()

        return False

    def list_remote_branches(self):
        if self.updater:
            app.GIT_REMOTE_BRANCHES = self.updater.list_remote_branches()
        return app.GIT_REMOTE_BRANCHES

    def get_branch(self):
        if self.updater:
            return self.updater.branch

    @staticmethod
    def runs_in_docker():
        """
        Check if Medusa is run in a docker container.

        If run in a container, we don't want to use the auto update feature, but just want to inform the user
        there is an update available. The user can update by pulling the latest docker tag.
        """
        if app.RUNS_IN_DOCKER is not None:
            return app.RUNS_IN_DOCKER

        path = '/proc/{pid}/cgroup'.format(pid=os.getpid())
        try:
            if not os.path.isfile(path):
                return False

            with open(path) as f:
                for line in f:
                    if re.match(r'\d+:[\w=]+:/docker(-[ce]e)?/\w+', line):
                        log.debug(u'Running in a docker container')
                        app.RUNS_IN_DOCKER = True
                        return True
                return False
        except (EnvironmentError, OSError) as error:
            log.info(u'Tried to check the path {path} to determine whether we are running in a docker container, '
                     u'but an error occurred: {error}', {'path': path, 'error': error})
            return False
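
# Editor's sketch (not part of the original Medusa source): a minimal standalone
# version of the cgroup check used by runs_in_docker() above. The regex mirrors
# the method; '/proc/self/cgroup' is used here as a shortcut for '/proc/<pid>/cgroup',
# and everything else is an assumption made purely for illustration.
import os
import re


def _looks_like_docker(cgroup_path='/proc/self/cgroup'):
    """Return True if the cgroup file suggests we are inside a docker container."""
    if not os.path.isfile(cgroup_path):
        return False
    with open(cgroup_path) as handle:
        return any(re.match(r'\d+:[\w=]+:/docker(-[ce]e)?/\w+', line) for line in handle)


# Example: _looks_like_docker() -> False on a regular host, True inside containers
# that expose the classic cgroup v1 layout.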
Example #14
0
class GenericProvider(object):
    """Generic provider."""

    NZB = 'nzb'
    TORRENT = 'torrent'

    def __init__(self, name):
        """Initialize the class."""
        self.name = name

        self.anime_only = False
        self.cache = tv.Cache(self)
        self.enable_backlog = False
        self.enable_manualsearch = False
        self.enable_daily = False
        self.enabled = False
        self.headers = {'User-Agent': USER_AGENT}
        self.proper_strings = ['PROPER|REPACK|REAL|RERIP']
        self.provider_type = None
        self.public = False
        self.search_fallback = False
        self.search_mode = None
        self.session = MedusaSafeSession(cloudflare=True)
        self.session.headers.update(self.headers)
        self.series = None
        self.supports_absolute_numbering = False
        self.supports_backlog = True
        self.url = ''
        self.urls = {}

        # Ability to override the search separator. For example, anizb uses '*' instead of a space.
        self.search_separator = ' '
        self.season_templates = (
            'S{season:0>2}',  # example: 'Series.Name.S03'
        )

        # Use and configure the attribute enable_cookies to show or hide the cookies input field per provider
        self.enable_cookies = False
        self.cookies = ''

        # Parameters for reducing the daily search results parsing
        self.max_recent_items = 5
        self.stop_at = 3

        # Delay downloads
        self.enable_search_delay = False
        self.search_delay = 480  # minutes

    @classmethod
    def kind(cls):
        """Return the name of the current class."""
        return cls.__name__

    def download_result(self, result):
        """Download result from provider."""
        if not self.login():
            return False

        urls, filename = self._make_url(result)

        for url in urls:
            if 'NO_DOWNLOAD_NAME' in url:
                continue

            if url.startswith('http'):
                self.headers.update({
                    'Referer': '/'.join(url.split('/')[:3]) + '/'
                })

            log.info('Downloading {result} from {provider} at {url}',
                     {'result': result.name, 'provider': self.name, 'url': url})

            verify = False if self.public else None

            if download_file(url, filename, session=self.session, headers=self.headers,
                             verify=verify):

                if self._verify_download(filename):
                    log.info('Saved {result} to {location}',
                             {'result': result.name, 'location': filename})
                    return True

        log.warning('Failed to download any results for {result}',
                    {'result': result.name})

        return False

    def _make_url(self, result):
        """Return url if result is a magnet link."""
        urls = []
        filename = ''

        if not result or not result.url:
            return urls, filename

        urls = [result.url]
        result_name = sanitize_filename(result.name)

        # TODO: Remove this in future versions, kept for the warning
        # Some NZB providers (e.g. Jackett) can also download torrents
        # A similar check is performed for NZB splitting in medusa/search/core.py @ search_providers()
        if (result.url.endswith(GenericProvider.TORRENT) or
                result.url.startswith('magnet:')) and self.provider_type == GenericProvider.NZB:
            filename = join(app.TORRENT_DIR, result_name + '.torrent')
            log.warning('Using Jackett providers as Newznab providers is deprecated!'
                        ' Switch them to Jackett providers as soon as possible.')
        else:
            filename = join(self._get_storage_dir(), result_name + '.' + self.provider_type)

        return urls, filename

    def _verify_download(self, file_name=None):
        return True

    def get_content(self, url, params=None, timeout=30, **kwargs):
        """Retrieve the torrent/nzb content."""
        return self.session.get_content(url, params=params, timeout=timeout, **kwargs)

    def find_propers(self, proper_candidates):
        """Find propers in providers."""
        results = []

        for proper_candidate in proper_candidates:
            series_obj = Show.find_by_id(app.showList, proper_candidate['indexer'], proper_candidate['showid'])

            if series_obj:
                self.series = series_obj
                episode_obj = series_obj.get_episode(proper_candidate['season'], proper_candidate['episode'])

                for term in self.proper_strings:
                    search_strings = self._get_episode_search_strings(episode_obj, add_string=term)

                    for item in self.search(search_strings[0], ep_obj=episode_obj):
                        search_result = self.get_result(series=series_obj, item=item)
                        if search_result in results:
                            continue

                        search_result.search_type = PROPER_SEARCH
                        results.append(search_result)

        return results

    def search_results_in_cache(self, episodes):
        """
        Search the cache for the given episodes.

        Search this provider's cache (db).
        :param episodes: List of Episode objects

        :return: A dict of search results, ordered by episode number
        """
        return self.cache.find_episodes(episodes)

    def find_search_results(self, series, episodes, search_mode, forced_search=False, download_current_quality=False,
                            manual_search=False, manual_search_type='episode'):
        """
        Search the provider for the given episodes.

        Search the provider using http queries.
        :param series: Series object
        :param episodes: List of Episode objects
        :param search_mode: 'eponly' or 'sponly'
        :param forced_search: Flag if the search was triggered by a forced search
        :param download_current_quality: Flag if we want to include an already downloaded quality in the new search
        :param manual_search: Flag if the search was triggered by a manual search
        :param manual_search_type: How the manual search was started: For example an 'episode' or 'season'

        :return: A dict of search results, ordered by episode number.
        """
        self._check_auth()
        self.series = series

        season_search = (len(episodes) > 1 or manual_search_type == 'season') and search_mode == 'sponly'
        results = []

        for episode in episodes:
            search_strings = []
            if season_search:
                search_strings = self._get_season_search_strings(episode)
            elif search_mode == 'eponly':
                search_strings = self._get_episode_search_strings(episode)

            for search_string in search_strings:
                # Find results from the provider
                items = self.search(search_string, ep_obj=episode, manual_search=manual_search)
                for item in items:
                    result = self.get_result(series=series, item=item)
                    if result not in results:
                        result.quality = Quality.quality_from_name(result.name, series.is_anime)
                        results.append(result)

            # In season search, we can't loop in episodes lists as we
            # only need one episode to get the season string
            if search_mode == 'sponly':
                break

        log.debug('Found {0} unique search results', len(results))

        # sort qualities in descending order
        results.sort(key=operator.attrgetter('quality'), reverse=True)

        # Move through each item and parse with NameParser()
        for search_result in results:

            if forced_search:
                search_result.search_type = FORCED_SEARCH
            search_result.download_current_quality = download_current_quality
            search_result.result_wanted = True

            try:
                search_result.parsed_result = NameParser(
                    parse_method=('normal', 'anime')[series.is_anime]).parse(
                        search_result.name)
            except (InvalidNameException, InvalidShowException) as error:
                log.debug('Error during parsing of release name: {release_name}, with error: {error}',
                          {'release_name': search_result.name, 'error': error})
                search_result.add_cache_entry = False
                search_result.result_wanted = False
                continue

            # I don't know why I'm doing this. Maybe remove it altogether later on, now that I've added the
            # parsed_result to the search_result.
            search_result.series = search_result.parsed_result.series
            search_result.quality = search_result.parsed_result.quality
            search_result.release_group = search_result.parsed_result.release_group
            search_result.version = search_result.parsed_result.version
            search_result.actual_season = search_result.parsed_result.season_number
            search_result.actual_episodes = search_result.parsed_result.episode_numbers

            if not manual_search:
                if not (search_result.series.air_by_date or search_result.series.sports):
                    if search_mode == 'sponly':
                        if search_result.parsed_result.episode_numbers:
                            log.debug(
                                'This is supposed to be a season pack search but the result {0} is not a valid '
                                'season pack, skipping it', search_result.name
                            )
                            search_result.result_wanted = False
                            continue
                        elif not [ep for ep in episodes if
                                  search_result.parsed_result.season_number == (ep.season, ep.scene_season)
                                  [ep.series.is_scene]]:
                            log.debug(
                                'This season result {0} is for a season we are not searching for, '
                                'skipping it', search_result.name
                            )
                            search_result.result_wanted = False
                            continue
                    else:
                        # I'm going to split these up for better readability
                        # Check if at least got a season parsed.
                        if search_result.parsed_result.season_number is None:
                            log.debug(
                                "The result {0} doesn't seem to have a valid season that we are currently trying to "
                                'snatch, skipping it', search_result.name
                            )
                            search_result.result_wanted = False
                            continue

                        # Check if we at least got some episode numbers parsed.
                        if not search_result.parsed_result.episode_numbers:
                            log.debug(
                                "The result {0} doesn't seem to match an episode that we are currently trying to "
                                'snatch, skipping it', search_result.name
                            )
                            search_result.result_wanted = False
                            continue

                        # Compare the episodes and season from the result with what was searched.
                        wanted_ep = False
                        for searched_ep in episodes:
                            if searched_ep.series.is_scene:
                                season = searched_ep.scene_season
                                episode = searched_ep.scene_episode
                            else:
                                season = searched_ep.season
                                episode = searched_ep.episode

                            if (season == search_result.parsed_result.season_number
                                    and episode in search_result.parsed_result.episode_numbers):
                                wanted_ep = True
                                break

                        if not wanted_ep:
                            log.debug(
                                "The result {0} doesn't seem to match an episode that we are currently trying to "
                                'snatch, skipping it', search_result.name
                            )
                            search_result.result_wanted = False
                            continue

                    # We've performed some checks to decide if we want to continue with this result.
                    # If we've hit this, that means this is not an air_by_date and not a sports show. And it seems to be
                    # a valid result. Let's store the parsed season and episode number and continue.
                    search_result.actual_season = search_result.parsed_result.season_number
                    search_result.actual_episodes = search_result.parsed_result.episode_numbers
                else:
                    # air_by_date or sports show.
                    search_result.same_day_special = False

                    if not search_result.parsed_result.is_air_by_date:
                        log.debug(
                            "This is supposed to be a date search but the result {0} didn't parse as one, "
                            'skipping it', search_result.name
                        )
                        search_result.result_wanted = False
                        continue
                    else:
                        # Use a query against the tv_episodes table, to match the parsed air_date against.
                        air_date = search_result.parsed_result.air_date.toordinal()
                        db = DBConnection()
                        sql_results = db.select(
                            'SELECT season, episode FROM tv_episodes WHERE indexer = ? AND showid = ? AND airdate = ?',
                            [search_result.series.indexer, search_result.series.series_id, air_date]
                        )

                        if len(sql_results) == 2:
                            if int(sql_results[0]['season']) == 0 and int(sql_results[1]['season']) != 0:
                                search_result.actual_season = int(sql_results[1]['season'])
                                search_result.actual_episodes = [int(sql_results[1]['episode'])]
                                search_result.same_day_special = True
                            elif int(sql_results[1]['season']) == 0 and int(sql_results[0]['season']) != 0:
                                search_result.actual_season = int(sql_results[0]['season'])
                                search_result.actual_episodes = [int(sql_results[0]['episode'])]
                                search_result.same_day_special = True
                        elif len(sql_results) != 1:
                            log.warning(
                                "Tried to look up the date for the episode {0} but the database didn't return proper "
                                'results, skipping it', search_result.name
                            )
                            search_result.result_wanted = False
                            continue

                        # @TODO: Need to verify and test this.
                        if search_result.result_wanted and not search_result.same_day_special:
                            search_result.actual_season = int(sql_results[0]['season'])
                            search_result.actual_episodes = [int(sql_results[0]['episode'])]

        final_results = {}
        cl = []
        # Iterate again over the search results, and see if there is anything we want.
        for search_result in results:

            # Try to cache the item if we want to.
            cache_result = search_result.add_result_to_cache(self.cache)
            if cache_result is not None:
                cl.append(cache_result)

            if not search_result.result_wanted:
                log.debug("We aren't interested in this result: {0} with url: {1}",
                          search_result.name, search_result.url)
                continue

            log.debug('Found result {0} at {1}', search_result.name, search_result.url)

            search_result.update_search_result()

            if search_result.episode_number == SEASON_RESULT:
                log.debug('Found season pack result {0} at {1}', search_result.name, search_result.url)
            elif search_result.episode_number == MULTI_EP_RESULT:
                log.debug('Found multi-episode ({0}) result {1} at {2}',
                          ', '.join(map(str, search_result.parsed_result.episode_numbers)),
                          search_result.name,
                          search_result.url)
            else:
                log.debug('Found single episode result {0} at {1}', search_result.name, search_result.url)

            if search_result.episode_number not in final_results:
                final_results[search_result.episode_number] = [search_result]
            else:
                final_results[search_result.episode_number].append(search_result)

        if cl:
            # Access to a protected member of a client class
            db = self.cache._get_db()
            db.mass_action(cl)

        return final_results

    def get_id(self):
        """Get ID of the provider."""
        return GenericProvider.make_id(self.name)

    def get_result(self, series, item=None, cache=None):
        """Get result."""
        search_result = SearchResult(provider=self, series=series,
                                     item=item, cache=cache)

        return search_result

    def image_name(self):
        """Return provider image name."""
        return self.get_id() + '.png'

    def is_active(self):
        """Check if provider is active."""
        return False

    def is_enabled(self):
        """Check if provider is enabled."""
        return bool(self.enabled)

    @staticmethod
    def make_id(name):
        """Make ID of the provider."""
        if not name:
            return ''

        return re.sub(r'[^\w\d_]', '_', str(name).strip().lower())
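
    # Editor's note (illustrative only): make_id() lowercases the name and collapses
    # anything that is not a word character into underscores, e.g.
    #   GenericProvider.make_id('Beyond-HD')  ->  'beyond_hd'
    #   GenericProvider.make_id(' EZTV ')     ->  'eztv'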

    def seed_ratio(self):
        """Return ratio."""
        return ''

    def _check_auth(self):
        """Check if we are autenticated."""
        return True

    def login(self):
        """Login to provider."""
        return True

    def search(self, search_strings, age=0, ep_obj=None, **kwargs):
        """Search the provider."""
        return []

    @staticmethod
    def parse_pubdate(pubdate, human_time=False, timezone=None, **kwargs):
        """
        Parse publishing date into a datetime object.

        :param pubdate: date and time string
        :param human_time: string uses human slang ("4 hours ago")
        :param timezone: use a different timezone ("US/Eastern")

        :keyword dayfirst: Interpret the first value as the day
        :keyword yearfirst: Interpret the first value as the year

        :returns: a datetime object or None
        """
        now_alias = ('right now', 'just now', 'now')

        df = kwargs.pop('dayfirst', False)
        yf = kwargs.pop('yearfirst', False)
        fromtimestamp = kwargs.pop('fromtimestamp', False)

        # This can happen from time to time
        if pubdate is None:
            log.debug('Skipping invalid publishing date.')
            return

        try:
            if human_time:
                if pubdate.lower() in now_alias:
                    seconds = 0
                else:
                    match = re.search(r'(?P<time>[\d.]+\W*)(?P<granularity>\w+)', pubdate)
                    matched_time = match.group('time')
                    matched_granularity = match.group('granularity')

                    # The parse method does not support decimals used with the month,
                    # months, year or years granularities.
                    if matched_granularity and matched_granularity in ('month', 'months', 'year', 'years'):
                        matched_time = int(round(float(matched_time.strip())))

                    seconds = parse('{0} {1}'.format(matched_time, matched_granularity))
                    if seconds is None:
                        log.warning('Failed parsing human time: {0} {1}', matched_time, matched_granularity)
                        raise ValueError('Failed parsing human time: {0} {1}'.format(matched_time, matched_granularity))

                return datetime.now(tz.tzlocal()) - timedelta(seconds=seconds)

            if fromtimestamp:
                dt = datetime.fromtimestamp(int(pubdate), tz=tz.gettz('UTC'))
            else:
                day_offset = 0
                if 'yesterday at' in pubdate.lower() or 'today at' in pubdate.lower():
                    # Extract a time
                    time = re.search(r'(?P<time>[0-9:]+)', pubdate)
                    if time:
                        if 'yesterday' in pubdate:
                            day_offset = 1
                        pubdate = time.group('time').strip()

                dt = parser.parse(pubdate, dayfirst=df, yearfirst=yf, fuzzy=True) - timedelta(days=day_offset)

            # Always make UTC aware if naive
            if dt.tzinfo is None or dt.tzinfo.utcoffset(dt) is None:
                dt = dt.replace(tzinfo=tz.gettz('UTC'))
            if timezone:
                dt = dt.astimezone(tz.gettz(timezone))

            return dt
        except (AttributeError, TypeError, ValueError):
            log.exception('Failed parsing publishing date: {0}', pubdate)
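
    # Editor's note: a few illustrative calls for parse_pubdate() (the values are
    # made up; the results depend on the current time and the local timezone):
    #   parse_pubdate('2018-04-20 21:15:00', timezone='US/Eastern')
    #   parse_pubdate('4 hours ago', human_time=True)
    #   parse_pubdate('1524258900', fromtimestamp=True)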

    def _create_air_by_date_search_string(self, show_scene_name, episode, search_string, add_string=None):
        """Create a search string used for series that are indexed by air date."""
        episode_string = show_scene_name + self.search_separator
        episode_string += str(episode.airdate).replace('-', ' ')

        if add_string:
            episode_string += self.search_separator + add_string

        search_string['Episode'].append(episode_string.strip())

    def _create_sports_search_string(self, show_scene_name, episode, search_string, add_string=None):
        """Create a search string used for sport series."""
        episode_string = show_scene_name + self.search_separator

        episode_string += str(episode.airdate).replace('-', ' ')
        episode_string += ('|', ' ')[len(self.proper_strings) > 1]  # '|' for a single proper string, otherwise a space
        episode_string += episode.airdate.strftime('%b')

        if add_string:
            episode_string += self.search_separator + add_string

        search_string['Episode'].append(episode_string.strip())

    def _create_anime_search_string(self, show_scene_name, episode, search_string, add_string=None):
        """Create a search string used for as anime 'marked' shows."""
        episode_string = show_scene_name + self.search_separator

        # If the show name is a season scene exception, we want to use the episode number
        if episode.scene_season > 0 and show_scene_name in scene_exceptions.get_season_scene_exceptions(
                episode.series, episode.scene_season):
            # This is apparently a season exception, let's use the episode instead of absolute
            ep = episode.scene_episode
        else:
            ep = episode.scene_absolute_number if episode.series.is_scene else episode.absolute_number

        episode_string += '{episode:0>2}'.format(episode=ep)

        if add_string:
            episode_string += self.search_separator + add_string

        search_string['Episode'].append(episode_string.strip())

    def _create_default_search_string(self, show_scene_name, episode, search_string, add_string=None):
        """Create a default search string, used for standard type S01E01 tv series."""
        episode_string = show_scene_name + self.search_separator

        episode_string += config.naming_ep_type[2] % {
            'seasonnumber': episode.scene_season if episode.series.is_scene else episode.season,
            'episodenumber': episode.scene_episode if episode.series.is_scene else episode.episode,
        }

        if add_string:
            episode_string += self.search_separator + add_string

        search_string['Episode'].append(episode_string.strip())
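
    # Editor's note (illustrative, assuming config.naming_ep_type[2] renders the
    # usual 'S01E01' pattern): for season 3, episode 7 of 'Show Name' with no
    # add_string, the appended search string would look like 'Show Name S03E07'.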

    def _get_episode_search_strings(self, episode, add_string=''):
        """Get episode search strings."""
        if not episode:
            return []

        search_string = {
            'Episode': []
        }

        all_possible_show_names = episode.series.get_all_possible_names()
        if episode.scene_season:
            all_possible_show_names = all_possible_show_names.union(
                episode.series.get_all_possible_names(season=episode.scene_season)
            )

        for show_name in all_possible_show_names:

            if episode.series.air_by_date:
                self._create_air_by_date_search_string(show_name, episode, search_string, add_string=add_string)
            elif episode.series.sports:
                self._create_sports_search_string(show_name, episode, search_string, add_string=add_string)
            elif episode.series.anime:
                self._create_anime_search_string(show_name, episode, search_string, add_string=add_string)
            else:
                self._create_default_search_string(show_name, episode, search_string, add_string=add_string)

        return [search_string]

    def _get_tvdb_id(self):
        """Return the tvdb id if the shows indexer is tvdb. If not, try to use the externals to get it."""
        if not self.series:
            return None

        return self.series.indexerid if self.series.indexer == INDEXER_TVDBV2 else self.series.externals.get('tvdb_id')

    def _get_season_search_strings(self, episode):
        search_string = {
            'Season': []
        }

        for show_name in episode.series.get_all_possible_names(season=episode.scene_season):
            episode_string = show_name + self.search_separator

            if episode.series.air_by_date or episode.series.sports:
                search_string['Season'].append(episode_string + str(episode.airdate).split('-')[0])
            elif episode.series.anime:
                search_string['Season'].append(episode_string + 'Season')
            else:
                for season_template in self.season_templates:
                    templated_episode_string = episode_string + season_template.format(season=episode.scene_season)
                    search_string['Season'].append(templated_episode_string.strip())

        return [search_string]
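
    # Editor's note (illustrative only): with the default season template
    # 'S{season:0>2}', a season 3 search for 'Show Name' appends 'Show Name S03';
    # air-by-date and sports shows fall back to the airdate year, and anime shows
    # to the literal word 'Season'.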

    def _get_size(self, item):
        """Return default size."""
        return -1

    def _get_storage_dir(self):
        """Return storage dir."""
        return ''

    def _get_result_info(self, item):
        """Return default seeders and leechers."""
        return -1, -1

    def _get_pubdate(self, item):
        """Return publish date of the item.

        If provider doesnt have _get_pubdate function this will be used
        """
        return None

    def _get_title_and_url(self, item):
        """Return title and url from result."""
        if not item:
            return '', ''

        title = item.get('title', '')
        url = item.get('link', '')

        if title:
            title = title.replace(' ', '.')
        else:
            title = ''

        if url:
            url = url.replace('&amp;', '&').replace('%26tr%3D', '&tr=')
        else:
            url = ''

        return title, url
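
    # Editor's note (illustrative only, made-up URL): the replacements above undo
    # common feed escaping, e.g.
    #   'magnet:?xt=urn:btih:abc123%26tr%3Dudp://tracker.example:80&amp;dn=Foo'
    # becomes
    #   'magnet:?xt=urn:btih:abc123&tr=udp://tracker.example:80&dn=Foo'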

    @staticmethod
    def _get_identifier(item):
        """
        Return the identifier for the item.

        By default this is the url. Providers can override this when needed.
        """
        return item.url

    @property
    def recent_results(self):
        """Return recent RSS results from provier."""
        return recent_results.get(self.get_id(), [])

    @recent_results.setter
    def recent_results(self, items):
        """Set recent results from provider."""
        if not recent_results.get(self.get_id()):
            recent_results.update({self.get_id(): []})
        if items:
            add_to_list = []
            for item in items:
                if item not in recent_results[self.get_id()]:
                    add_to_list += [item]
            results = add_to_list + recent_results[self.get_id()]
            recent_results[self.get_id()] = results[:self.max_recent_items]

    def add_cookies_from_ui(self):
        """
        Add the cookies configured from UI to the providers requests session.

        :return: A dict with the keys 'result' (bool) and 'message' (string)
        """
        # Exception for RSS torrent providers, as for them adding cookies initially should be optional.
        from medusa.providers.torrent.rss.rsstorrent import TorrentRssProvider
        if isinstance(self, TorrentRssProvider) and not self.cookies:
            return {'result': True,
                    'message': 'This is a TorrentRss provider without any cookies provided. '
                               'Cookies for this provider are considered optional.'}

        # This is the generic attribute used to manually add cookies for provider authentication
        if not self.enable_cookies:
            return {'result': False,
                    'message': 'Adding cookies is not supported for provider: {0}'.format(self.name)}

        if not self.cookies:
            return {'result': False,
                    'message': 'No Cookies added from ui for provider: {0}'.format(self.name)}

        cookie_validator = re.compile(r'^([\w%]+=[\w%]+)(;[\w%]+=[\w%]+)*$')
        if not cookie_validator.match(self.cookies):
            ui.notifications.message(
                'Failed to validate cookie for provider {provider}'.format(provider=self.name),
                'Cookie is not correctly formatted: {0}'.format(self.cookies))
            return {'result': False,
                    'message': 'Cookie is not correctly formatted: {0}'.format(self.cookies)}

        if self.required_cookies:
            if self.name == 'Beyond-HD':
                if not any('remember_web_' in x.rsplit('=', 1)[0] for x in self.cookies.split(';')):
                    return {
                        'result': False,
                        'message': "You haven't configured the required cookies. Please login at {provider_url}, "
                        'and make sure you have copied the following cookies: {required_cookies!r}'.format(
                            provider_url=self.name, required_cookies=self.required_cookies
                        )
                    }
            else:
                if not all(req_cookie in [x.rsplit('=', 1)[0] for x in self.cookies.split(';')]
                           for req_cookie in self.required_cookies):
                    return {
                        'result': False,
                        'message': "You haven't configured the required cookies. Please login at {provider_url}, "
                        'and make sure you have copied the following cookies: {required_cookies!r}'.format(
                            provider_url=self.name, required_cookies=self.required_cookies
                        )
                    }

        # cookie_validator got at least one cookie key/value pair, let's return success
        add_dict_to_cookiejar(self.session.cookies, dict(x.rsplit('=', 1) for x in self.cookies.split(';')))
        return {'result': True,
                'message': ''}

    def check_required_cookies(self):
        """
        Check if we have the required cookies in the requests sessions object.

        Meaning that we've already successfully authenticated once, and we don't need to go through this again.
        Note! This doesn't mean the cookies are correct!
        """
        if not hasattr(self, 'required_cookies'):
            # A reminder for the developer, implementing cookie based authentication.
            log.error(
                'You need to configure the required_cookies attribute, for the provider: {provider}',
                {'provider': self.name}
            )
            return False
        return all(dict_from_cookiejar(self.session.cookies).get(cookie) for cookie in self.required_cookies)

    def cookie_login(self, check_login_text, check_url=None):
        """
        Check the response for text that indicates a login prompt.

        In that case, the cookie authentication was not successful.
        :param check_login_text: A string that's visible when the authentication failed.
        :param check_url: The url used to test the login with cookies. By default the provider's home page is used.

        :return: False when authentication was not successful. True if successful.
        """
        check_url = check_url or self.url

        if self.check_required_cookies():
            # All required cookies have been found within the current session, we don't need to go through this again.
            return True

        if self.cookies:
            result = self.add_cookies_from_ui()
            if not result['result']:
                ui.notifications.message(result['message'])
                log.warning(result['message'])
                return False
        else:
            log.warning('Failed to login, you will need to add your cookies in the provider settings')
            ui.notifications.error('Failed to auth with {provider}'.format(provider=self.name),
                                   'You will need to add your cookies in the provider settings')
            return False

        response = self.session.get(check_url)
        if not response or any([not (response.text and response.status_code == 200),
                                check_login_text.lower() in response.text.lower()]):
            log.warning('Please configure the required cookies for this provider. Check your provider settings')
            ui.notifications.error('Wrong cookies for {provider}'.format(provider=self.name),
                                   'Check your provider settings')
            self.session.cookies.clear()
            return False
        else:
            return True

    def __str__(self):
        """Return provider name and provider type."""
        return '{provider_name} ({provider_type})'.format(provider_name=self.name, provider_type=self.provider_type)

    def __unicode__(self):
        """Return provider name and provider type."""
        return '{provider_name} ({provider_type})'.format(provider_name=self.name, provider_type=self.provider_type)

    def to_json(self):
        """Return a json representation for a provider."""
        from medusa.providers.torrent.torrent_provider import TorrentProvider
        return {
            'name': self.name,
            'id': self.get_id(),
            'config': {
                'enabled': self.enabled,
                'search': {
                    'backlog': {
                        'enabled': self.enable_backlog
                    },
                    'manual': {
                        'enabled': self.enable_backlog
                    },
                    'daily': {
                        'enabled': self.enable_backlog,
                        'maxRecentItems': self.max_recent_items,
                        'stopAt': self.stop_at
                    },
                    'fallback': self.search_fallback,
                    'mode': self.search_mode,
                    'separator': self.search_separator,
                    'seasonTemplates': self.season_templates,
                    'delay': {
                        'enabled': self.enable_search_delay,
                        'duration': self.search_delay
                    }
                }
            },
            'animeOnly': self.anime_only,
            'type': self.provider_type,
            'public': self.public,
            'btCacheUrls': self.bt_cache_urls if isinstance(self, TorrentProvider) else [],
            'properStrings': self.proper_strings,
            'headers': self.headers,
            'supportsAbsoluteNumbering': self.supports_absolute_numbering,
            'supportsBacklog': self.supports_backlog,
            'url': self.url,
            'urls': self.urls,
            'cookies': {
                'enabled': self.enable_cookies,
                'required': self.cookies
            }
        }
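

# Editor's sketch (not part of the original Medusa source): a standalone illustration
# of the cookie-string validation and parsing performed by add_cookies_from_ui() above.
# The sample cookie string is made up.
import re

COOKIE_VALIDATOR = re.compile(r'^([\w%]+=[\w%]+)(;[\w%]+=[\w%]+)*$')


def parse_cookie_setting(raw_cookies):
    """Return the cookies as a dict, or None when the string is malformed."""
    if not COOKIE_VALIDATOR.match(raw_cookies):
        return None
    return dict(pair.rsplit('=', 1) for pair in raw_cookies.split(';'))


# Example: parse_cookie_setting('uid=1234;pass=567abc') -> {'uid': '1234', 'pass': '567abc'}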
Example #15
0
class GenericProvider(object):
    """Generic provider."""

    NZB = 'nzb'
    TORRENT = 'torrent'

    def __init__(self, name):
        """Initialize the class."""
        self.name = name

        self.anime_only = False
        self.bt_cache_urls = [
            'http://reflektor.karmorra.info/torrent/{info_hash}.torrent',
            'https://asnet.pw/download/{info_hash}/',
            'http://p2pdl.com/download/{info_hash}',
            'http://itorrents.org/torrent/{info_hash}.torrent',
            'http://thetorrent.org/torrent/{info_hash}.torrent',
            'https://cache.torrentgalaxy.org/get/{info_hash}',
            'https://www.seedpeer.me/torrent/{info_hash}',
        ]
        self.cache = tv.Cache(self)
        self.enable_backlog = False
        self.enable_manualsearch = False
        self.enable_daily = False
        self.enabled = False
        self.headers = {'User-Agent': USER_AGENT}
        self.proper_strings = ['PROPER|REPACK|REAL|RERIP']
        self.provider_type = None
        self.public = False
        self.search_fallback = False
        self.search_mode = None
        self.session = MedusaSafeSession(cloudflare=True)
        self.session.headers.update(self.headers)
        self.series = None
        self.supports_absolute_numbering = False
        self.supports_backlog = True
        self.url = ''
        self.urls = {}

        # Ability to override the search separator. For example, anizb uses '*' instead of a space.
        self.search_separator = ' '
        self.season_templates = (
            'S{season:0>2}',  # example: 'Series.Name.S03'
        )

        # Use and configure the attribute enable_cookies to show or hide the cookies input field per provider
        self.enable_cookies = False
        self.cookies = ''

        # Parameters for reducing the daily search results parsing
        self.max_recent_items = 5
        self.stop_at = 3

        # Delay downloads
        self.enable_search_delay = False
        self.search_delay = 480  # minutes

    @classmethod
    def kind(cls):
        """Return the name of the current class."""
        return cls.__name__

    def download_result(self, result):
        """Download result from provider."""
        if not self.login():
            return False

        urls, filename = self._make_url(result)

        for url in urls:
            if 'NO_DOWNLOAD_NAME' in url:
                continue

            if url.startswith('http'):
                self.headers.update({
                    'Referer': '/'.join(url.split('/')[:3]) + '/'
                })

            log.info('Downloading {result} from {provider} at {url}',
                     {'result': result.name, 'provider': self.name, 'url': url})

            verify = False if self.public else None

            if download_file(url, filename, session=self.session, headers=self.headers,
                             verify=verify):

                if self._verify_download(filename):
                    log.info('Saved {result} to {location}',
                             {'result': result.name, 'location': filename})
                    return True

        log.warning('Failed to download any results for {result}',
                    {'result': result.name})

        return False

    def _make_url(self, result):
        """Return url if result is a magnet link."""
        urls = []
        filename = ''

        if not result or not result.url:
            return urls, filename

        urls = [result.url]
        result_name = sanitize_filename(result.name)

        # TODO: Remove this in future versions, kept for the warning
        # Some NZB providers (e.g. Jackett) can also download torrents
        # A similar check is performed for NZB splitting in medusa/search/core.py @ search_providers()
        if (result.url.endswith(GenericProvider.TORRENT) or
                result.url.startswith('magnet:')) and self.provider_type == GenericProvider.NZB:
            filename = join(app.TORRENT_DIR, result_name + '.torrent')
            log.warning('Using Jackett providers as Newznab providers is deprecated!'
                        ' Switch them to Jackett providers as soon as possible.')
        else:
            filename = join(self._get_storage_dir(), result_name + '.' + self.provider_type)

        return urls, filename

    def _verify_download(self, file_name=None):
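        # Hook for subclasses to validate the downloaded file; the base implementation accepts everything.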
        return True

    def get_content(self, url, params=None, timeout=30, **kwargs):
        """Retrieve the torrent/nzb content."""
        return self.session.get_content(url, params=params, timeout=timeout, **kwargs)

    def find_propers(self, proper_candidates):
        """Find propers in providers."""
        results = []

        for proper_candidate in proper_candidates:
            series_obj = Show.find_by_id(app.showList, proper_candidate['indexer'], proper_candidate['showid'])

            if series_obj:
                self.series = series_obj
                episode_obj = series_obj.get_episode(proper_candidate['season'], proper_candidate['episode'])

                for term in self.proper_strings:
                    search_strings = self._get_episode_search_strings(episode_obj, add_string=term)

                    for item in self.search(search_strings[0], ep_obj=episode_obj):
                        search_result = self.get_result()
                        results.append(search_result)

                        search_result.name, search_result.url = self._get_title_and_url(item)
                        search_result.seeders, search_result.leechers = self._get_result_info(item)
                        search_result.size = self._get_size(item)
                        search_result.pubdate = self._get_pubdate(item)

                        # This will be retrieved from the parser
                        search_result.proper_tags = ''

                        search_result.search_type = PROPER_SEARCH
                        search_result.date = datetime.today()
                        search_result.series = series_obj

        return results

    @staticmethod
    def remove_duplicate_mappings(items, pk='link'):
        """
        Remove duplicate items from an iterable of mappings.

        :param items: An iterable of mappings
        :param pk: Primary key for removing duplicates
        :return: An iterable of unique mappings
        """
        return list(
            itervalues(OrderedDict(
                (item[pk], item)
                for item in items
            ))
        )

    def search_results_in_cache(self, episodes):
        """
        Search episodes based on param in cache.

        Search the cache (db) for this provider
        :param episodes: List of Episode objects

        :return: A dict of search results, ordered by episode number
        """
        return self.cache.find_episodes(episodes)

    def find_search_results(self, series, episodes, search_mode, forced_search=False, download_current_quality=False,
                            manual_search=False, manual_search_type='episode'):
        """
        Search episodes based on param.

        Search the provider using http queries.
        :param series: Series object
        :param episodes: List of Episode objects
        :param search_mode: 'eponly' or 'sponly'
        :param forced_search: Flag if the search was triggered by a forced search
        :param download_current_quality: Flag if we want to include an already downloaded quality in the new search
        :param manual_search: Flag if the search was triggered by a manual search
        :param manual_search_type: How the manual search was started: For example an 'episode' or 'season'

        :return: A dict of search results, ordered by episode number.
        """
        self._check_auth()
        self.series = series

        results = {}
        items_list = []
        season_search = (len(episodes) > 1 or manual_search_type == 'season') and search_mode == 'sponly'

        for episode in episodes:
            search_strings = []
            if season_search:
                search_strings = self._get_season_search_strings(episode)
            elif search_mode == 'eponly':
                search_strings = self._get_episode_search_strings(episode)

            for search_string in search_strings:
                # Find results from the provider
                items_list += self.search(
                    search_string, ep_obj=episode, manual_search=manual_search
                )

            # In season search, we can't loop in episodes lists as we
            # only need one episode to get the season string
            if search_mode == 'sponly':
                break

        # Remove duplicate items
        unique_items = self.remove_duplicate_mappings(items_list)
        log.debug('Found {0} unique items', len(unique_items))

        # categorize the items into lists by quality
        categorized_items = defaultdict(list)
        for item in unique_items:
            quality = self.get_quality(item, anime=series.is_anime)
            categorized_items[quality].append(item)

        # sort qualities in descending order
        sorted_qualities = sorted(categorized_items, reverse=True)
        log.debug('Found qualities: {0}', sorted_qualities)

        # chain items sorted by quality
        sorted_items = chain.from_iterable(
            categorized_items[quality]
            for quality in sorted_qualities
        )

        # unpack all of the quality lists into a single sorted list
        items_list = list(sorted_items)

        # Move through each item and parse it into a quality
        search_results = []
        for item in items_list:

            # Make sure we start with a TorrentSearchResult, NZBDataSearchResult or NZBSearchResult search result obj.
            search_result = self.get_result()
            search_results.append(search_result)
            search_result.item = item
            search_result.download_current_quality = download_current_quality
            # FIXME: Should be changed to search_result.search_type
            search_result.forced_search = forced_search

            (search_result.name, search_result.url) = self._get_title_and_url(item)
            (search_result.seeders, search_result.leechers) = self._get_result_info(item)

            search_result.size = self._get_size(item)
            search_result.pubdate = self._get_pubdate(item)

            search_result.result_wanted = True

            try:
                search_result.parsed_result = NameParser(
                    parse_method=('normal', 'anime')[series.is_anime]).parse(
                        search_result.name)
            except (InvalidNameException, InvalidShowException) as error:
                log.debug('Error during parsing of release name: {release_name}, with error: {error}',
                          {'release_name': search_result.name, 'error': error})
                search_result.add_cache_entry = False
                search_result.result_wanted = False
                continue

            # I don't know why I'm doing this. Maybe remove it later altogether, now that I've added the
            # parsed_result to the search_result.
            search_result.series = search_result.parsed_result.series
            search_result.quality = search_result.parsed_result.quality
            search_result.release_group = search_result.parsed_result.release_group
            search_result.version = search_result.parsed_result.version
            search_result.actual_season = search_result.parsed_result.season_number
            search_result.actual_episodes = search_result.parsed_result.episode_numbers

            if not manual_search:
                if not (search_result.series.air_by_date or search_result.series.sports):
                    if search_mode == 'sponly':
                        if search_result.parsed_result.episode_numbers:
                            log.debug(
                                'This is supposed to be a season pack search but the result {0} is not a valid '
                                'season pack, skipping it', search_result.name
                            )
                            search_result.result_wanted = False
                            continue
                        elif not [ep for ep in episodes if
                                  search_result.parsed_result.season_number == (ep.season, ep.scene_season)
                                  [ep.series.is_scene]]:
                            log.debug(
                                'This season result {0} is for a season we are not searching for, '
                                'skipping it', search_result.name
                            )
                            search_result.result_wanted = False
                            continue
                    else:
                        # I'm going to split these up for better readability
                        # Check if at least got a season parsed.
                        if search_result.parsed_result.season_number is None:
                            log.debug(
                                "The result {0} doesn't seem to have a valid season that we are currently trying to "
                                'snatch, skipping it', search_result.name
                            )
                            search_result.result_wanted = False
                            continue

                        # Check if we at least got some episode numbers parsed.
                        if not search_result.parsed_result.episode_numbers:
                            log.debug(
                                "The result {0} doesn't seem to match an episode that we are currently trying to "
                                'snatch, skipping it', search_result.name
                            )
                            search_result.result_wanted = False
                            continue

                        # Compare the episodes and season from the result with what was searched.
                        if not [searched_episode for searched_episode in episodes
                                if searched_episode.season == search_result.parsed_result.season_number and
                                (searched_episode.episode, searched_episode.scene_episode)
                                [searched_episode.series.is_scene] in
                                search_result.parsed_result.episode_numbers]:
                            log.debug(
                                "The result {0} doesn't seem to match an episode that we are currently trying to "
                                'snatch, skipping it', search_result.name
                            )
                            search_result.result_wanted = False
                            continue

                    # We've performed some checks to decide if we want to continue with this result.
                    # If we've hit this, that means this is not an air_by_date and not a sports show. And it seems to be
                    # a valid result. Let's store the parsed season and episode number and continue.
                    search_result.actual_season = search_result.parsed_result.season_number
                    search_result.actual_episodes = search_result.parsed_result.episode_numbers
                else:
                    # air_by_date or sportshow.
                    search_result.same_day_special = False

                    if not search_result.parsed_result.is_air_by_date:
                        log.debug(
                            "This is supposed to be a date search but the result {0} didn't parse as one, "
                            'skipping it', search_result.name
                        )
                        search_result.result_wanted = False
                        continue
                    else:
                        # Use a query against the tv_episodes table, to match the parsed air_date against.
                        air_date = search_result.parsed_result.air_date.toordinal()
                        db = DBConnection()
                        sql_results = db.select(
                            'SELECT season, episode FROM tv_episodes WHERE indexer = ? AND showid = ? AND airdate = ?',
                            [search_result.series.indexer, search_result.series.series_id, air_date]
                        )

                        if len(sql_results) == 2:
                            if int(sql_results[0]['season']) == 0 and int(sql_results[1]['season']) != 0:
                                search_result.actual_season = int(sql_results[1]['season'])
                                search_result.actual_episodes = [int(sql_results[1]['episode'])]
                                search_result.same_day_special = True
                            elif int(sql_results[1]['season']) == 0 and int(sql_results[0]['season']) != 0:
                                search_result.actual_season = int(sql_results[0]['season'])
                                search_result.actual_episodes = [int(sql_results[0]['episode'])]
                                search_result.same_day_special = True
                        elif len(sql_results) != 1:
                            log.warning(
                                "Tried to look up the date for the episode {0} but the database didn't return proper "
                                'results, skipping it', search_result.name
                            )
                            search_result.result_wanted = False
                            continue

                        # @TODO: Need to verify and test this.
                        if search_result.result_wanted and not search_result.same_day_special:
                            search_result.actual_season = int(sql_results[0]['season'])
                            search_result.actual_episodes = [int(sql_results[0]['episode'])]

        cl = []
        # Iterate again over the search results, and see if there is anything we want.
        for search_result in search_results:

            # Try to cache the item if we want to.
            cache_result = search_result.add_result_to_cache(self.cache)
            if cache_result is not None:
                cl.append(cache_result)

            if not search_result.result_wanted:
                log.debug("We aren't interested in this result: {0} with url: {1}",
                          search_result.name, search_result.url)
                continue

            log.debug('Found result {0} at {1}', search_result.name, search_result.url)

            search_result.create_episode_object()
            # result = self.get_result(episode_object, search_result)
            search_result.finish_search_result(self)

            if not search_result.actual_episodes:
                episode_number = SEASON_RESULT
                log.debug('Found season pack result {0} at {1}', search_result.name, search_result.url)
            elif len(search_result.actual_episodes) == 1:
                episode_number = search_result.actual_episode
                log.debug('Found single episode result {0} at {1}', search_result.name, search_result.url)
            else:
                episode_number = MULTI_EP_RESULT
                log.debug('Found multi-episode ({0}) result {1} at {2}',
                          ', '.join(map(str, search_result.parsed_result.episode_numbers)),
                          search_result.name,
                          search_result.url)

            if episode_number not in results:
                results[episode_number] = [search_result]
            else:
                results[episode_number].append(search_result)

        if cl:
            # Access to a protected member of a client class
            db = self.cache._get_db()
            db.mass_action(cl)

        return results

    def get_id(self):
        """Get ID of the provider."""
        return GenericProvider.make_id(self.name)

    def get_quality(self, item, anime=False):
        """Get quality of the result from its name."""
        (title, _) = self._get_title_and_url(item)
        quality = Quality.quality_from_name(title, anime)

        return quality

    def get_result(self, episodes=None):
        """Get result."""
        return self._get_result(episodes)

    def image_name(self):
        """Return provider image name."""
        return self.get_id() + '.png'

    def is_active(self):
        """Check if provider is active."""
        return False

    def is_enabled(self):
        """Check if provider is enabled."""
        return bool(self.enabled)

    @staticmethod
    def make_id(name):
        """Make ID of the provider."""
        if not name:
            return ''

        return re.sub(r'[^\w\d_]', '_', str(name).strip().lower())

    def seed_ratio(self):
        """Return ratio."""
        return ''

    def _check_auth(self):
        """Check if we are autenticated."""
        return True

    def login(self):
        """Login to provider."""
        return True

    def search(self, search_strings, age=0, ep_obj=None, **kwargs):
        """Search the provider."""
        return []

    @staticmethod
    def parse_pubdate(pubdate, human_time=False, timezone=None, **kwargs):
        """
        Parse publishing date into a datetime object.

        :param pubdate: date and time string
        :param human_time: string uses human slang ("4 hours ago")
        :param timezone: use a different timezone ("US/Eastern")

        :keyword dayfirst: Interpret the first value as the day
        :keyword yearfirst: Interpret the first value as the year

        :returns: a datetime object or None
        """
        now_alias = ('right now', 'just now', 'now')

        df = kwargs.pop('dayfirst', False)
        yf = kwargs.pop('yearfirst', False)
        fromtimestamp = kwargs.pop('fromtimestamp', False)

        # This can happen from time to time
        if pubdate is None:
            log.debug('Skipping invalid publishing date.')
            return

        try:
            if human_time:
                if pubdate.lower() in now_alias:
                    seconds = 0
                else:
                    match = re.search(r'(?P<time>[\d.]+\W*)(?P<granularity>\w+)', pubdate)
                    matched_time = match.group('time')
                    matched_granularity = match.group('granularity')

                    # The parse method does not support decimals used with the month,
                    # months, year or years granularities.
                    if matched_granularity and matched_granularity in ('month', 'months', 'year', 'years'):
                        matched_time = int(round(float(matched_time.strip())))

                    seconds = parse('{0} {1}'.format(matched_time, matched_granularity))
                    if seconds is None:
                        log.warning('Failed parsing human time: {0} {1}', matched_time, matched_granularity)
                        raise ValueError('Failed parsing human time: {0} {1}'.format(matched_time, matched_granularity))

                return datetime.now(tz.tzlocal()) - timedelta(seconds=seconds)

            if fromtimestamp:
                dt = datetime.fromtimestamp(int(pubdate), tz=tz.gettz('UTC'))
            else:
                day_offset = 0
                if 'yesterday at' in pubdate.lower() or 'today at' in pubdate.lower():
                    # Extract a time
                    time = re.search(r'(?P<time>[0-9:]+)', pubdate)
                    if time:
                        if 'yesterday' in pubdate:
                            day_offset = 1
                        pubdate = time.group('time').strip()

                dt = parser.parse(pubdate, dayfirst=df, yearfirst=yf, fuzzy=True) - timedelta(days=day_offset)

            # Always make UTC aware if naive
            if dt.tzinfo is None or dt.tzinfo.utcoffset(dt) is None:
                dt = dt.replace(tzinfo=tz.gettz('UTC'))
            if timezone:
                dt = dt.astimezone(tz.gettz(timezone))

            return dt
        except (AttributeError, TypeError, ValueError):
            log.exception('Failed parsing publishing date: {0}', pubdate)

    def _get_result(self, episodes=None):
        """Get result."""
        return SearchResult(episodes)

    def _create_air_by_date_search_string(self, show_scene_name, episode, search_string, add_string=None):
        """Create a search string used for series that are indexed by air date."""
        episode_string = show_scene_name + self.search_separator
        episode_string += str(episode.airdate).replace('-', ' ')

        if add_string:
            episode_string += self.search_separator + add_string

        search_string['Episode'].append(episode_string.strip())

    def _create_sports_search_string(self, show_scene_name, episode, search_string, add_string=None):
        """Create a search string used for sport series."""
        episode_string = show_scene_name + self.search_separator

        episode_string += str(episode.airdate).replace('-', ' ')
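        # Join the air date and the month abbreviation with ' ' when multiple proper strings are configured, otherwise with '|'.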
        episode_string += ('|', ' ')[len(self.proper_strings) > 1]
        episode_string += episode.airdate.strftime('%b')

        if add_string:
            episode_string += self.search_separator + add_string

        search_string['Episode'].append(episode_string.strip())

    def _create_anime_search_string(self, show_scene_name, episode, search_string, add_string=None):
        """Create a search string used for as anime 'marked' shows."""
        episode_string = show_scene_name + self.search_separator

        # If the show name is a season scene exception, we want to use the indexer episode number.
        if (episode.scene_season > 1 and
                show_scene_name in scene_exceptions.get_season_scene_exceptions(episode.series, episode.scene_season)):
            # This is apparently a season exception, let's use the scene_episode instead of absolute
            ep = episode.scene_episode
        else:
            ep = episode.scene_absolute_number

        episode_string += '{episode:0>2}'.format(episode=ep)
        episode_string_fallback = episode_string + '{episode:0>3}'.format(episode=ep)

        if add_string:
            episode_string += self.search_separator + add_string
            episode_string_fallback += self.search_separator + add_string

        search_string['Episode'].append(episode_string.strip())

    def _create_default_search_string(self, show_scene_name, episode, search_string, add_string=None):
        """Create a default search string, used for standard type S01E01 tv series."""
        episode_string = show_scene_name + self.search_separator

        episode_string += config.naming_ep_type[2] % {
            'seasonnumber': episode.scene_season,
            'episodenumber': episode.scene_episode,
        }

        if add_string:
            episode_string += self.search_separator + add_string

        search_string['Episode'].append(episode_string.strip())

    def _get_episode_search_strings(self, episode, add_string=''):
        """Get episode search strings."""
        if not episode:
            return []

        search_string = {
            'Episode': []
        }

        all_possible_show_names = episode.series.get_all_possible_names()
        if episode.scene_season:
            all_possible_show_names = all_possible_show_names.union(
                episode.series.get_all_possible_names(season=episode.scene_season)
            )

        for show_name in all_possible_show_names:

            if episode.series.air_by_date:
                self._create_air_by_date_search_string(show_name, episode, search_string, add_string=add_string)
            elif episode.series.sports:
                self._create_sports_search_string(show_name, episode, search_string, add_string=add_string)
            elif episode.series.anime:
                self._create_anime_search_string(show_name, episode, search_string, add_string=add_string)
            else:
                self._create_default_search_string(show_name, episode, search_string, add_string=add_string)

        return [search_string]

    def _get_tvdb_id(self):
        """Return the tvdb id if the shows indexer is tvdb. If not, try to use the externals to get it."""
        if not self.series:
            return None

        return self.series.indexerid if self.series.indexer == INDEXER_TVDBV2 else self.series.externals.get('tvdb_id')

    def _get_season_search_strings(self, episode):
        search_string = {
            'Season': []
        }

        for show_name in episode.series.get_all_possible_names(season=episode.scene_season):
            episode_string = show_name + self.search_separator

            if episode.series.air_by_date or episode.series.sports:
                search_string['Season'].append(episode_string + str(episode.airdate).split('-')[0])
            elif episode.series.anime:
                search_string['Season'].append(episode_string + 'Season')
            else:
                for season_template in self.season_templates:
                    templated_episode_string = episode_string + season_template.format(season=episode.scene_season)
                    search_string['Season'].append(templated_episode_string.strip())

        return [search_string]

    def _get_size(self, item):
        """Return default size."""
        return -1

    def _get_storage_dir(self):
        """Return storage dir."""
        return ''

    def _get_result_info(self, item):
        """Return default seeders and leechers."""
        return -1, -1

    def _get_pubdate(self, item):
        """Return publish date of the item.

        If the provider doesn't have a _get_pubdate function, this will be used.
        """
        return None

    def _get_title_and_url(self, item):
        """Return title and url from result."""
        if not item:
            return '', ''

        title = item.get('title', '')
        url = item.get('link', '')

        if title:
            title = title.replace(' ', '.')
        else:
            title = ''

        if url:
            url = url.replace('&amp;', '&').replace('%26tr%3D', '&tr=')
        else:
            url = ''

        return title, url

    @property
    def recent_results(self):
        """Return recent RSS results from provier."""
        return recent_results.get(self.get_id(), [])

    @recent_results.setter
    def recent_results(self, items):
        """Set recent results from provider."""
        if not recent_results.get(self.get_id()):
            recent_results.update({self.get_id(): []})
        if items:
            add_to_list = []
            for item in items:
                if item['link'] not in {cache_item['link'] for cache_item in recent_results[self.get_id()]}:
                    add_to_list += [item]
            results = add_to_list + recent_results[self.get_id()]
            recent_results[self.get_id()] = results[:self.max_recent_items]

    def add_cookies_from_ui(self):
        """
        Add the cookies configured from UI to the provider's requests session.

        :return: A dict with the keys 'result' as bool and 'message' as string
        """
        # Exception for RSS torrent providers: for them, adding cookies initially should be optional.
        from medusa.providers.torrent.rss.rsstorrent import TorrentRssProvider
        if isinstance(self, TorrentRssProvider) and not self.cookies:
            return {'result': True,
                    'message': 'This is a TorrentRss provider without any cookies provided. '
                               'Cookies for this provider are considered optional.'}

        # This is the generic attribute used to manually add cookies for provider authentication
        if not self.enable_cookies:
            return {'result': False,
                    'message': 'Adding cookies is not supported for provider: {0}'.format(self.name)}

        if not self.cookies:
            return {'result': False,
                    'message': 'No cookies added from the UI for provider: {0}'.format(self.name)}

        cookie_validator = re.compile(r'^([\w%]+=[\w%]+)(;[\w%]+=[\w%]+)*$')
        if not cookie_validator.match(self.cookies):
            ui.notifications.message(
                'Failed to validate cookie for provider {provider}'.format(provider=self.name),
                'Cookie is not correctly formatted: {0}'.format(self.cookies))
            return {'result': False,
                    'message': 'Cookie is not correctly formatted: {0}'.format(self.cookies)}

        if not all(req_cookie in [x.rsplit('=', 1)[0] for x in self.cookies.split(';')]
                   for req_cookie in self.required_cookies):
            return {
                'result': False,
                'message': "You haven't configured the required cookies. Please login at {provider_url}, "
                           'and make sure you have copied the following cookies: {required_cookies!r}'
                           .format(provider_url=self.name, required_cookies=self.required_cookies)
            }

        # cookie_validator got at least one cookie key/value pair, let's return success
        add_dict_to_cookiejar(self.session.cookies, dict(x.rsplit('=', 1) for x in self.cookies.split(';')))
        return {'result': True,
                'message': ''}

    def check_required_cookies(self):
        """
        Check if we have the required cookies in the requests session object.

        Meaning that we've already successfully authenticated once, and we don't need to go through this again.
        Note! This doesn't mean the cookies are correct!
        """
        if not hasattr(self, 'required_cookies'):
            # A reminder for the developer implementing cookie-based authentication.
            log.error(
                'You need to configure the required_cookies attribute, for the provider: {provider}',
                {'provider': self.name}
            )
            return False
        return all(dict_from_cookiejar(self.session.cookies).get(cookie) for cookie in self.required_cookies)

    def cookie_login(self, check_login_text, check_url=None):
        """
        Check the response for text that indicates a login prompt.

        In that case, the cookie authentication was not successful.
        :param check_login_text: A string that's visible when the authentication failed.
        :param check_url: The url to use to test the login with cookies. By default the provider's home page is used.

        :return: False when authentication was not successful. True if successful.
        """
        check_url = check_url or self.url

        if self.check_required_cookies():
            # All required cookies have been found within the current session, we don't need to go through this again.
            return True

        if self.cookies:
            result = self.add_cookies_from_ui()
            if not result['result']:
                ui.notifications.message(result['message'])
                log.warning(result['message'])
                return False
        else:
            log.warning('Failed to login, you will need to add your cookies in the provider settings')
            ui.notifications.error('Failed to auth with {provider}'.format(provider=self.name),
                                   'You will need to add your cookies in the provider settings')
            return False

        response = self.session.get(check_url)
        if not response or any([not (response.text and response.status_code == 200),
                                check_login_text.lower() in response.text.lower()]):
            log.warning('Please configure the required cookies for this provider. Check your provider settings')
            ui.notifications.error('Wrong cookies for {provider}'.format(provider=self.name),
                                   'Check your provider settings')
            self.session.cookies.clear()
            return False
        else:
            return True

    def __str__(self):
        """Return provider name and provider type."""
        return '{provider_name} ({provider_type})'.format(provider_name=self.name, provider_type=self.provider_type)

    def __unicode__(self):
        """Return provider name and provider type."""
        return '{provider_name} ({provider_type})'.format(provider_name=self.name, provider_type=self.provider_type)
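
The snippet below is not part of the Medusa source; it is a minimal, self-contained sketch that mirrors two of the pure helpers shown above (make_id and remove_duplicate_mappings) so their behaviour can be tried outside the application:

import re
from collections import OrderedDict


def make_id(name):
    """Mirror of GenericProvider.make_id: normalize a provider name to a lowercase identifier."""
    if not name:
        return ''
    return re.sub(r'[^\w\d_]', '_', str(name).strip().lower())


def remove_duplicate_mappings(items, pk='link'):
    """Mirror of GenericProvider.remove_duplicate_mappings: keep one mapping per primary key."""
    return list(OrderedDict((item[pk], item) for item in items).values())


if __name__ == '__main__':
    print(make_id('The Pirate Bay'))  # -> the_pirate_bay

    results = [
        {'link': 'magnet:?xt=urn:btih:1', 'title': 'Show.S01E01.720p'},
        {'link': 'magnet:?xt=urn:btih:1', 'title': 'Show.S01E01.720p.REPACK'},
        {'link': 'magnet:?xt=urn:btih:2', 'title': 'Show.S01E02.720p'},
    ]
    # Later duplicates overwrite earlier ones; order of first appearance is preserved.
    print(remove_duplicate_mappings(results))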