Example #1
def get_aired_in_season(show, return_sql=False):
    ep_count = {}
    ep_count_scene = {}
    tomorrow = (datetime.date.today() + datetime.timedelta(days=1)).toordinal()
    my_db = db.DBConnection()

    if show.air_by_date:
        sql_string = 'SELECT ep.status, ep.season, ep.scene_season, ep.episode, ep.airdate ' + \
                     'FROM [tv_episodes] AS ep, [tv_shows] AS show ' + \
                     'WHERE season != 0 AND ep.showid = show.indexer_id AND show.paused = 0 ' + \
                     'AND ep.showid = ? AND ep.indexer = ? AND show.air_by_date = 1'
    else:
        sql_string = 'SELECT status, season, scene_season, episode, airdate ' + \
                     'FROM [tv_episodes] ' + \
                     'WHERE showid = ? AND indexer = ? AND season > 0'

    sql_results = my_db.select(sql_string, [show.indexerid, show.indexer])
    for result in sql_results:
        if 1 < helpers.tryInt(result['airdate']) <= tomorrow:
            cur_season = helpers.tryInt(result['season'])
            ep_count[cur_season] = ep_count.setdefault(cur_season, 0) + 1
            cur_scene_season = helpers.tryInt(result['scene_season'], -1)
            if -1 != cur_scene_season:
                ep_count_scene[cur_scene_season] = ep_count.setdefault(
                    cur_scene_season, 0) + 1

    if return_sql:
        return ep_count, ep_count_scene, sql_results

    return ep_count, ep_count_scene
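
Every example in this listing revolves around a `tryInt` helper (imported directly or via `helpers`) that coerces loosely typed values - database fields, XML/JSON values, config strings - to integers. The helper itself is not shown; a minimal sketch consistent with how the call sites use it (convert to int, hand back a default such as 0, None or the original value when conversion fails) might look like this:

def tryInt(value, default=0):
    # Convert value to int; return the supplied default when conversion fails.
    try:
        return int(value)
    except (TypeError, ValueError):
        return default

Calls such as `tryInt(row['season'], None)` and `tryInt(n, n)` in the later examples depend on exactly this fallback behaviour: a failed conversion returns the default unchanged.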
Example #2
    def __init__(self, name, url, key='', cat_ids=None, search_mode=None, search_fallback=False,
                 enable_recentsearch=False, enable_backlog=False, enable_scheduled_backlog=False):
        generic.NZBProvider.__init__(self, name, True, False)

        self.url = url
        self.key = key
        self._exclude = set()
        self.cat_ids = cat_ids or ''
        self._cat_ids = None
        self.search_mode = search_mode or 'eponly'
        self.search_fallback = bool(tryInt(search_fallback))
        self.enable_recentsearch = bool(tryInt(enable_recentsearch))
        self.enable_backlog = bool(tryInt(enable_backlog))
        self.enable_scheduled_backlog = bool(tryInt(enable_scheduled_backlog, 1))
        self.needs_auth = '0' != self.key.strip()  # '0' in the key setting indicates that api_key is not needed
        self.default = False
        self._caps = {}
        self._caps_cats = {}
        self._caps_all_cats = []
        self._caps_need_apikey = {'need': False, 'date': datetime.date.fromordinal(1)}
        self._limits = 100
        self._last_recent_search = None
        self._caps_last_updated = datetime.datetime.fromordinal(1)
        self.cache = NewznabCache(self)
        # filters
        if super(NewznabProvider, self).get_id() in ('nzbs_org',):
            self.filter = []
            if 'nzbs_org' == super(NewznabProvider, self).get_id():
                self.may_filter = OrderedDict([
                    ('so', ('scene only', False)), ('snn', ('scene not nuked', False))])
Example #3
def parse_time(t):
    mo = time_regex.search(t)
    if mo is not None and len(mo.groups()) >= 5:
        if mo.group(5) is not None:
            try:
                hr = helpers.tryInt(mo.group(1))
                m = helpers.tryInt(mo.group(4))
                ap = mo.group(5)
                # convert am/pm to 24 hour clock
                if ap is not None:
                    if pm_regex.search(ap) is not None and hr != 12:
                        hr += 12
                    elif am_regex.search(ap) is not None and hr == 12:
                        hr -= 12
            except:
                hr = 0
                m = 0
        else:
            try:
                hr = helpers.tryInt(mo.group(1))
                m = helpers.tryInt(mo.group(6))
            except:
                hr = 0
                m = 0
    else:
        hr = 0
        m = 0
    if hr < 0 or hr > 23 or m < 0 or m > 59:
        hr = 0
        m = 0

    return hr, m
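
`parse_time` relies on module-level `time_regex`, `am_regex` and `pm_regex` objects that sit outside the snippet. The patterns below are assumptions, not the originals, but any regex with the group layout the code indexes (hour in group 1, minutes in group 4 for am/pm times, the am/pm marker in group 5, minutes in group 6 for 24-hour times) gives the behaviour sketched in the comments:

import re

# Hypothetical patterns matching the group layout parse_time expects.
time_regex = re.compile(r'(\d{1,2})(([:.](\d{2}))? ?([ap]\.? ?m)|[:.](\d{2}))', re.I)
am_regex = re.compile(r'a\.? ?m', re.I)
pm_regex = re.compile(r'p\.? ?m', re.I)

# With the parse_time above in scope:
# parse_time('8:30 pm')   -> (20, 30)   am/pm branch via groups 4 and 5
# parse_time('20:30')     -> (20, 30)   24-hour branch via group 6
# parse_time('no digits') -> (0, 0)     no match, falls back to midnight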
Example #4
def parse_time(t):
    mo = time_regex.search(t)
    if mo is not None and len(mo.groups()) >= 5:
        if mo.group(5) is not None:
            try:
                hr = helpers.tryInt(mo.group(1))
                m = helpers.tryInt(mo.group(4))
                ap = mo.group(5)
                # convert am/pm to 24 hour clock
                if ap is not None:
                    if pm_regex.search(ap) is not None and hr != 12:
                        hr += 12
                    elif am_regex.search(ap) is not None and hr == 12:
                        hr -= 12
            except:
                hr = 0
                m = 0
        else:
            try:
                hr = helpers.tryInt(mo.group(1))
                m = helpers.tryInt(mo.group(6))
            except:
                hr = 0
                m = 0
    else:
        hr = 0
        m = 0
    if hr < 0 or hr > 23 or m < 0 or m > 59:
        hr = 0
        m = 0

    return hr, m
Example #5
def _get_numbering_for_show(tbl, indexer, indexer_id):

    result = {}

    if None is not indexer_id:
        if 'tv_episodes' == tbl:
            xem_refresh(indexer_id, indexer)

        my_db = db.DBConnection()
        # noinspection SqlResolve
        rows = my_db.select(
            'SELECT season, episode, scene_season, scene_episode'
            ' FROM %s' % tbl + ' WHERE indexer = ? AND %s = ?' %
            ('indexer_id', 'showid')['tv_episodes' == tbl] +
            ' AND (scene_season OR scene_episode) != 0'
            ' ORDER BY season, episode',
            [int(indexer), int(indexer_id)])

        for row in rows:
            season, episode = tryInt(row['season'],
                                     None), tryInt(row['episode'], None)
            if None is not season and None is not episode:
                scene_season, scene_episode = tryInt(row['scene_season'],
                                                     None), tryInt(
                                                         row['scene_episode'],
                                                         None)
                if None is not scene_season and None is not scene_episode:
                    result[(season, episode)] = (scene_season, scene_episode)

    return result
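
The query above selects its ID column with `('indexer_id', 'showid')['tv_episodes' == tbl]`, a tuple indexed by a boolean comparison; the same idiom appears in several later examples (for instance `('search', 'rss')[mode == 'RSS']`). In Python, `False` indexes element 0 and `True` element 1:

# A boolean is a valid sequence index: False -> 0, True -> 1.
column = ('indexer_id', 'showid')['tv_episodes' == 'tv_episodes']       # 'showid'
column = ('indexer_id', 'showid')['tv_episodes' == 'some_other_table']  # 'indexer_id'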
Example #6
def parse_date_time(d, t, network):
    if network_dict is None:
        load_network_dict()
    mo = time_regex.search(t)
    if mo is not None and len(mo.groups()) >= 2:
        try:
            hr = helpers.tryInt(mo.group(1))
            m = helpers.tryInt(mo.group(2))
            ap = mo.group(3)
            # convert am/pm to 24 hour clock
            if ap is not None:
                if ap.lower() == u" pm" and hr != 12:
                    hr += 12
                elif ap.lower() == u" am" and hr == 12:
                    hr -= 12
        except:
            hr = 0
            m = 0
    else:
        hr = 0
        m = 0
    if hr < 0 or hr > 23 or m < 0 or m > 59:
        hr = 0
        m = 0
    te = datetime.datetime.fromordinal(helpers.tryInt(d))
    foreign_timezone = get_network_timezone(network, network_dict)
    foreign_naive = datetime.datetime(te.year, te.month, te.day, hr, m, tzinfo=foreign_timezone)
    try:
        return foreign_naive.astimezone(sb_timezone)
    except (ValueError):
        return foreign_naive
Example #7
def parse_date_time(d, t, network):
    """
    Parse date and time string into local time

    :param d: date string
    :param t: time string
    :param network: network to use as base
    :return: datetime object containing local time
    """

    if not network_dict:
        load_network_dict()

    mo = time_regex.search(t)
    if mo is not None and len(mo.groups()) >= 5:
        if mo.group(5) is not None:
            try:
                hr = helpers.tryInt(mo.group(1))
                m = helpers.tryInt(mo.group(4))
                ap = mo.group(5)
                # convert am/pm to 24 hour clock
                if ap is not None:
                    if pm_regex.search(ap) is not None and hr != 12:
                        hr += 12
                    elif am_regex.search(ap) is not None and hr == 12:
                        hr -= 12
            except Exception:
                hr = 0
                m = 0
        else:
            try:
                hr = helpers.tryInt(mo.group(1))
                m = helpers.tryInt(mo.group(6))
            except Exception:
                hr = 0
                m = 0
    else:
        hr = 0
        m = 0
    if hr < 0 or hr > 23 or m < 0 or m > 59:
        hr = 0
        m = 0

    te = datetime.datetime.fromordinal(helpers.tryInt(d) or 1)
    try:
        foreign_timezone = get_network_timezone(network, network_dict)
        foreign_naive = datetime.datetime(te.year,
                                          te.month,
                                          te.day,
                                          hr,
                                          m,
                                          tzinfo=foreign_timezone)
        return foreign_naive
    except Exception:
        return datetime.datetime(te.year,
                                 te.month,
                                 te.day,
                                 hr,
                                 m,
                                 tzinfo=sb_timezone)
Example #8
def _get_numbering_for_show(tbl, indexer, indexer_id):

    result = {}

    if None is not indexer_id:
        if 'tv_episodes' == tbl:
            xem_refresh(indexer_id, indexer)

        my_db = db.DBConnection()
        # noinspection SqlResolve
        rows = my_db.select(
            'SELECT season, episode, scene_season, scene_episode'
            ' FROM %s' % tbl +
            ' WHERE indexer = ? AND %s = ?' % ('indexer_id', 'showid')['tv_episodes' == tbl] +
            ' AND (scene_season OR scene_episode) != 0'
            ' ORDER BY season, episode',
            [int(indexer), int(indexer_id)])

        for row in rows:
            season, episode = tryInt(row['season'], None), tryInt(row['episode'], None)
            if None is not season and None is not episode:
                scene_season, scene_episode = tryInt(row['scene_season'], None), tryInt(row['scene_episode'], None)
                if None is not scene_season and None is not scene_episode:
                    result[(season, episode)] = (scene_season, scene_episode)

    return result
Example #9
    def _doSearch(self, search_strings, search_mode='eponly', epcount=0, age=0, epObj=None):

        results = []
        items = {'Season': [], 'Episode': [], 'RSS': []}

        for mode in search_strings.keys():
            for search_string in search_strings[mode]:
                try:
                    self.search_params.update({'type': ('search', 'rss')[mode == 'RSS'], 'search': search_string.strip()})
                    data = self.getURL(self.urls['rss'], params=self.search_params)
                    if not data:
                        continue

                    data = xmltodict.parse(data)
                    for item in data['rss']['channel']['item']:
                        title = item['title']
                        info_hash = item['info_hash']
                        url = item['enclosure']['@url']
                        size = int(item['enclosure']['@length'] or item['size'])
                        seeders = helpers.tryInt(item['seeders'],0)
                        leechers = helpers.tryInt(item['leechers'],0)

                        if not seeders or seeders < self.minseed or leechers < self.minleech:
                            continue

                        items[mode].append((title, url, seeders, leechers, size, info_hash))

                except Exception:
                    logger.log(u"Failed parsing " + self.name + " Traceback: " + traceback.format_exc(), logger.ERROR)

            results += items[mode]

        return results
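
The `item['enclosure']['@url']` and `item['enclosure']['@length']` lookups work because `xmltodict.parse` exposes XML attributes as dictionary keys prefixed with `@`. A small illustration with made-up feed data:

import xmltodict

sample = (
    '<rss><channel><item>'
    '<title>Show.S01E01.720p</title>'
    '<info_hash>0123456789abcdef</info_hash>'
    '<enclosure url="http://example.invalid/t.torrent" length="734003200"/>'
    '<size>734003200</size><seeders>12</seeders><leechers>3</leechers>'
    '</item></channel></rss>'
)

item = xmltodict.parse(sample)['rss']['channel']['item']
print(item['title'])                                       # Show.S01E01.720p
print(item['enclosure']['@url'])                           # attribute, hence the '@' prefix
print(int(item['enclosure']['@length'] or item['size']))   # 734003200

Note that with a single <item> the parsed value is a dict rather than a list, so the loop in the example above assumes a feed containing several items.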
Example #10
def get_aired_in_season(show, return_sql=False):
    ep_count = {}
    ep_count_scene = {}
    tomorrow = (datetime.date.today() + datetime.timedelta(days=1)).toordinal()
    my_db = db.DBConnection()

    if show.air_by_date:
        sql_string = 'SELECT ep.status, ep.season, ep.scene_season, ep.episode, ep.airdate ' + \
                     'FROM [tv_episodes] AS ep, [tv_shows] AS show ' + \
                     'WHERE season != 0 AND ep.showid = show.indexer_id AND show.paused = 0 ' + \
                     'AND ep.showid = ? AND ep.indexer = ? AND show.air_by_date = 1'
    else:
        sql_string = 'SELECT status, season, scene_season, episode, airdate ' + \
                     'FROM [tv_episodes] ' + \
                     'WHERE showid = ? AND indexer = ? AND season > 0'

    sql_results = my_db.select(sql_string, [show.indexerid, show.indexer])
    for result in sql_results:
        if 1 < helpers.tryInt(result['airdate']) <= tomorrow:
            cur_season = helpers.tryInt(result['season'])
            ep_count[cur_season] = ep_count.setdefault(cur_season, 0) + 1
            cur_scene_season = helpers.tryInt(result['scene_season'], -1)
            if -1 != cur_scene_season:
                ep_count_scene[cur_scene_season] = ep_count.setdefault(cur_scene_season, 0) + 1

    if return_sql:
        return ep_count, ep_count_scene, sql_results

    return ep_count, ep_count_scene
Example #11
def get_trakt_ids(url_trakt):
    ids = {}
    for url_key in url_trakt.iterkeys():
        try:
            res = TraktAPI().trakt_request(url_trakt.get_url(url_key))
            if res:
                found = False
                for r in res:
                    if r.get(
                            'type', ''
                    ) == 'show' and 'show' in r and 'ids' in r['show']:
                        ids[INDEXER_TVDB] = tryInt(r['show']['ids'].get(
                            'tvdb', 0))
                        ids[INDEXER_TVRAGE] = tryInt(r['show']['ids'].get(
                            'tvrage', 0))
                        ids[INDEXER_IMDB] = tryInt(
                            str(r['show']['ids'].get('imdb')).replace(
                                'tt', ''))
                        ids[INDEXER_TRAKT] = tryInt(r['show']['ids'].get(
                            'trakt', 0))
                        ids[INDEXER_TMDB] = tryInt(r['show']['ids'].get(
                            'tmdb', 0))
                        found = True
                        break
                if found:
                    break
        except (TraktAuthException, TraktException, IndexError, KeyError):
            pass
    return {k: v for k, v in ids.iteritems() if v not in (None, '', 0)}
Example #12
    def __init__(self, name, url, key='', cat_ids=None, search_mode=None, search_fallback=False,
                 enable_recentsearch=False, enable_backlog=False, enable_scheduled_backlog=False, server_type=None):
        generic.NZBProvider.__init__(self, name, True, False)

        self.url = url
        self.key = key
        self.server_type = tryInt(server_type, None) or NewznabConstants.SERVER_DEFAULT
        self._exclude = set()
        self.cat_ids = cat_ids or ''
        self._cat_ids = None
        self.search_mode = search_mode or 'eponly'
        self.search_fallback = bool(tryInt(search_fallback))
        self.enable_recentsearch = bool(tryInt(enable_recentsearch))
        self.enable_backlog = bool(tryInt(enable_backlog))
        self.enable_scheduled_backlog = bool(tryInt(enable_scheduled_backlog, 1))
        self.needs_auth = '0' != self.key.strip()  # '0' in the key setting indicates that api_key is not needed
        self.default = False
        self._caps = {}
        self._caps_cats = {}
        self._caps_all_cats = []
        self._caps_need_apikey = {'need': False, 'date': datetime.date.fromordinal(1)}
        self._limits = 100
        self._last_recent_search = None
        self._caps_last_updated = datetime.datetime.fromordinal(1)
        self.cache = NewznabCache(self)
Example #13
    def check_users_watched(self, users, media_id):

        if not self.home_user_tokens:
            self.home_user_tokens = self.get_plex_home_user_tokens()

        result = {}
        if 'all' in users:
            users = self.home_user_tokens.keys()

        for user in users:
            user_media_page = self.get_url_pms('/library/metadata/%s' % media_id, token=self.home_user_tokens[user])
            if None is not user_media_page:
                video_node = user_media_page.find('Video')

                progress = 0
                if None is not video_node.get('viewOffset') and None is not video_node.get('duration'):
                    progress = tryInt(video_node.get('viewOffset')) * 100 / tryInt(video_node.get('duration'))

                played = int(video_node.get('viewCount') or 0)
                if not progress and not played:
                    continue

                date_watched = 0
                if (0 < tryInt(video_node.get('viewCount'))) or (0 < self.default_progress_as_watched < progress):
                    last_viewed_at = video_node.get('lastViewedAt')
                    if last_viewed_at and last_viewed_at not in ('', '0'):
                        date_watched = last_viewed_at

                if date_watched:
                    result[user] = dict(played=played, progress=progress, date_watched=date_watched)
            else:
                self.log('Do not have the token for %s.' % user)

        return result
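
One detail in the progress calculation above: this codebase is Python 2 (`iterkeys`, `unicode` and `urllib2` appear elsewhere in the listing), so `tryInt(...) * 100 / tryInt(...)` performs integer division and `progress` comes out as a whole-number percentage:

# Python 2 floors int/int division, so progress is a whole-number percent.
view_offset = 1512000   # hypothetical viewOffset, in milliseconds
duration = 3600000      # hypothetical duration, in milliseconds
progress = view_offset * 100 / duration   # 42 on Python 2; use // for the same result on Python 3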
Example #14
def _get_absolute_numbering_for_show(tbl, indexer, indexer_id):

    result = {}

    if None is not indexer_id:
        if 'tv_episodes' == tbl:
            xem_refresh(indexer_id, indexer)

        my_db = db.DBConnection()
        # noinspection SqlResolve
        rows = my_db.select(
            'SELECT season, episode, absolute_number, scene_absolute_number'
            ' FROM %s' % tbl + ' WHERE indexer = ? AND %s = ?' %
            ('indexer_id', 'showid')['tv_episodes' == tbl] +
            ' AND scene_absolute_number != 0'
            ' ORDER BY season, episode',
            [int(indexer), int(indexer_id)])

        for row in rows:
            season, episode, abs_num = map(
                lambda x: tryInt(row[x], None),
                ('season', 'episode', 'absolute_number'))
            if None is season and None is episode and None is not abs_num:
                season, episode, _ = _get_sea(indexer,
                                              indexer_id,
                                              absolute_number=abs_num)

            if None is not season and None is not episode:
                scene_absolute_number = tryInt(row['scene_absolute_number'],
                                               None)
                if None is not scene_absolute_number:
                    result[(season, episode)] = scene_absolute_number

    return result
Example #15
def _get_absolute_numbering_for_show(tbl, indexer, indexer_id):

    result = {}

    if None is not indexer_id:
        if 'tv_episodes' == tbl:
            xem_refresh(indexer_id, indexer)

        my_db = db.DBConnection()
        # noinspection SqlResolve
        rows = my_db.select(
            'SELECT season, episode, absolute_number, scene_absolute_number'
            ' FROM %s' % tbl +
            ' WHERE indexer = ? AND %s = ?' % ('indexer_id', 'showid')['tv_episodes' == tbl] +
            ' AND scene_absolute_number != 0'
            ' ORDER BY season, episode',
            [int(indexer), int(indexer_id)])

        for row in rows:
            season, episode, abs_num = map(lambda x: tryInt(row[x], None), ('season', 'episode', 'absolute_number'))
            if None is season and None is episode and None is not abs_num:
                season, episode, _ = _get_sea(indexer, indexer_id, absolute_number=abs_num)

            if None is not season and None is not episode:
                scene_absolute_number = tryInt(row['scene_absolute_number'], None)
                if None is not scene_absolute_number:
                    result[(season, episode)] = scene_absolute_number

    return result
Example #16
def wanted_episodes(show, from_date, make_dict=False, unaired=False):

    ep_count, ep_count_scene, sql_results_org = get_aired_in_season(show, return_sql=True)

    from_date_ord = from_date.toordinal()
    if unaired:
        sql_results = [s for s in sql_results_org if s['airdate'] > from_date_ord or s['airdate'] == 1]
    else:
        sql_results = [s for s in sql_results_org if s['airdate'] > from_date_ord]

    if make_dict:
        wanted = {}
    else:
        wanted = []

    total_wanted = total_replacing = total_unaired = 0

    if 0 < len(sql_results) and 2 < len(sql_results) - len(show.episodes):
        myDB = db.DBConnection()
        show_ep_sql = myDB.select('SELECT * FROM tv_episodes WHERE showid = ? AND indexer = ?',
                                  [show.indexerid, show.indexer])
    else:
        show_ep_sql = None

    for result in sql_results:
        ep_obj = show.getEpisode(int(result['season']), int(result['episode']), ep_sql=show_ep_sql)
        cur_status, cur_quality = common.Quality.splitCompositeStatus(ep_obj.status)
        ep_obj.wantedQuality = get_wanted_qualities(ep_obj, cur_status, cur_quality, unaired=unaired)
        if not ep_obj.wantedQuality:
            continue

        ep_obj.eps_aired_in_season = ep_count.get(helpers.tryInt(result['season']), 0)
        ep_obj.eps_aired_in_scene_season = ep_count_scene.get(
            helpers.tryInt(result['scene_season']), 0) if result['scene_season'] else ep_obj.eps_aired_in_season
        if make_dict:
            wanted.setdefault(ep_obj.scene_season if ep_obj.show.is_scene else ep_obj.season, []).append(ep_obj)
        else:
            wanted.append(ep_obj)

        if cur_status in (common.WANTED, common.FAILED):
            total_wanted += 1
        elif cur_status in (common.UNAIRED, common.SKIPPED, common.IGNORED, common.UNKNOWN):
            total_unaired += 1
        else:
            total_replacing += 1

    if 0 < total_wanted + total_replacing + total_unaired:
        actions = []
        for msg, total in ['%d episode%s', total_wanted], \
                          ['to upgrade %d episode%s', total_replacing], \
                          ['%d unaired episode%s', total_unaired]:
            if 0 < total:
                actions.append(msg % (total, helpers.maybe_plural(total)))
        logger.log(u'We want %s for %s' % (' and '.join(actions), show.name))

    return wanted
Example #17
def wanted_episodes(show, from_date, make_dict=False, unaired=False):

    ep_count, ep_count_scene, sql_results_org = get_aired_in_season(show, return_sql=True)

    from_date_ord = from_date.toordinal()
    if unaired:
        sql_results = [s for s in sql_results_org if s['airdate'] > from_date_ord or s['airdate'] == 1]
    else:
        sql_results = [s for s in sql_results_org if s['airdate'] > from_date_ord]

    if make_dict:
        wanted = {}
    else:
        wanted = []

    total_wanted = total_replacing = total_unaired = 0

    if 0 < len(sql_results) and 2 < len(sql_results) - len(show.episodes):
        myDB = db.DBConnection()
        show_ep_sql = myDB.select('SELECT * FROM tv_episodes WHERE showid = ? AND indexer = ?',
                                  [show.indexerid, show.indexer])
    else:
        show_ep_sql = None

    for result in sql_results:
        ep_obj = show.getEpisode(int(result['season']), int(result['episode']), ep_sql=show_ep_sql)
        cur_status, cur_quality = common.Quality.splitCompositeStatus(ep_obj.status)
        ep_obj.wantedQuality = get_wanted_qualities(ep_obj, cur_status, cur_quality, unaired=unaired)
        if not ep_obj.wantedQuality:
            continue

        ep_obj.eps_aired_in_season = ep_count.get(helpers.tryInt(result['season']), 0)
        ep_obj.eps_aired_in_scene_season = ep_count_scene.get(
            helpers.tryInt(result['scene_season']), 0) if result['scene_season'] else ep_obj.eps_aired_in_season
        if make_dict:
            wanted.setdefault(ep_obj.scene_season if ep_obj.show.is_scene else ep_obj.season, []).append(ep_obj)
        else:
            wanted.append(ep_obj)

        if cur_status in (common.WANTED, common.FAILED):
            total_wanted += 1
        elif cur_status in (common.UNAIRED, common.SKIPPED, common.IGNORED, common.UNKNOWN):
            total_unaired += 1
        else:
            total_replacing += 1

    if 0 < total_wanted + total_replacing + total_unaired:
        actions = []
        for msg, total in ['%d episode%s', total_wanted], \
                          ['to upgrade %d episode%s', total_replacing], \
                          ['%d unaired episode%s', total_unaired]:
            if 0 < total:
                actions.append(msg % (total, helpers.maybe_plural(total)))
        logger.log(u'We want %s for %s' % (' and '.join(actions), show.name))

    return wanted
Example #18
def parse_date_time(d, t, network):
    if network_dict is None:
        load_network_dict()
    mo = time_regex.search(t)
    if mo is not None and len(mo.groups()) >= 5:
        if mo.group(5) is not None:
            try:
                hr = helpers.tryInt(mo.group(1))
                m = helpers.tryInt(mo.group(4))
                ap = mo.group(5)
                # convert am/pm to 24 hour clock
                if ap is not None:
                    if pm_regex.search(ap) is not None and hr != 12:
                        hr += 12
                    elif am_regex.search(ap) is not None and hr == 12:
                        hr -= 12
            except:
                hr = 0
                m = 0
        else:
            try:
                hr = helpers.tryInt(mo.group(1))
                m = helpers.tryInt(mo.group(6))
            except:
                hr = 0
                m = 0
    else:
        hr = 0
        m = 0
    if hr < 0 or hr > 23 or m < 0 or m > 59:
        hr = 0
        m = 0

    te = datetime.datetime.fromordinal(helpers.tryInt(d))
    try:
        if sickbeard.TIMEZONE_DISPLAY == 'local':
            foreign_timezone = get_network_timezone(network, network_dict)
            foreign_naive = datetime.datetime(te.year,
                                              te.month,
                                              te.day,
                                              hr,
                                              m,
                                              tzinfo=foreign_timezone)
            return foreign_naive.astimezone(sb_timezone)
        else:
            return datetime.datetime(te.year,
                                     te.month,
                                     te.day,
                                     hr,
                                     m,
                                     tzinfo=sb_timezone)
    except:
        return datetime.datetime(te.year, te.month, te.day, hr, m)
Example #19
    def _episode_strings(self, ep_obj):

        search_params = []
        base_params = {}

        if not ep_obj:
            return [base_params]

        ep_detail = None
        if ep_obj.show.air_by_date or ep_obj.show.is_sports:
            airdate = str(ep_obj.airdate).split('-')
            base_params['season'] = airdate[0]
            if ep_obj.show.air_by_date:
                base_params['ep'] = '/'.join(airdate[1:])
                ep_detail = '+"%s.%s"' % (base_params['season'], '.'.join(airdate[1:]))
        elif ep_obj.show.is_anime:
            base_params['ep'] = '%i' % (helpers.tryInt(ep_obj.scene_absolute_number) or
                                        helpers.tryInt(ep_obj.scene_episode))
            ep_detail = '%02d' % helpers.tryInt(base_params['ep'])
        else:
            base_params['season'], base_params['ep'] = (
                (ep_obj.season, ep_obj.episode), (ep_obj.scene_season, ep_obj.scene_episode))[ep_obj.show.is_scene]
            ep_detail = sickbeard.config.naming_ep_type[2] % {
                'seasonnumber': helpers.tryInt(base_params['season'], 1),
                'episodenumber': helpers.tryInt(base_params['ep'], 1)}

        # id search
        params = base_params.copy()
        use_id = False
        for i in sickbeard.indexerApi().all_indexers:
            if i in ep_obj.show.ids and 0 < ep_obj.show.ids[i]['id'] and i in self.caps:
                params[self.caps[i]] = ep_obj.show.ids[i]['id']
                use_id = True
        use_id and search_params.append(params)

        spacer = 'nzbgeek.info' in self.url.lower() and ' ' or '.'
        # query search and exceptions
        name_exceptions = get_show_names(ep_obj, spacer)
        if sickbeard.scene_exceptions.has_abs_episodes(ep_obj):
            search_params.append({'q': '%s%s%s' % (ep_obj.show.name, spacer, base_params['ep'])})
        for cur_exception in name_exceptions:
            params = base_params.copy()
            params['q'] = cur_exception
            search_params.append(params)

            if ep_detail:
                params = base_params.copy()
                params['q'] = '%s%s%s' % (cur_exception, spacer, ep_detail)
                'season' in params and params.pop('season')
                'ep' in params and params.pop('ep')
                search_params.append(params)

        return [{'Episode': search_params}]
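
Lines such as `use_id and search_params.append(params)` and `'season' in params and params.pop('season')` above use short-circuit `and` as a one-line conditional: the right-hand call only runs when the left side is truthy. For example:

search_params = []
use_id = True
use_id and search_params.append({'q': 'example'})   # left side truthy, append runs
use_id = False
use_id and search_params.append({'q': 'skipped'})   # left side falsy, append never runs
# search_params is now [{'q': 'example'}]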
Example #20
    def __init__(self, name, url, cookies='', search_mode='eponly', search_fallback=False,
                 enable_recentsearch=False, enable_backlog=False, enable_scheduled_backlog=False):
        self.enable_backlog = bool(tryInt(enable_backlog))
        self.enable_scheduled_backlog = bool(tryInt(enable_scheduled_backlog))
        generic.TorrentProvider.__init__(self, name, supports_backlog=self.enable_backlog, cache_update_freq=15)

        self.url = url.rstrip('/')
        self.url_base = self.url
        self.cookies = cookies

        self.enable_recentsearch = bool(tryInt(enable_recentsearch)) or not self.enable_backlog
        self.search_mode = search_mode
        self.search_fallback = bool(tryInt(search_fallback))
Example #21
def parse_date_time(d, t, network):
    """
    Parse date and time string into local time

    :param d: date string
    :param t: time string
    :param network: network to use as base
    :return: datetime object containing local time
    """

    if not network_dict:
        load_network_dict()

    mo = time_regex.search(t)
    if mo is not None and len(mo.groups()) >= 5:
        if mo.group(5) is not None:
            try:
                hr = helpers.tryInt(mo.group(1))
                m = helpers.tryInt(mo.group(4))
                ap = mo.group(5)
                # convert am/pm to 24 hour clock
                if ap is not None:
                    if pm_regex.search(ap) is not None and hr != 12:
                        hr += 12
                    elif am_regex.search(ap) is not None and hr == 12:
                        hr -= 12
            except Exception:
                hr = 0
                m = 0
        else:
            try:
                hr = helpers.tryInt(mo.group(1))
                m = helpers.tryInt(mo.group(6))
            except Exception:
                hr = 0
                m = 0
    else:
        hr = 0
        m = 0
    if hr < 0 or hr > 23 or m < 0 or m > 59:
        hr = 0
        m = 0

    te = datetime.datetime.fromordinal(helpers.tryInt(d))
    try:
        foreign_timezone = get_network_timezone(network, network_dict)
        foreign_naive = datetime.datetime(te.year, te.month, te.day, hr, m, tzinfo=foreign_timezone)
        return foreign_naive
    except Exception:
        return datetime.datetime(te.year, te.month, te.day, hr, m, tzinfo=sb_timezone)
Example #22
def buildNameCache(show=None):
    """Adds all new name exceptions to the namecache memory and flushes any removed name exceptions

    :param show (optional): Only update namecache for this show object
    """
    global nameCache
    with nameCacheLock:

        if show:
            # search for only the requested show id and flush old show entries from namecache
            indexer_ids = [show.indexerid]
            nameCache = dict((k, v) for k, v in nameCache.items() if v != show.indexerid)

            # add standard indexer name to namecache
            nameCache[sickbeard.helpers.full_sanitizeSceneName(show.name)] = [show.indexerid, -1]
        else:
            # generate list of indexer ids to look up in cache.db
            indexer_ids = [x.indexerid for x in sickbeard.showList if x]

            # add all standard show indexer names to namecache
            nameCache = dict(
                (sickbeard.helpers.full_sanitizeSceneName(x.name), [x.indexerid, -1]) for x in sickbeard.showList if x)

        cacheDB = db.DBConnection()

        cache_results = cacheDB.select(
            'SELECT show_name, indexer_id, season FROM scene_exceptions WHERE indexer_id IN (%s)' % ','.join(
                ['?'] * len(indexer_ids)), indexer_ids)

        if cache_results:
            for cache_result in cache_results:
                indexer_id = int(cache_result['indexer_id'])
                season = tryInt(cache_result['season'], -1)
                name = sickbeard.helpers.full_sanitizeSceneName(cache_result['show_name'])
                nameCache[name] = [indexer_id, season]
Example #23
    def _search_provider(self, search_params, age=0, **kwargs):

        results = []
        if not self._authorised():
            return results

        items = {'Cache': [], 'Season': [], 'Episode': [], 'Propers': []}

        params = {'username': self.username, 'apikey': self.api_key,
                  'tv': 'true', 'fl': ('true', None)[not self.freeleech]}
        for mode in search_params.keys():
            for search_string in search_params[mode]:
                params['search'] = '+'.join(search_string.split())
                json_resp = self.get_url(self.urls['search'], params=params, json=True)

                cnt = len(items[mode])
                for k, item in json_resp.items():
                    if 'error' == k or not item.get('total_results'):
                        break
                    seeders, leechers, size = [tryInt(n, n) for n in [
                        item.get(x) for x in 'seeders', 'leechers', 'size']]
                    if self._peers_fail(mode, seeders, leechers):
                        continue
                    title = item.get('release_name')
                    tid, tpass = [item.get('torrent' + x) for x in 'id', 'pass']
                    download_url = all([tid, tpass]) and (self.urls['get'] % (tid, tpass))
                    if title and download_url:
                        items[mode].append((title, download_url, seeders, self._bytesizer('%smb' % size)))

                self._log_search(mode, len(items[mode]) - cnt, self.session.response['url'])
                time.sleep(1.1)
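
The comprehension `[item.get(x) for x in 'seeders', 'leechers', 'size']` above (and `for x in 'id', 'pass'` just below it) relies on a bare tuple after `for ... in`, which is Python 2 only syntax; Python 3 requires parentheses around the tuple:

item = {'seeders': '12', 'leechers': '3', 'size': 700}   # illustrative values
# Python 3 compatible form of the same lookup (parentheses required):
values = [item.get(x) for x in ('seeders', 'leechers', 'size')]   # ['12', '3', 700]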
Example #24
    def _parseItem(self, item, ns):

        title = item.findtext('title')
        url = item.findtext('link')

        tvrageid = 0
        # don't use xpath because of the python 2.5 compatibility
        for subitem in item.findall(ns['newznab'] + 'attr'):
            if subitem.get('name') == "rageid":
                tvrageid = helpers.tryInt(subitem.get('value'))
                break

        self._checkItemAuth(title, url)

        if not title or not url:
            logger.log(
                u"The XML returned from the " + self.provider.name +
                " feed is incomplete, this result is unusable", logger.ERROR)
            return

        url = self._translateLinkURL(url)

        logger.log(u"Adding item from RSS to cache: " + title, logger.DEBUG)

        return self._addCacheEntry(title, url, tvrage_id=tvrageid)
Example #25
    def get_media_info(video_node):

        progress = 0
        if None is not video_node.get('viewOffset') and None is not video_node.get('duration'):
            progress = tryInt(video_node.get('viewOffset')) * 100 / tryInt(video_node.get('duration'))

        for media in video_node.findall('Media'):
            for part in media.findall('Part'):
                file_name = part.get('file')
                # if '3' > sys.version:  # remove HTML quoted characters, only works in python < 3
                #     file_name = urllib2.unquote(file_name.encode('utf-8', errors='replace'))
                # else:
                file_name = urllib2.unquote(file_name)

                return {'path_file': file_name, 'media_id': video_node.get('ratingKey'),
                        'played': int(video_node.get('viewCount') or 0), 'progress': progress}
Example #26
def _xem_get_ids(indexer_name, xem_origin):
    xem_ids = []

    url = 'http://thexem.de/map/havemap?origin=%s' % xem_origin

    task = 'Fetching show ids with%s xem scene mapping%s for origin'
    logger.log(u'%s %s' % (task % ('', 's'), indexer_name))
    parsed_json = helpers.getURL(url, json=True, timeout=90)
    if not parsed_json:
        logger.log(u'Failed %s %s, Unable to get URL: %s'
                   % (task.lower() % ('', 's'), indexer_name, url), logger.ERROR)
    else:
        if 'result' in parsed_json and 'success' == parsed_json['result'] and 'data' in parsed_json:
            try:
                for indexerid in parsed_json['data']:
                    xem_id = helpers.tryInt(indexerid)
                    if xem_id and xem_id not in xem_ids:
                        xem_ids.append(xem_id)
            except:
                pass
            if 0 == len(xem_ids):
                logger.log(u'Failed %s %s, no data items parsed from URL: %s'
                           % (task.lower() % ('', 's'), indexer_name, url), logger.WARNING)

    logger.log(u'Finished %s %s' % (task.lower() % (' %s' % len(xem_ids), helpers.maybe_plural(len(xem_ids))),
                                    indexer_name))
    return xem_ids
Example #27
def parse_date_time(d, t, network):

    if isinstance(t, tuple) and len(t) == 2 and isinstance(
            t[0], int) and isinstance(t[1], int):
        (hr, m) = t
    else:
        (hr, m) = parse_time(t)

    te = datetime.datetime.fromordinal(helpers.tryInt(d))
    try:
        if isinstance(network, datetime.tzinfo):
            foreign_timezone = network
        else:
            foreign_timezone = get_network_timezone(network)
        foreign_naive = datetime.datetime(te.year,
                                          te.month,
                                          te.day,
                                          hr,
                                          m,
                                          tzinfo=foreign_timezone)
        return foreign_naive
    except:
        return datetime.datetime(te.year,
                                 te.month,
                                 te.day,
                                 hr,
                                 m,
                                 tzinfo=sb_timezone)
Example #28
    def _change_missing_episodes():
        if not network_timezones.network_dict:
            network_timezones.update_network_dict()

        if network_timezones.network_dict:
            cur_date = (datetime.date.today() + datetime.timedelta(days=1)).toordinal()
        else:
            cur_date = (datetime.date.today() - datetime.timedelta(days=2)).toordinal()

        cur_time = datetime.datetime.now(network_timezones.sb_timezone)

        my_db = db.DBConnection()
        sql_results = my_db.select(
            'SELECT * FROM tv_episodes'
            ' WHERE status = ? AND season > 0 AND airdate <= ? AND airdate > 1'
            ' ORDER BY showid', [common.UNAIRED, cur_date])

        sql_l = []
        show = None
        wanted = False

        for sqlEp in sql_results:
            try:
                if not show or show.indexerid != int(sqlEp['showid']):
                    show = helpers.findCertainShow(sickbeard.showList, int(sqlEp['showid']))

                # for when there is orphaned series in the database but not loaded into our showlist
                if not show:
                    continue

            except exceptions.MultipleShowObjectsException:
                logger.log(u'ERROR: expected to find a single show matching %s' % sqlEp['showid'])
                continue

            try:
                end_time = (network_timezones.parse_date_time(sqlEp['airdate'], show.airs, show.network) +
                            datetime.timedelta(minutes=helpers.tryInt(show.runtime, 60)))
                # filter out any episodes that haven't aired yet
                if end_time > cur_time:
                    continue
            except (StandardError, Exception):
                # if an error occurred assume the episode hasn't aired yet
                continue

            ep = show.getEpisode(int(sqlEp['season']), int(sqlEp['episode']))
            with ep.lock:
                # Now that it is time, change state of UNAIRED show into expected or skipped
                ep.status = (common.WANTED, common.SKIPPED)[ep.show.paused]
                result = ep.get_sql()
                if None is not result:
                    sql_l.append(result)
                    wanted |= (False, True)[common.WANTED == ep.status]
        else:
            logger.log(u'No unaired episodes marked wanted')

        if 0 < len(sql_l):
            my_db = db.DBConnection()
            my_db.mass_action(sql_l)
            if wanted:
                logger.log(u'Found new episodes marked wanted')
Example #29
def get_tvmaze_by_name(showname, premiere_date):
    ids = {}
    try:
        url = '%ssearch/shows?%s' % (
            sickbeard.indexerApi(INDEXER_TVMAZE).config['base_url'],
            urlencode({'q': clean_show_name(showname)}))
        res = get_tvmaze_data(url=url,
                              json=True,
                              raise_status_code=True,
                              timeout=120)
        if res:
            for r in res:
                if 'show' in r and 'premiered' in r[
                        'show'] and 'externals' in r['show']:
                    premiered = parse(r['show']['premiered'], fuzzy=True)
                    if abs(premiere_date -
                           premiered.date()) < datetime.timedelta(days=2):
                        ids[INDEXER_TVRAGE] = r['show']['externals'].get(
                            'tvrage', 0)
                        ids[INDEXER_TVDB] = r['show']['externals'].get(
                            'thetvdb', 0)
                        ids[INDEXER_IMDB] = tryInt(
                            str(r['show']['externals'].get('imdb')).replace(
                                'tt', ''))
                        ids[INDEXER_TVMAZE] = r['show'].get('id', 0)
                        break
    except (StandardError, Exception):
        pass
    return {k: v for k, v in ids.iteritems() if v not in (None, '', 0)}
Example #30
    def _search_provider(self, search_params, **kwargs):

        results = []
        if self.show and not self.show.is_anime:
            return results

        items = {'Season': [], 'Episode': [], 'Propers': []}

        rc = dict((k, re.compile('(?i)' + v)) for (k, v) in {
            'nodots': r'[\.\s]+', 'stats': r'S:\s*?(\d)+\s*L:\s*(\d+)', 'size': r'size:\s*(\d+[.,]\d+\w+)'}.iteritems())

        for mode in search_params.keys():
            for search_string in search_params[mode]:
                params = urllib.urlencode({'terms': rc['nodots'].sub(' ', search_string).encode('utf-8'), 'type': 1})

                search_url = '%ssearch.php?%s' % (self.url, params)

                html = self.get_url(search_url)
                if self.should_skip():
                    return self._sort_seeding(mode, results)

                cnt = len(items[mode])
                try:
                    if not html or self._has_no_results(html):
                        raise generic.HaltParseException

                    with BS4Parser(html, parse_only=dict(table={'class': (lambda at: at and 'listing' in at)})) as tbl:
                        tbl_rows = [] if not tbl else tbl.find_all('tr')
                        if tbl_rows:
                            a = (0, 1)[None is not tbl_rows[0].find('td', class_='centertext')]

                            for top, bottom in zip(tbl_rows[a::2], tbl_rows[a+1::2]):
                                try:
                                    bottom_text = bottom.get_text() or ''
                                    stats = rc['stats'].findall(bottom_text)
                                    seeders, leechers = (0, 0) if not stats else [tryInt(n) for n in stats[0]]

                                    size = rc['size'].findall(bottom_text)
                                    size = size and size[0] or -1

                                    info = top.find('td', class_='desc-top')
                                    title = info and re.sub(r'[ .]{2,}', '.', info.get_text().strip())
                                    links = info and map(lambda l: l.get('href', ''), info.find_all('a')) or None
                                    download_url = self._link(
                                        (filter(lambda l: 'magnet:' in l, links)
                                         or filter(lambda l: not re.search(r'(magnet:|\.se).+', l), links))[0])
                                except (AttributeError, TypeError, ValueError, IndexError):
                                    continue

                                if title and download_url:
                                    items[mode].append((title, download_url, seeders, self._bytesizer(size)))

                except (BaseException, Exception):
                    time.sleep(1.1)

                self._log_search(mode, len(items[mode]) - cnt, search_url)

            results = self._sort_seeding(mode, results + items[mode])

        return results
Example #31
def _xem_get_ids(indexer_name, xem_origin):
    xem_ids = []

    url = 'http://thexem.de/map/havemap?origin=%s' % xem_origin

    task = 'Fetching show ids with%s xem scene mapping%s for origin'
    logger.log(u'%s %s' % (task % ('', 's'), indexer_name))
    parsed_json = helpers.getURL(url, json=True, timeout=90)
    if not parsed_json:
        logger.log(
            u'Failed %s %s, Unable to get URL: %s' %
            (task.lower() % ('', 's'), indexer_name, url), logger.ERROR)
    else:
        if 'result' in parsed_json and 'success' == parsed_json[
                'result'] and 'data' in parsed_json:
            try:
                for indexerid in parsed_json['data']:
                    xem_id = helpers.tryInt(indexerid)
                    if xem_id and xem_id not in xem_ids:
                        xem_ids.append(xem_id)
            except:
                pass
            if 0 == len(xem_ids):
                logger.log(
                    u'Failed %s %s, no data items parsed from URL: %s' %
                    (task.lower() % ('', 's'), indexer_name, url),
                    logger.WARNING)

    logger.log(u'Finished %s %s' %
               (task.lower() %
                (' %s' % len(xem_ids), helpers.maybe_plural(len(xem_ids))),
                indexer_name))
    return xem_ids
Example #32
    def _search_provider(self, search_params, **kwargs):

        results = []
        if not self._authorised():
            return results

        items = {'Cache': [], 'Season': [], 'Episode': [], 'Propers': []}

        for mode in search_params.keys():
            for search_string in search_params[mode]:
                search_string = isinstance(search_string, unicode) and unidecode(search_string) or search_string
                search_url = self.urls['search'] % search_string

                data_json = self.get_url(search_url, headers=dict(Authorization='Bearer %s' % self._token), json=True)
                if self.should_skip():
                    return results

                cnt = len(items[mode])
                if data_json:
                    for tr in data_json.get('releases'):
                        seeders, leechers, size = (tryInt(n, n) for n in [
                            tr.get(x) for x in ('seeders', 'leechers', 'size')])
                        if not self._reject_item(seeders, leechers):
                            title, download_url = tr.get('releaseName'), self._link(tr.get('shortId'))
                            if title and download_url:
                                items[mode].append((title, download_url, seeders, self._bytesizer(size)))

                self._log_search(mode, len(items[mode]) - cnt, search_url)

            results = self._sort_seeding(mode, results + items[mode])

        return results
Example #33
    def _search_provider(self, search_params, **kwargs):

        results = []
        if not self._authorised():
            return results

        items = {'Cache': [], 'Season': [], 'Episode': [], 'Propers': []}

        rc = dict((k, re.compile('(?i)' + v))
                  for (k, v) in {'info': 'view', 'get': 'download', 'title': 'view\s+torrent\s+'}.items())
        for mode in search_params.keys():
            for search_string in search_params[mode]:
                search_string = isinstance(search_string, unicode) and unidecode(search_string) or search_string
                # fetch 15 results by default, and up to 100 if allowed in user profile
                search_url = self.urls['search'] % (search_string, self._categories_string(mode, 'filter_cat[%s]=1'))

                html = self.get_url(search_url)

                cnt = len(items[mode])
                try:
                    if not html or self._has_no_results(html):
                        raise generic.HaltParseException

                    with BS4Parser(html, features=['html5lib', 'permissive']) as soup:
                        torrent_table = soup.find('table', attrs={'class': 'torrent_table'})
                        torrent_rows = [] if not torrent_table else torrent_table.find_all('tr')

                        if 2 > len(torrent_rows):
                            raise generic.HaltParseException

                        for tr in torrent_rows[1:]:
                            try:
                                seeders, leechers, size = [tryInt(n, n) for n in [
                                    tr.find_all('td')[x].get_text().strip() for x in (-2, -1, -4)]]
                                if self._peers_fail(mode, seeders, leechers):
                                    continue

                                info = tr.find('a', title=rc['info'])
                                title = 'title' in info.attrs and rc['title'].sub('', info.attrs['title']) \
                                        or info.get_text().strip()

                                link = str(tr.find('a', title=rc['get'])['href']).replace('&amp;', '&').lstrip('/')
                                download_url = self.urls['get'] % link
                            except (AttributeError, TypeError, ValueError):
                                continue

                            if title and download_url:
                                items[mode].append((title, download_url, seeders, self._bytesizer(size)))

                except generic.HaltParseException:
                    pass
                except Exception:
                    logger.log(u'Failed to parse. Traceback: %s' % traceback.format_exc(), logger.ERROR)
                self._log_search(mode, len(items[mode]) - cnt, search_url)

            self._sort_seeders(mode, items)

            results = list(set(results + items[mode]))

        return results
Example #34
    def _doSearch(self,
                  search_strings,
                  search_mode='eponly',
                  epcount=0,
                  age=0,
                  epObj=None):

        results = []
        items = {'Season': [], 'Episode': [], 'RSS': []}

        for mode in search_strings.keys():
            for search_string in search_strings[mode]:
                try:
                    self.search_params.update({
                        'type': ('search', 'rss')[mode == 'RSS'],
                        'search':
                        search_string.strip()
                    })
                    data = self.getURL(self.urls['rss'],
                                       params=self.search_params)
                    if not data:
                        continue

                    data = xmltodict.parse(data)
                    for item in data['rss']['channel']['item']:
                        title = item['title']
                        info_hash = item['info_hash']
                        url = item['enclosure']['@url']
                        size = int(item['enclosure']['@length']
                                   or item['size'])
                        seeders = helpers.tryInt(item['seeders'], 0)
                        leechers = helpers.tryInt(item['leechers'], 0)

                        if not seeders or seeders < self.minseed or leechers < self.minleech:
                            continue

                        items[mode].append(
                            (title, url, seeders, leechers, size, info_hash))

                except Exception:
                    logger.log(
                        u"Failed parsing " + self.name + " Traceback: " +
                        traceback.format_exc(), logger.ERROR)

            results += items[mode]

        return results
Example #35
    def _search_provider(self, search_params, **kwargs):

        results = []
        if not self._authorised():
            return results

        items = {'Cache': [], 'Season': [], 'Episode': [], 'Propers': []}

        rc = dict((k, re.compile('(?i)' + v)) for (k, v) in {'info': 'detail', 'get': 'download', 'fl': '\[\W*F\W?L\W*\]'
                                                             }.items())
        for mode in search_params.keys():
            for search_string in search_params[mode]:
                search_string = isinstance(search_string, unicode) and unidecode(search_string) or search_string
                search_url = self.urls['search'] % (search_string, self._categories_string())

                html = self.get_url(search_url, timeout=90)

                cnt = len(items[mode])
                try:
                    if not html or self._has_no_results(html):
                        raise generic.HaltParseException

                    with BS4Parser(html, features=['html5lib', 'permissive'], attr='border="1"') as soup:
                        torrent_table = soup.find('table', attrs={'border': '1'})
                        torrent_rows = [] if not torrent_table else torrent_table.find_all('tr')

                        if 2 > len(torrent_rows):
                            raise generic.HaltParseException

                        for tr in torrent_rows[1:]:
                            try:
                                info = tr.find('a', href=rc['info'])
                                seeders, leechers, size = [tryInt(n, n) for n in [
                                    tr.find_all('td')[x].get_text().strip() for x in (-2, -1, -4)]]
                                if self.freeleech and (len(info.contents) < 2 or not rc['fl'].search(info.contents[1].string.strip())) \
                                        or self._peers_fail(mode, seeders, leechers):
                                    continue

                                title = 'title' in info.attrs and info.attrs['title'] or info.contents[0]
                                title = (isinstance(title, list) and title[0] or title).strip()
                                download_url = self.urls['get'] % str(tr.find('a', href=rc['get'])['href']).lstrip('/')
                            except (AttributeError, TypeError, ValueError):
                                continue

                            if title and download_url:
                                items[mode].append((title, download_url, seeders, self._bytesizer(size)))

                except generic.HaltParseException:
                    pass
                except Exception:
                    logger.log(u'Failed to parse. Traceback: %s' % traceback.format_exc(), logger.ERROR)

                self._log_search(mode, len(items[mode]) - cnt, search_url)

            self._sort_seeders(mode, items)

            results = list(set(results + items[mode]))

        return results
Example #36
    def html(self, mode, search_string, results):

        if 'Content-Type' in self.session.headers:
            del (self.session.headers['Content-Type'])
        setattr(self.session, 'reserved', {'headers': {
            'Accept': 'text/html, application/xhtml+xml, */*', 'Accept-Language': 'en-GB',
            'Cache-Control': 'no-cache', 'Referer': 'https://broadcasthe.net/login.php', 'User-Agent': self.ua}})
        self.headers = None

        if self.auth_html or self._authorised_html():
            del (self.session.reserved['headers']['Referer'])
            if 'Referer' in self.session.headers:
                del (self.session.headers['Referer'])
            self.auth_html = True

            search_string = isinstance(search_string, unicode) and unidecode(search_string) or search_string
            search_url = self.urls['search'] % (search_string, self._categories_string(mode, 'filter_cat[%s]=1'))

            html = self.get_url(search_url, use_tmr_limit=False)
            if self.should_skip(log_warning=False, use_tmr_limit=False):
                return results

            cnt = len(results)
            try:
                if not html or self._has_no_results(html):
                    raise generic.HaltParseException

                with BS4Parser(html, features=['html5lib', 'permissive']) as soup:
                    torrent_table = soup.find(id='torrent_table')
                    torrent_rows = [] if not torrent_table else torrent_table.find_all('tr')

                    if 2 > len(torrent_rows):
                        raise generic.HaltParseException

                    rc = dict((k, re.compile('(?i)' + v)) for (k, v) in {
                        'cats': r'cat\[(?:%s)\]' % self._categories_string(mode, template='', delimiter='|'),
                        'get': 'download'}.items())

                    head = None
                    for tr in torrent_rows[1:]:
                        cells = tr.find_all('td')
                        if 5 > len(cells):
                            continue
                        try:
                            head = head if None is not head else self._header_row(tr)
                            seeders, leechers, size = [tryInt(n, n) for n in [
                                cells[head[x]].get_text().strip() for x in 'seed', 'leech', 'size']]
                            if not tr.find('a', href=rc['cats']) or self._reject_item(
                                    seeders, leechers, container=self.reject_m2ts and (
                                            re.search(r'(?i)\[.*?m2?ts.*?\]', tr.get_text('', strip=True)))):
                                continue

                            title = tr.select('td span[title]')[0].attrs.get('title').strip()
                            download_url = self._link(tr.find('a', href=rc['get'])['href'])
                        except (AttributeError, TypeError, ValueError, KeyError, IndexError):
                            continue

                        if title and download_url:
                            results.append((title, download_url, seeders, self._bytesizer(size)))
Exemplo n.º 37
0
    def _episode_strings(self, ep_obj, **kwargs):

        return super(TVChaosUKProvider, self)._episode_strings(ep_obj, scene=False, prefix='%', date_detail=(
            lambda d: [x.strip('0') for x in (
                ['{0} {1}% {2}'.format(d.strftime('%d')[-1], d.strftime('%b'), d.strftime('%Y'))] +
                [d.strftime('%d %b %Y')] + ([d.strftime('%d %B %Y')], [])[d.strftime('%b') == d.strftime('%B')])]),
            ep_detail=(lambda e: [naming_ep_type[2] % e] + (
                [], ['%(episodenumber)dof' % e])[1 == tryInt(e.get('seasonnumber'))]), **kwargs)
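
For a concrete sense of what the date_detail lambda above produces, here it is applied to a fixed date (assuming English month names from strftime); the '%' acts as the site's wildcard and leading zeros are stripped:

import datetime

d = datetime.date(2015, 3, 5)
patterns = [x.strip('0') for x in (
    ['{0} {1}% {2}'.format(d.strftime('%d')[-1], d.strftime('%b'), d.strftime('%Y'))] +
    [d.strftime('%d %b %Y')] + ([d.strftime('%d %B %Y')], [])[d.strftime('%b') == d.strftime('%B')])]
print(patterns)  # ['5 Mar% 2015', '5 Mar 2015', '5 March 2015']

Note that str.strip('0') also trims trailing zeros, so a year ending in 0 (e.g. 2020) would lose its last digit in these strings.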
Exemplo n.º 38
0
    def html(self, mode, search_string, results):

        if 'Content-Type' in self.session.headers:
            del (self.session.headers['Content-Type'])
        setattr(self.session, 'reserved', {'headers': {
            'Accept': 'text/html, application/xhtml+xml, */*', 'Accept-Language': 'en-GB',
            'Cache-Control': 'no-cache', 'Referer': 'https://broadcasthe.net/login.php', 'User-Agent': self.ua}})
        self.headers = None

        if self.auth_html or self._authorised_html():
            del (self.session.reserved['headers']['Referer'])
            if 'Referer' in self.session.headers:
                del (self.session.headers['Referer'])
            self.auth_html = True

            search_string = isinstance(search_string, unicode) and unidecode(search_string) or search_string
            search_url = self.urls['search'] % (search_string, self._categories_string(mode, 'filter_cat[%s]=1'))

            html = self.get_url(search_url, use_tmr_limit=False)
            if self.should_skip(log_warning=False, use_tmr_limit=False):
                return results

            cnt = len(results)
            try:
                if not html or self._has_no_results(html):
                    raise generic.HaltParseException

                with BS4Parser(html, features=['html5lib', 'permissive']) as soup:
                    torrent_table = soup.find(id='torrent_table')
                    torrent_rows = [] if not torrent_table else torrent_table.find_all('tr')

                    if 2 > len(torrent_rows):
                        raise generic.HaltParseException

                    rc = dict((k, re.compile('(?i)' + v)) for (k, v) in {
                        'cats': r'cat\[(?:%s)\]' % self._categories_string(mode, template='', delimiter='|'),
                        'get': 'download'}.items())

                    head = None
                    for tr in torrent_rows[1:]:
                        cells = tr.find_all('td')
                        if 5 > len(cells):
                            continue
                        try:
                            head = head if None is not head else self._header_row(tr)
                            seeders, leechers, size = [tryInt(n, n) for n in [
                                cells[head[x]].get_text().strip() for x in 'seed', 'leech', 'size']]
                            if ((self.reject_m2ts and re.search(r'(?i)\[.*?m2?ts.*?\]', tr.get_text('', strip=True))) or
                                    self._peers_fail(mode, seeders, leechers) or not tr.find('a', href=rc['cats'])):
                                continue

                            title = tr.select('td span[title]')[0].attrs.get('title').strip()
                            download_url = self._link(tr.find('a', href=rc['get'])['href'])
                        except (AttributeError, TypeError, ValueError, KeyError, IndexError):
                            continue

                        if title and download_url:
                            results.append((title, download_url, seeders, self._bytesizer(size)))
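
The search_string = isinstance(search_string, unicode) and unidecode(search_string) or search_string idiom used throughout these providers is Python 2 specific (unicode does not exist in Python 3) and silently falls back to the original string whenever unidecode returns an empty result. Assuming the intent is simply to ASCII-fold unicode input before it is interpolated into a search URL, a clearer version-agnostic equivalent is:

from unidecode import unidecode

def normalise_search_string(search_string):
    # Fold accented/unicode text to plain ASCII before building the search URL.
    try:
        text_type = unicode   # Python 2
    except NameError:
        text_type = str       # Python 3
    if isinstance(search_string, text_type):
        return unidecode(search_string)
    return search_string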
Exemplo n.º 39
0
    @staticmethod
    def _change_missing_episodes():
        if not network_timezones.network_dict:
            network_timezones.update_network_dict()

        if network_timezones.network_dict:
            cur_date = (datetime.date.today() + datetime.timedelta(days=1)).toordinal()
        else:
            cur_date = (datetime.date.today() - datetime.timedelta(days=2)).toordinal()

        cur_time = datetime.datetime.now(network_timezones.sb_timezone)

        my_db = db.DBConnection()
        sql_results = my_db.select('SELECT * FROM tv_episodes WHERE status = ? AND season > 0 AND airdate <= ? AND airdate > 1',
                                   [common.UNAIRED, cur_date])

        sql_l = []
        show = None
        wanted = False

        for sqlEp in sql_results:
            try:
                if not show or show.indexerid != int(sqlEp['showid']):
                    show = helpers.findCertainShow(sickbeard.showList, int(sqlEp['showid']))

                # handle orphaned series that exist in the database but are not loaded into the show list
                if not show:
                    continue

            except exceptions.MultipleShowObjectsException:
                logger.log(u'ERROR: expected to find a single show matching %s' % sqlEp['showid'])
                continue

            try:
                end_time = (network_timezones.parse_date_time(sqlEp['airdate'], show.airs, show.network) +
                            datetime.timedelta(minutes=helpers.tryInt(show.runtime, 60)))
                # filter out any episodes that haven't aired yet
                if end_time > cur_time:
                    continue
            except:
                # if an error occurred assume the episode hasn't aired yet
                continue

            ep = show.getEpisode(int(sqlEp['season']), int(sqlEp['episode']))
            with ep.lock:
                # now that it has aired, flip the UNAIRED episode to wanted (or skipped if the show is paused)
                ep.status = (common.WANTED, common.SKIPPED)[ep.show.paused]
                result = ep.get_sql()
                if None is not result:
                    sql_l.append(result)
                    wanted |= (False, True)[common.WANTED == ep.status]
        if 0 < len(sql_l):
            my_db = db.DBConnection()
            my_db.mass_action(sql_l)

        if wanted:
            logger.log(u'Found new episodes marked wanted')
        else:
            logger.log(u'No unaired episodes marked wanted')
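
The eligibility test above boils down to: the episode's localised air time plus the show's runtime (defaulting to 60 minutes) must already have passed. A stripped-down illustration of that check, with a plain datetime standing in for the value network_timezones.parse_date_time would return:

import datetime

def has_finished_airing(air_datetime, runtime_minutes, now):
    # An UNAIRED episode only becomes eligible once air time + runtime is in the past;
    # default to an hour when the runtime is missing.
    end_time = air_datetime + datetime.timedelta(minutes=runtime_minutes or 60)
    return end_time <= now

aired = datetime.datetime(2015, 3, 5, 21, 0)
print(has_finished_airing(aired, 45, now=datetime.datetime(2015, 3, 5, 22, 0)))  # True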
Exemplo n.º 40
0
    def _search_provider(self, search_params, **kwargs):

        results = []
        if not self._authorised():
            return results

        items = {'Cache': [], 'Season': [], 'Episode': [], 'Propers': []}

        rc = dict((k, re.compile('(?i)' + v)) for (k, v) in {'info': 'detail', 'get': 'download'}.items())
        for mode in search_params.keys():
            for search_string in search_params[mode]:
                search_string = isinstance(search_string, unicode) and unidecode(search_string) or search_string
                search_url = self.urls['browse'] % (self._categories_string(), ('3', '0')[not self.freeleech],
                                                    (self.urls['search'] % search_string, '')['Cache' == mode])

                html = self.get_url(search_url)
                if self.should_skip():
                    return results

                cnt = len(items[mode])
                try:
                    if not html or self._has_no_results(html):
                        raise generic.HaltParseException

                    html = html.replace('<?xml version="1.0" encoding="iso-8859-1"?>', '')
                    html = re.sub(r'(</td>)[^<]*</td>', r'\1', html)
                    html = re.sub(r'(<a[^<]*)<a[^<]*?href=details[^<]*', r'\1', html)
                    with BS4Parser(html, 'html.parser') as soup:
                        shows_found = False
                        torrent_rows = soup.find_all('tr')
                        for index, row in enumerate(torrent_rows):
                            if 'type' == row.find_all('td')[0].get_text().strip().lower():
                                shows_found = index
                                break

                        if not shows_found or 2 > (len(torrent_rows) - shows_found):
                            raise generic.HaltParseException

                        head = None
                        for tr in torrent_rows[1 + shows_found:]:
                            cells = tr.find_all('td')
                            if 4 > len(cells):
                                continue
                            try:
                                head = head if None is not head else self._header_row(torrent_rows[shows_found])
                                seeders, leechers, size = [tryInt(n, n) for n in [
                                    cells[head[x]].get_text().strip() for x in 'seed', 'leech', 'size']]
                                if self._peers_fail(mode, seeders, leechers):
                                    continue

                                info = tr.find('a', href=rc['info'])
                                title = (info.attrs.get('title') or info.get_text()).strip()
                                download_url = self._link(tr.find('a', href=rc['get'])['href'])
                            except (AttributeError, TypeError, ValueError, KeyError):
                                continue

                            if title and download_url:
                                items[mode].append((title, download_url, seeders, self._bytesizer(size)))
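
The three html rewrites before parsing in this snippet (and its twin in Exemplo n.º 43) appear to strip stray text nodes and nested detail links that would otherwise trip up 'html.parser'. The middle substitution, for instance, drops an orphaned duplicate cell closer:

import re

row = '<td>1.46 GB</td> stray text</td><td>12</td>'
print(re.sub(r'(</td>)[^<]*</td>', r'\1', row))  # <td>1.46 GB</td><td>12</td>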
Exemplo n.º 41
0
    def _search_provider(self, search_params, **kwargs):

        results = []
        if not self._authorised():
            return results

        items = {'Cache': [], 'Season': [], 'Episode': [], 'Propers': []}

        rc = dict((k, re.compile('(?i)' + v)) for (k, v) in {
            'info': 'detail', 'get': 'download',
            'cats': 'cat=(?:%s)' % self._categories_string(template='', delimiter='|')}.items())
        for mode in search_params.keys():
            for search_string in search_params[mode]:
                search_string = isinstance(search_string, unicode) and unidecode(search_string) or search_string
                search_url = self.urls['search'] % (self._categories_string(), search_string)

                html = self.get_url(search_url)

                cnt = len(items[mode])
                try:
                    if not html or self._has_no_results(html):
                        raise generic.HaltParseException

                    with BS4Parser(html, features=['html5lib', 'permissive']) as soup:
                        torrent_table = soup.find('table', id='torrentsTable')
                        torrent_rows = [] if not torrent_table else torrent_table.find_all('tr')

                        if 2 > len(torrent_rows):
                            raise generic.HaltParseException

                        for tr in torrent_rows[1:]:
                            try:
                                seeders, leechers, size = [tryInt(n, n) for n in [
                                    tr.find_all('td')[x].get_text().strip() for x in (-2, -1, -3)]]
                                if None is tr.find('a', href=rc['cats']) or self._peers_fail(mode, seeders, leechers):
                                    continue

                                info = tr.find('a', href=rc['info'])
                                title = 'title' in info.attrs and info.attrs['title'] or info.get_text().strip()
                                download_url = self.urls['get'] % tr.find('a', href=rc['get']).get('href')
                            except (AttributeError, TypeError, ValueError):
                                continue

                            if title and download_url:
                                items[mode].append((title, download_url, seeders, self._bytesizer(size)))

                except generic.HaltParseException:
                    pass
                except Exception:
                    logger.log(u'Failed to parse. Traceback: %s' % traceback.format_exc(), logger.ERROR)

                self._log_search(mode, len(items[mode]) - cnt, search_url)

            self._sort_seeders(mode, items)

            results = list(set(results + items[mode]))

        return results
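
Every result tuple stores self._bytesizer(size), which is not reproduced in this listing. A hypothetical equivalent that converts human-readable sizes such as '300 MB' or '1.46 GB' into a byte count:

import re

def bytesizer(size_dim=''):
    # Hypothetical stand-in: turn '300 MB' / '1.46 GB' style strings into bytes,
    # returning the input unchanged when it does not look like a size.
    match = re.search(r'([\d.]+)\s*([TGMK]?)I?B', str(size_dim).upper())
    if not match:
        return size_dim
    value, unit = float(match.group(1)), match.group(2)
    return int(value * 1024 ** {'': 0, 'K': 1, 'M': 2, 'G': 3, 'T': 4}[unit])

print(bytesizer('300 MB'))  # 314572800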
Exemplo n.º 42
0
    def _search_provider(self, search_params, **kwargs):

        results = []
        if not self._authorised():
            return results

        items = {'Cache': [], 'Season': [], 'Episode': [], 'Propers': []}

        for mode in search_params.keys():
            rc = dict((k, re.compile('(?i)' + v)) for (k, v) in {
                'info': 'detail', 'get': '.*id=(\d+).*', 'fl': '\[freeleech\]',
                'cats': 'cat=(?:%s)' % self._categories_string(mode=mode, template='', delimiter='|')}.items())

            for search_string in search_params[mode]:
                search_string = isinstance(search_string, unicode) and unidecode(search_string) or search_string

                search_url = self.urls['search'] % (self._categories_string(),
                                                    '+'.join(search_string.replace('.', ' ').split()),
                                                    ('', '&freeleech=on')[self.freeleech])
                html = self.get_url(search_url)
                if self.should_skip():
                    return results

                cnt = len(items[mode])
                try:
                    if not html or self._has_no_results(html):
                        raise generic.HaltParseException

                    with BS4Parser(html, features=['html5lib', 'permissive']) as soup:
                        torrent_table = soup.find('table', attrs={'cellpadding': 5})
                        torrent_rows = [] if not torrent_table else torrent_table.find_all('tr')

                        if 2 > len(torrent_rows):
                            raise generic.HaltParseException

                        head = None
                        for tr in torrent_rows[1:]:
                            cells = tr.find_all('td')
                            if 4 > len(cells):
                                continue
                            try:
                                head = head if None is not head else self._header_row(tr)
                                seeders, leechers, size = [tryInt(n, n) for n in [
                                    cells[head[x]].get_text().strip() for x in 'seed', 'leech', 'size']]
                                if None is tr.find('a', href=rc['cats']) or self._reject_item(
                                        seeders, leechers,
                                        self.freeleech and (None is rc['fl'].search(cells[1].get_text()))):
                                    continue

                                info = tr.find('a', href=rc['info'])
                                title = (info.attrs.get('title') or info.get_text()).strip()
                                download_url = self._link('%s/%s' % (
                                    re.sub(rc['get'], r'\1', str(info.attrs['href'])), str(title).replace(' ', '.')))
                            except (AttributeError, TypeError, ValueError, KeyError):
                                continue

                            if title and download_url:
                                items[mode].append((title, download_url, seeders, self._bytesizer(size)))
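
This provider does not expose a plain download link; it rebuilds one from the torrent id embedded in the details href plus a dotted copy of the title. With the rc['get'] pattern above ('.*id=(\d+).*'), the id extraction behaves like this (the href value is illustrative):

import re

get_re = re.compile(r'(?i).*id=(\d+).*')
href = 'details.php?id=98765&hit=1'            # hypothetical details link
torrent_id = re.sub(get_re, r'\1', href)
print('%s/%s' % (torrent_id, 'Some.Show.S01E01'))  # 98765/Some.Show.S01E01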
Exemplo n.º 43
0
    def _search_provider(self, search_params, **kwargs):

        results = []
        if not self._authorised():
            return results

        items = {'Cache': [], 'Season': [], 'Episode': [], 'Propers': []}

        rc = dict((k, re.compile('(?i)' + v)) for (k, v) in {'info': 'detail', 'get': 'download'}.items())
        for mode in search_params.keys():
            for search_string in search_params[mode]:
                search_string = isinstance(search_string, unicode) and unidecode(search_string) or search_string
                search_url = self.urls['browse'] % (self._categories_string(), ('3', '0')[not self.freeleech],
                                                    (self.urls['search'] % search_string, '')['Cache' == mode])

                html = self.get_url(search_url)
                if self.should_skip():
                    return results

                cnt = len(items[mode])
                try:
                    if not html or self._has_no_results(html):
                        raise generic.HaltParseException

                    html = html.replace('<?xml version="1.0" encoding="iso-8859-1"?>', '')
                    html = re.sub(r'(</td>)[^<]*</td>', r'\1', html)
                    html = re.sub(r'(<a[^<]*)<a[^<]*?href=details[^<]*', r'\1', html)
                    with BS4Parser(html, 'html.parser') as soup:
                        shows_found = False
                        torrent_rows = soup.find_all('tr')
                        for index, row in enumerate(torrent_rows):
                            if 'type' == row.find_all('td')[0].get_text().strip().lower():
                                shows_found = index
                                break

                        if not shows_found or 2 > (len(torrent_rows) - shows_found):
                            raise generic.HaltParseException

                        head = None
                        for tr in torrent_rows[1 + shows_found:]:
                            cells = tr.find_all('td')
                            if 4 > len(cells):
                                continue
                            try:
                                head = head if None is not head else self._header_row(torrent_rows[shows_found])
                                seeders, leechers, size = [tryInt(n, n) for n in [
                                    cells[head[x]].get_text().strip() for x in 'seed', 'leech', 'size']]
                                if self._reject_item(seeders, leechers):
                                    continue

                                info = tr.find('a', href=rc['info'])
                                title = (info.attrs.get('title') or info.get_text()).strip()
                                download_url = self._link(tr.find('a', href=rc['get'])['href'])
                            except (AttributeError, TypeError, ValueError, KeyError):
                                continue

                            if title and download_url:
                                items[mode].append((title, download_url, seeders, self._bytesizer(size)))
Exemplo n.º 44
0
    def _search_provider(self, search_params, **kwargs):

        results = []

        items = {'Cache': [], 'Season': [], 'Episode': [], 'Propers': []}

        rc = dict((k, re.compile('(?i)' + v)) for (k, v) in {'get': 'magnet:'}.items())
        for mode in search_params.keys():
            for search_string in search_params[mode]:
                search_string = isinstance(search_string, unicode) and unidecode(search_string) or search_string

                if 'Cache' == mode:
                    search_url = self.urls['search'] % tuple(search_string.split(','))
                else:
                    search_url = self.urls['search'] % (search_string.replace('.', ' '), '')
                html = self.get_url(search_url)
                if self.should_skip():
                    return results

                cnt = len(items[mode])
                try:
                    if not html or self._has_no_results(html) or re.search(
                            '<h3>Result.*?&quot;.*?&quot;</h3>', html):
                        raise generic.HaltParseException

                    with BS4Parser(html, features=['html5lib', 'permissive']) as soup:
                        torrent_table = soup.find(id='results')
                        torrent_rows = [] if not torrent_table else torrent_table.find_all('div', class_='result')

                        for tr in torrent_rows:
                            try:
                                seeders, leechers, size = [tryInt(n, n) for n in [
                                    tr['data-%s' % x].strip() for x in 'seeders', 'leechers', 'size']]
                                if self._peers_fail(mode, seeders, leechers):
                                    continue

                                title = tr['data-name'].strip()
                                download_url = self._link(tr.find('a', href=rc['get'])['href'])
                            except (AttributeError, TypeError, ValueError):
                                continue

                            if title and download_url:
                                items[mode].append((title, download_url, seeders, self._bytesizer(size)))
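
Unlike the table-scraping providers, this one reads name, seeders, leechers and size straight from data-* attributes on each result div. A self-contained illustration of that access pattern with BeautifulSoup (markup shape assumed from the code above):

from bs4 import BeautifulSoup

html = ('<div id="results"><div class="result" data-name="Show.S01E01" '
        'data-seeders="42" data-leechers="3" data-size="734003200">...</div></div>')
soup = BeautifulSoup(html, 'html.parser')
for row in soup.find(id='results').find_all('div', class_='result'):
    print('%s seeds=%s leech=%s' % (row['data-name'], row['data-seeders'], row['data-leechers']))
    # Show.S01E01 seeds=42 leech=3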
Exemplo n.º 45
0
def parse_ids(item, ns):
    ids = {}
    if 'newznab' in ns:
        for attr in item.findall('%sattr' % ns['newznab']):
            if attr.get('name', '') in NewznabConstants.providerToIndexerMapping:
                v = helpers.tryInt(attr.get('value'))
                if v > 0:
                    ids[NewznabConstants.providerToIndexerMapping[attr.get('name')]] = v
    return ids
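
parse_ids assumes ns['newznab'] already holds the namespace in Clark notation (e.g. '{http://www.newznab.com/DTD/2010/feeds/attributes/}') so that findall can reach the feed's <newznab:attr> elements. A usage sketch with xml.etree.ElementTree; the attribute name 'tvdbid' is only illustrative of the keys in NewznabConstants.providerToIndexerMapping:

import xml.etree.ElementTree as ElementTree

NEWZNAB_NS = '{http://www.newznab.com/DTD/2010/feeds/attributes/}'   # assumed feed namespace
item = ElementTree.fromstring(
    '<item xmlns:newznab="http://www.newznab.com/DTD/2010/feeds/attributes/">'
    '<title>Show.S01E01</title>'
    '<newznab:attr name="tvdbid" value="123456"/>'
    '</item>')
for attr in item.findall('%sattr' % NEWZNAB_NS):
    print('%s=%s' % (attr.get('name'), attr.get('value')))   # tvdbid=123456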
Exemplo n.º 46
0
    def _search_provider(self, search_params, **kwargs):

        results = []
        if not self._authorised():
            return results

        items = {'Cache': [], 'Season': [], 'Episode': [], 'Propers': []}

        rc = dict((k, re.compile('(?i)' + v)) for (k, v) in {'info': 'detail', 'get': 'download',
                                                             'nuked': 'nuke', 'filter': 'free'}.items())
        for mode in search_params.keys():
            for search_string in search_params[mode]:
                search_string = isinstance(search_string, unicode) and unidecode(search_string) or search_string
                search_url = self.urls['search'] % (search_string, self._categories_string(mode, '%s', ','))

                html = self.get_url(search_url, timeout=90)
                if self.should_skip():
                    return results

                cnt = len(items[mode])
                try:
                    if not html or self._has_no_results(html):
                        raise generic.HaltParseException

                    with BS4Parser(html, attr='cellpadding="5"') as soup:
                        tbl = soup.find('table', class_='browse')
                        tbl_rows = [] if not tbl else tbl.find_all('tr')

                        if 2 > len(tbl_rows):
                            raise generic.HaltParseException

                        head = None
                        for tr in tbl_rows[1:]:
                            cells = tr.find_all('td')
                            if 5 > len(cells):
                                continue
                            try:
                                info = tr.find('a', href=rc['info'])
                                head = head if None is not head else self._header_row(tr)
                                seeders, leechers, size = [n for n in [
                                    cells[head[x]].get_text().strip() for x in 'leech', 'leech', 'size']]
                                seeders, leechers, size = [tryInt(n, n) for n in
                                                           list(re.findall(r'^(\d+)[^\d]+?(\d+)', leechers)[0])
                                                           + re.findall('^[^\n\t]+', size)]
                                if self._reject_item(seeders, leechers,
                                                     self.freeleech and (not tr.find('a', class_=rc['filter'])),
                                                     self.confirmed and (any([tr.find('img', alt=rc['nuked']),
                                                                              tr.find('img', class_=rc['nuked'])]))):
                                    continue

                                title = (info.attrs.get('title') or info.get_text()).strip()
                                download_url = self._link(tr.find('a', href=rc['get'])['href'])
                            except (AttributeError, TypeError, ValueError, KeyError):
                                continue

                            if title and download_url:
                                items[mode].append((title, download_url, seeders, self._bytesizer(size)))
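
Here seeders and leechers share one table cell, which is why the header key 'leech' is read twice and then split apart with a regex, while the size cell is trimmed to its first line. A quick check of that split, assuming the cell renders as something like '12 / 3':

import re

leechers_cell, size_cell = '12 / 3', '1.46 GB\nadded 2 hours ago'
seeders, leechers = re.findall(r'^(\d+)[^\d]+?(\d+)', leechers_cell)[0]
size = re.findall('^[^\n\t]+', size_cell)[0]
print('%s %s %s' % (seeders, leechers, size))  # 12 3 1.46 GB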
Exemplo n.º 47
0
    def _search_provider(self, search_params, **kwargs):

        results = []
        if not self._authorised():
            return results

        items = {'Cache': [], 'Season': [], 'Episode': [], 'Propers': []}

        rc = dict((k, re.compile('(?i)' + v)) for (k, v) in {
            'info': 'detail', 'get': 'download', 'fl': '\(Freeleech\)'}.items())
        for mode in search_params.keys():
            for search_string in search_params[mode]:
                search_string = isinstance(search_string, unicode) and unidecode(search_string) or search_string
                search_url = self.urls['search'] % (search_string, self._categories_string(mode),
                                                    ('3', '0')[not self.freeleech])

                html = self.get_url(search_url, timeout=90)
                if self.should_skip():
                    return results

                cnt = len(items[mode])
                try:
                    if not html or self._has_no_results(html):
                        raise generic.HaltParseException

                    html = html.replace('<table width=100% border=0 align=center cellpadding=0 cellspacing=0>', '')
                    html = re.sub(r'(?s)(.*)(<table[^>]*?950[^>]*>.*)(</body>)', r'\1\3', html)
                    html = re.sub(r'(?s)<table[^>]+font[^>]+>', '<table id="parse">', html)
                    html = re.sub(r'(?s)(<td[^>]+>(?!<[ab]).*?)(?:(?:</[ab]>)+)', r'\1', html)
                    html = re.sub(r'(?m)^</td></tr></table>', r'', html)
                    with BS4Parser(html, features=['html5lib', 'permissive'], attr='id="parse"') as soup:
                        torrent_table = soup.find('table', id='parse')
                        torrent_rows = [] if not torrent_table else torrent_table.find_all('tr')

                        if 2 > len(torrent_rows):
                            raise generic.HaltParseException

                        head = None
                        for tr in torrent_rows[1:]:
                            cells = tr.find_all('td')
                            if 5 > len(cells):
                                continue
                            try:
                                head = head if None is not head else self._header_row(tr)
                                seeders, leechers, size = [tryInt(n, n) for n in [
                                    cells[head[x]].get_text().strip() for x in 'seed', 'leech', 'size']]
                                if self._reject_item(seeders, leechers):
                                    continue

                                info = tr.find('a', href=rc['info'])
                                title = (info.attrs.get('title') or info.get_text().split()[0]).strip()
                                download_url = self._link(tr.find('a', href=rc['get'])['href'])
                            except (AttributeError, TypeError, ValueError, KeyError):
                                continue

                            if title and download_url:
                                items[mode].append((title, download_url, seeders, self._bytesizer(size)))
Exemplo n.º 48
0
    def _search_provider(self, search_params, **kwargs):

        results = []
        if not self._authorised():
            return results

        items = {'Cache': [], 'Season': [], 'Episode': [], 'Propers': []}

        rc = dict((k, re.compile('(?i)' + v)) for (k, v) in {
            'info': '/t/', 'get': 'download', 'fl': '\[freeleech\]'}.items())

        for mode in search_params.keys():
            rc['cats'] = re.compile('(?i)(cat|c\[\])=(?:%s)' % self._categories_string(mode, template='', delimiter='|'))
            for search_string in search_params[mode]:
                post_data = dict((x.split('=') for x in self._categories_string(mode).split('&')),
                                 search=search_string.replace('.', ' ').replace('^@^', '.'),
                                 jxt=2, jxw='b', freeleech=('on', None)[not self.freeleech])

                data_json = self.get_url(self.urls['search'], post_data=post_data, json=True)
                if self.should_skip():
                    return results

                cnt = len(items[mode])
                try:
                    html = data_json.get('Fs', [{}])[0].get('Cn', [{}])[0].get('d')
                    if not html or self._has_no_results(html):
                        raise generic.HaltParseException

                    with BS4Parser(html, features=['html5lib', 'permissive']) as soup:
                        torrent_table = soup.find('table', attrs={'cellspacing': 0}) or soup.find('table')
                        torrent_rows = [] if not torrent_table else torrent_table.find_all('tr')

                        if 2 > len(torrent_rows):
                            raise generic.HaltParseException

                        head = None
                        for tr in torrent_rows[1:]:
                            cells = tr.find_all('td')
                            if 4 > len(cells):
                                continue
                            try:
                                head = head if None is not head else self._header_row(tr)
                                seeders, leechers, size = [tryInt(n, n) for n in [
                                    cells[head[x]].get_text().strip() for x in 'seed', 'leech', 'size']]
                                if None is tr.find('a', href=rc['cats']) or self._reject_item(
                                        seeders, leechers, self.freeleech and (
                                        None is rc['fl'].search(cells[1].get_text()))):
                                    continue

                                info = tr.find('a', 'torrent') or tr.find('a', href=rc['info'])
                                title = (info.attrs.get('title') or info.get_text()).strip()
                                download_url = self._link(tr.find('a', href=rc['get'])['href'])
                            except (AttributeError, TypeError, ValueError):
                                continue

                            if title and download_url:
                                items[mode].append((title, download_url, seeders, self._bytesizer(size)))
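
The POST body in this example is built by splitting the provider's category string into key/value pairs and layering the search, jxt/jxw and freeleech fields on top. Assuming _categories_string(mode) returns something like 'c7=1&c14=1', the construction reduces to:

categories = 'c7=1&c14=1'   # assumed shape of self._categories_string(mode)
post_data = dict((x.split('=') for x in categories.split('&')),
                 search='some show s01e01', jxt=2, jxw='b', freeleech=None)
print(post_data)  # {'c7': '1', 'c14': '1', 'search': 'some show s01e01', 'jxt': 2, 'jxw': 'b', 'freeleech': None} (key order may vary)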
Exemplo n.º 49
0
    def _search_provider(self, search_params, **kwargs):

        results = []
        if not self._authorised():
            return results

        items = {'Cache': [], 'Season': [], 'Episode': [], 'Propers': []}

        rc = dict((k, re.compile('(?i)' + v)) for (k, v) in {'info': 'detail', 'get': 'download',
                                                             'nuked': 'nuke', 'filter': 'free'}.items())
        for mode in search_params.keys():
            for search_string in search_params[mode]:
                search_string = isinstance(search_string, unicode) and unidecode(search_string) or search_string
                search_url = self.urls['search'] % (search_string, self._categories_string(mode, '%s', ','))

                html = self.get_url(search_url, timeout=90)
                if self.should_skip():
                    return results

                cnt = len(items[mode])
                try:
                    if not html or self._has_no_results(html):
                        raise generic.HaltParseException

                    with BS4Parser(html, features=['html5lib', 'permissive'], attr='cellpadding="5"') as soup:
                        torrent_table = soup.find('table', class_='browse')
                        torrent_rows = [] if not torrent_table else torrent_table.find_all('tr')

                        if 2 > len(torrent_rows):
                            raise generic.HaltParseException

                        head = None
                        for tr in torrent_rows[1:]:
                            cells = tr.find_all('td')
                            if 5 > len(cells):
                                continue
                            try:
                                info = tr.find('a', href=rc['info'])
                                head = head if None is not head else self._header_row(tr)
                                seeders, leechers, size = [n for n in [
                                    cells[head[x]].get_text().strip() for x in 'leech', 'leech', 'size']]
                                seeders, leechers, size = [tryInt(n, n) for n in
                                                           list(re.findall('^(\d+)[^\d]+?(\d+)', leechers)[0])
                                                           + re.findall('^[^\n\t]+', size)]
                                if self._reject_item(seeders, leechers,
                                                     self.freeleech and (not tr.find('a', class_=rc['filter'])),
                                                     self.confirmed and (any([tr.find('img', alt=rc['nuked']),
                                                                              tr.find('img', class_=rc['nuked'])]))):
                                    continue

                                title = (info.attrs.get('title') or info.get_text()).strip()
                                download_url = self._link(tr.find('a', href=rc['get'])['href'])
                            except (AttributeError, TypeError, ValueError, KeyError):
                                continue

                            if title and download_url:
                                items[mode].append((title, download_url, seeders, self._bytesizer(size)))
Exemplo n.º 50
0
    def _search_provider(self, search_params, **kwargs):

        results = []

        items = {'Cache': [], 'Season': [], 'Episode': [], 'Propers': []}

        rc = dict((k, re.compile('(?i)' + v)) for (k, v) in {
            'info': '(^(info|torrent)/|/[\w+]{40,}\s*$)', 'get': '^magnet:'}.items())

        for mode in search_params.keys():
            for search_string in search_params[mode]:
                search_string = isinstance(search_string, unicode) and unidecode(search_string) or search_string

                if 'Cache' == mode:
                    search_url = self.urls['search'] % tuple(search_string.split(','))
                else:
                    search_url = self.urls['search'] % (search_string.replace('.', ' '), '')
                html = self.get_url(search_url)
                if self.should_skip():
                    return results

                cnt = len(items[mode])
                try:
                    if not html or self._has_no_results(html):
                        raise generic.HaltParseException

                    with BS4Parser(html, features=['html5lib', 'permissive']) as soup:
                        torrent_table = soup.find('table', attrs={'class': ['table', 'is-striped']})
                        torrent_rows = [] if not torrent_table else torrent_table.find_all('tr')

                        if 2 > len(torrent_rows):
                            raise generic.HaltParseException

                        head = None
                        for tr in torrent_rows[1:]:
                            cells = tr.find_all('td')
                            if 5 > len(cells):
                                continue
                            try:
                                head = head if None is not head else self._header_row(tr)
                                seeders, leechers, size = [tryInt(n, n) for n in [
                                    cells[head[x]].get_text().strip() for x in 'seed', 'leech', 'size']]
                                if self._reject_item(seeders, leechers):
                                    continue

                                info = tr.select(
                                    '[alt*="magnet"], [title*="magnet"], [alt*="torrent"], [title*="torrent"]')[0] \
                                    or tr.find('a', href=rc['info'])
                                title = re.sub('\s(using|use|magnet|link)', '', (
                                        info.attrs.get('title') or info.attrs.get('alt') or info.get_text())).strip()
                                download_url = self._link(tr.find('a', href=rc['get'])['href'])
                            except (AttributeError, TypeError, ValueError, KeyError):
                                continue

                            if title and download_url:
                                items[mode].append((title, download_url, seeders, self._bytesizer(size)))
Exemplo n.º 51
0
    def _search_provider(self, search_params, **kwargs):

        results = []
        if not self._authorised():
            return results

        items = {'Cache': [], 'Season': [], 'Episode': [], 'Propers': []}

        rc = dict((k, re.compile('(?i)' + v))
                  for (k, v) in {'info': 'view', 'get': 'download', 'name': 'showname', 'nuked': 'nuked'}.items())
        for mode in search_params.keys():
            for search_string in search_params[mode]:
                search_string = isinstance(search_string, unicode) and unidecode(search_string) or search_string
                search_url = self.urls['search'] % search_string

                # fetches 15 results by default, and up to 100 if allowed in user profile
                html = self.get_url(search_url)
                if self.should_skip():
                    return results

                cnt = len(items[mode])
                try:
                    if not html or self._has_no_results(html):
                        raise generic.HaltParseException

                    with BS4Parser(html, features=['html5lib', 'permissive']) as soup:
                        torrent_table = soup.find('table', class_='torrent_table')
                        torrent_rows = []
                        if torrent_table:
                            torrent_rows = torrent_table.find_all('tr')

                        if 2 > len(torrent_rows):
                            raise generic.HaltParseException

                        head = None
                        for tr in torrent_rows[1:]:
                            cells = tr.find_all('td')
                            if 5 > len(cells) or tr.find('img', alt=rc['nuked']):
                                continue
                            try:
                                head = head if None is not head else self._header_row(tr)
                                seeders, leechers, size = [tryInt(n, n) for n in [
                                    cells[head[x]].get_text().strip() for x in 'seed', 'leech', 'size']]
                                if self._reject_item(seeders, leechers):
                                    continue

                                title = tr.find('a', title=rc['info']).get_text().strip()
                                if title.lower().startswith('season '):
                                    title = '%s %s' % (tr.find('div', class_=rc['name']).get_text().strip(), title)

                                download_url = self._link(tr.find('a', href=rc['get'])['href'])
                            except (AttributeError, TypeError, ValueError):
                                continue

                            if title and download_url:
                                items[mode].append((title, download_url, seeders, self._bytesizer(size)))
Exemplo n.º 52
0
def parse_date_time(d, t, network):
    if network_dict is None:
        load_network_dict()
    mo = time_regex.search(t)
    if mo is not None and len(mo.groups()) >= 5:
        if mo.group(5) is not None:
            try:
                hr = helpers.tryInt(mo.group(1))
                m = helpers.tryInt(mo.group(4))
                ap = mo.group(5)
                # convert am/pm to 24 hour clock
                if ap is not None:
                    if pm_regex.search(ap) is not None and hr != 12:
                        hr += 12
                    elif am_regex.search(ap) is not None and hr == 12:
                        hr -= 12
            except:
                hr = 0
                m = 0
        else:
            try:
                hr = helpers.tryInt(mo.group(1))
                m = helpers.tryInt(mo.group(6))
            except:
                hr = 0
                m = 0
    else:
        hr = 0
        m = 0
    if hr < 0 or hr > 23 or m < 0 or m > 59:
        hr = 0
        m = 0

    te = datetime.datetime.fromordinal(helpers.tryInt(d))
    try:
        if sickbeard.TIMEZONE_DISPLAY == 'local':
            foreign_timezone = get_network_timezone(network, network_dict)
            foreign_naive = datetime.datetime(te.year, te.month, te.day, hr, m, tzinfo=foreign_timezone)
            return foreign_naive.astimezone(sb_timezone)
        else:
            return datetime.datetime(te.year, te.month, te.day, hr, m, tzinfo=sb_timezone)
    except:
        return datetime.datetime(te.year, te.month, te.day, hr, m)
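
The am/pm branch above follows the standard 12-hour to 24-hour conversion: add 12 to any pm hour except 12, and turn 12 am into 0. In isolation:

def to_24_hour(hr, meridiem):
    # '8 pm' -> 20, '12 pm' -> 12, '12 am' -> 0, '8 am' -> 8
    if meridiem.lower().startswith('p') and 12 != hr:
        hr += 12
    elif meridiem.lower().startswith('a') and 12 == hr:
        hr -= 12
    return hr

print('%s %s' % (to_24_hour(8, 'pm'), to_24_hour(12, 'am')))  # 20 0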
Exemplo n.º 53
0
    def _search_provider(self, search_params, **kwargs):

        results = []
        if not self._authorised():
            return results

        items = {'Cache': [], 'Season': [], 'Episode': [], 'Propers': []}

        rc = dict((k, re.compile('(?i)' + v)) for (k, v) in {'nodots': '[\.\s]+'}.items())
        for mode in search_params.keys():
            for search_string in search_params[mode]:
                search_string = isinstance(search_string, unicode) and unidecode(search_string) or search_string

                search_url = self.urls['browse'] % (self.user_authkey, self.user_passkey)
                if 'Cache' != mode:
                    search_url += self.urls['search'] % rc['nodots'].sub('+', search_string)

                data_json = self.get_url(search_url, json=True)
                if self.should_skip():
                    return results

                cnt = len(items[mode])
                try:
                    for item in data_json.get('response', {}).get('results', []):
                        if self.freeleech and not item.get('isFreeleech'):
                            continue

                        seeders, leechers, group_name, torrent_id, size = [tryInt(n, n) for n in [item.get(x) for x in [
                            'seeders', 'leechers', 'groupName', 'torrentId', 'size']]]
                        if self._reject_item(seeders, leechers):
                            continue

                        try:
                            title_parts = group_name.split('[')
                            maybe_res = re.findall('((?:72|108|216)0\w)', title_parts[1])
                            maybe_ext = re.findall('(?i)(%s)' % '|'.join(common.mediaExtensions), title_parts[1])
                            detail = title_parts[1].split('/')
                            detail[1] = detail[1].strip().lower().replace('mkv', 'x264')
                            title = '%s.%s' % (BS4Parser(title_parts[0].strip(), 'html.parser').soup.string, '.'.join(
                                (maybe_res and [maybe_res[0]] or []) +
                                [detail[0].strip(), detail[1], maybe_ext and maybe_ext[0].lower() or 'mkv']))
                        except (IndexError, KeyError):
                            title = self.regulate_title(item, group_name)
                        download_url = self.urls['get'] % (self.user_authkey, self.user_passkey, torrent_id)

                        if title and download_url:
                            items[mode].append((title, download_url, seeders, self._bytesizer(size)))

                except (StandardError, Exception):
                    logger.log(u'Failed to parse. Traceback: %s' % traceback.format_exc(), logger.ERROR)
                self._log_search(mode, len(items[mode]) - cnt, search_url)

            results = self._sort_seeding(mode, results + items[mode])

        return results
Exemplo n.º 54
0
    def _doSearch(self, search_strings, search_mode='eponly', epcount=0, age=0, epObj=None):

        results = []
        items = {'Season': [], 'Episode': [], 'RSS': []}

        for mode in search_strings.keys():
            for search_string in search_strings[mode]:
                try:
                    self.search_params.update({'type': ('search', 'rss')[mode == 'RSS'], 'search': search_string.strip()})
                    data = self.getURL(self.urls['rss'], params=self.search_params)
                    if not data:
                        logger.log(u'No response, skipping...', logger.DEBUG)
                        continue

                    data = xmltodict.parse(data)
                    if not all([data, 'rss' in data, 'channel' in data['rss'], 'item' in data['rss']['channel']]):
                        logger.log(u'Malformed rss returned, skipping...', logger.DEBUG)
                        continue

                    # https://github.com/martinblech/xmltodict/issues/111
                    entries = data['rss']['channel']['item']
                    entries = entries if isinstance(entries, list) else [entries]

                    for item in entries:
                        title = item['title']
                        info_hash = item['info_hash']
                        url = item['enclosure']['@url']
                        size = int(item['enclosure']['@length'] or item['size'])
                        seeders = helpers.tryInt(item['seeders'], 0)
                        leechers = helpers.tryInt(item['leechers'], 0)

                        if not seeders or seeders < self.minseed or leechers < self.minleech:
                            continue

                        items[mode].append((title, url, seeders, leechers, size, info_hash))

                except Exception:
                    logger.log(u"Failed parsing " + self.name + " Traceback: " + traceback.format_exc(), logger.ERROR)

            results += items[mode]

        return results
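
This last example depends on xmltodict flattening the RSS feed into nested dicts, and on the quirk (see the linked xmltodict issue) that a channel with a single <item> yields a dict rather than a list. Both behaviours in miniature:

import xmltodict

rss = ('<rss><channel>'
       '<item><title>Show.S01E01</title><seeders>5</seeders></item>'
       '</channel></rss>')
data = xmltodict.parse(rss)
entries = data['rss']['channel']['item']
entries = entries if isinstance(entries, list) else [entries]   # a single item parses as a dict
print('%s seeders=%s' % (entries[0]['title'], entries[0]['seeders']))  # Show.S01E01 seeders=5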