def _action(self, act, ids, filter_func):

    if isinstance(ids, (string_types, list)):
        item = dict(fail=[], ignore=[])
        for task in filter_iter(filter_func, self._tinf(ids, use_props=False, err=True)):
            item[('fail', 'ignore')[self._ignore_state(task)]] += [task.get('hash')]

        # retry items that are not acted on
        retry_ids = item['fail']
        tries = (1, 3, 5, 10, 15, 15, 30, 60)
        i = 0
        while retry_ids:
            for i in tries:
                logger.log('%s: retry %s %s item(s) in %ss' % (self.name, act, len(item['fail']), i),
                           logger.DEBUG)
                time.sleep(i)
                item['fail'] = []
                for task in filter_iter(filter_func, self._tinf(retry_ids, use_props=False, err=True)):
                    item[('fail', 'ignore')[self._ignore_state(task)]] += [task.get('hash')]

                if not item['fail']:
                    retry_ids = None
                    break
                retry_ids = item['fail']
            else:
                if max(tries) == i:
                    logger.log('%s: failed to %s %s item(s) after %s tries over %s mins, aborted'
                               % (self.name, act, len(item['fail']), len(tries), sum(tries) / 60),
                               logger.DEBUG)
                    # all tries exhausted; clear retry_ids so the while loop terminates
                    retry_ids = None

        return (item['fail'] + item['ignore']) or True
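# Hedged usage sketch of the act/ids/filter_func contract above. The 'paused'
# state check is an assumption based on the task dicts _tinf returns (keys
# 'state' and 'hash'), not a documented API:
#
#     def _pause_check(task):
#         # treat a task as "not yet acted on" while it is still unpaused
#         return 'paused' not in task.get('state', '')
#
#     result = self._action('pause', 'hash1,hash2', _pause_check)
#     # result is the list of affected hashes, or True when nothing needed acting on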
def sortedProviderList():
    initialList = sickbeard.providerList + sickbeard.newznabProviderList + sickbeard.torrentRssProviderList
    providerDict = dict(zip([x.get_id() for x in initialList], initialList))

    newList = []

    # add all modules in the priority list, in order
    for curModule in sickbeard.PROVIDER_ORDER:
        if curModule in providerDict:
            newList.append(providerDict[curModule])

    if not sickbeard.PROVIDER_ORDER:
        nzb = filter_list(lambda p: p.providerType == generic.GenericProvider.NZB, itervalues(providerDict))
        tor = filter_list(lambda p: p.providerType != generic.GenericProvider.NZB, itervalues(providerDict))
        newList = sorted(filter_iter(lambda p: not p.anime_only, nzb), key=lambda v: v.get_id()) + \
            sorted(filter_iter(lambda p: not p.anime_only, tor), key=lambda v: v.get_id()) + \
            sorted(filter_iter(lambda p: p.anime_only, nzb), key=lambda v: v.get_id()) + \
            sorted(filter_iter(lambda p: p.anime_only, tor), key=lambda v: v.get_id())

    # add any modules that are missing from that list
    for curModule in providerDict:
        if providerDict[curModule] not in newList:
            newList.append(providerDict[curModule])

    return newList
def get_data(self, url):
    result = None
    data_json = self.get_url(url % dict(ts=self.ts()), parse_json=True)
    if self.should_skip():
        return result

    url = data_json.get('url', '')
    if url.lower().startswith('magnet:'):
        result = url
    else:
        from sickbeard import providers
        if 'torlock' in url.lower():
            prov = next(filter_iter(lambda p: 'torlock' == p.name.lower(),
                                    filter_iter(lambda sp: sp.providerType == self.providerType,
                                                providers.sortedProviderList())))
            state = prov.enabled
            prov.enabled = True
            _ = prov.url
            prov.enabled = state
            if prov.url:
                try:
                    result = prov.urls.get('get', '') % re.findall(r'(\d+).torrent', url)[0]
                except (IndexError, TypeError):
                    pass

    return result
def get_data(self, url):
    result = None
    html = self.get_url(url)
    if self.should_skip():
        return result

    with BS4Parser(html) as soup:
        re_showid = re.compile(r'(?i)hs_showid\s*=\s*(\d+)')
        try:
            hs_id = re_showid.findall(
                next(filter_iter(lambda s: re_showid.search(s),
                                 map_iter(lambda t: t.get_text(), soup.find_all('script')))))[0]
        except (BaseException, Exception):
            return result

    html = self.get_url(self.urls['get_data'] % hs_id)
    if self.should_skip():
        return result

    with BS4Parser(html) as soup:
        try:
            result = sorted(map_iter(
                lambda t: t.get('href'),
                soup.find(id=re.findall(r'.*#(\d+-\d+\w)$', url)[0])
                    .find_all('a', href=re.compile('(?i)(torrent$|^magnet:)'))))[0]
        except (BaseException, Exception):
            pass

    return result
def call(self):
    """Execute added multicall calls

    @return: the results (post-processed), in the order they were added
    @rtype: tuple
    """
    xmc = xmlrpclib.MultiCall(self.rt_obj.get_connection())

    for call in self.calls:
        method, args = call
        rpc_call = method.rpc_call
        if not self.rt_obj.method_exists(rpc_call):
            for alias in getattr(method, 'aliases', None) or []:
                if self.rt_obj.method_exists(alias):
                    rpc_call = alias
                    break

        getattr(xmc, rpc_call)(*args)

    try:
        results = tuple(next(filter_iter(lambda x: isinstance(x, list), xmc().results)))
    except (IndexError, StopIteration):
        # an exhausted next() raises StopIteration, not IndexError
        return [[]]

    results_processed = []

    for r, c in list(zip(results, self.calls)):
        method = c[0]  # Method instance
        result = process_result(method, r)
        results_processed.append(result)

        # assign result to class_obj
        exists = hasattr(self.class_obj, method.varname)
        if not exists or not inspect.ismethod(getattr(self.class_obj, method.varname)):
            setattr(self.class_obj, method.varname, result)

    return tuple(results_processed)
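# Hedged usage sketch of the Multicall pattern used throughout this module (see
# the update() methods further below): queue retriever methods, then issue one
# call(). The exact method names available depend on the connected rTorrent
# version, so these two are illustrative:
#
#     mc = rpc.Multicall(torrent_obj)
#     mc.add(rpc.find_method('d.get_name'), torrent_obj.rpc_id)
#     mc.add(rpc.find_method('d.get_size_bytes'), torrent_obj.rpc_id)
#     name, size_bytes = mc.call()  # results arrive in the order the calls were added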
def _fanart_urls(tvdb_id, image_type='banner', lang='en', thumb=False):
    # type: (int, AnyStr, AnyStr, bool) -> Optional[List[List]]
    types = {'poster': fanart.TYPE.TV.POSTER, 'banner': fanart.TYPE.TV.BANNER,
             'fanart': fanart.TYPE.TV.BACKGROUND,
             'poster_thumb': fanart.TYPE.TV.POSTER, 'banner_thumb': fanart.TYPE.TV.BANNER}
    try:
        if tvdb_id:
            request = fanartRequest(apikey=sickbeard.FANART_API_KEY, tvdb_id=tvdb_id, types=types[image_type])
            resp = request.response()
            itemlist = []
            dedupe = []
            for art in filter_iter(lambda i: 10 < len(i.get('url', '')) and (lang == i.get('lang', '')[0:2]),
                                   # remove "[0:2]" ... to strictly use only data where "en" is at source
                                   resp[types[image_type]]):  # type: dict
                try:
                    url = (art['url'], art['url'].replace('/fanart/', '/preview/'))[thumb]
                    if url not in dedupe:
                        dedupe += [url]
                        itemlist += [[int(art['id']), int(art['likes']), url]]
                except (BaseException, Exception):
                    continue

            itemlist.sort(key=lambda a: (a[1], a[0]), reverse=True)
            return itemlist
    except (BaseException, Exception):
        raise
def _get_method(self, *choices):
    try:
        return next(filter_iter(lambda method: self._rt_obj.method_exists(method), choices))
    except (BaseException, Exception):
        pass
def mass_upsert_sql(table_name, value_dict, key_dict, sanitise=True):
    # type: (AnyStr, Dict, Dict, bool) -> List[List[AnyStr]]
    """
    use with cl.extend(mass_upsert_sql(tableName, valueDict, keyDict))

    :param table_name: table name
    :param value_dict: dict of values to be set {'table_fieldname': value}
    :param key_dict: dict of restraints for update {'table_fieldname': value}
    :param sanitise: True to remove k, v pairs in keyDict from valueDict as they must not exist in both.
        This option has a performance hit, so it's best to remove key_dict keys from value_dict
        and set this False instead.
    :type sanitise: Boolean
    :return: list of 2 sql commands
    """
    cl = []

    gen_params = (lambda my_dict: [x + ' = ?' for x in iterkeys(my_dict)])

    # sanity: remove k, v pairs in keyDict from valueDict
    if sanitise:
        value_dict = dict(filter_iter(lambda k: k[0] not in key_dict, iteritems(value_dict)))

    # noinspection SqlResolve
    cl.append(['UPDATE [%s] SET %s WHERE %s' %
               (table_name, ', '.join(gen_params(value_dict)), ' AND '.join(gen_params(key_dict))),
               list_values(value_dict) + list_values(key_dict)])

    # noinspection SqlResolve
    cl.append(['INSERT INTO [' + table_name + '] (' +
               ', '.join(["'%s'" % ('%s' % v).replace("'", "''")
                          for v in itertools.chain(iterkeys(value_dict), iterkeys(key_dict))]) + ')' +
               ' SELECT ' +
               ', '.join(["'%s'" % ('%s' % v).replace("'", "''")
                          for v in itertools.chain(itervalues(value_dict), itervalues(key_dict))]) +
               ' WHERE changes() = 0'])
    return cl
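# Hedged example of the call pattern named in the docstring; the table and
# column names here are hypothetical:
#
#     cl = []
#     cl.extend(mass_upsert_sql('tv_shows', dict(paused=1),
#                               dict(indexer=1, indexer_id=123), sanitise=False))
#     my_db.mass_action(cl)  # runs the UPDATE, then the INSERT only if changes() = 0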
def load_torrent(self, data, extra=None, start=False, verbose=False, verify_load=True, verify_retries=3):
    """
    Loads torrent into rTorrent (with various enhancements)

    @param data: can be a url, a path to a local file, or the raw data of a torrent file
    @type data: str

    @param extra: extra commands to send
    @type extra: array

    @param start: start torrent when loaded
    @type start: bool

    @param verbose: print error messages to rTorrent log
    @type verbose: bool

    @param verify_load: verify that torrent was added to rTorrent successfully
    @type verify_load: bool

    @param verify_retries: number of times to attempt verification
    @type verify_retries: int

    @return: Depends on verify_load:
             - if verify_load is True (and the torrent was loaded successfully),
               it'll return a L{Torrent} instance
             - if verify_load is False, it'll return None

    @rtype: L{Torrent} instance or None

    @raise AssertionError: If the torrent wasn't successfully added to rTorrent -
                           Check L{TorrentParser} for the AssertionErrors it raises

    @note: Because this function includes url verification (if a url was input)
    as well as verification as to whether the torrent was successfully added,
    this function doesn't execute instantaneously. If that's what you're
    looking for, use load_torrent_simple() instead.
    """
    tp = TorrentParser(data)
    info_hash = tp.info_hash

    # load torrent
    self.execute_func(self._get_load_function('url', False, False), '')
    self.execute_func(self._get_load_function('raw', start, verbose), xmlrpclib.Binary(tp.raw_torrent), extra)

    t = None
    if verify_load:
        while verify_retries:
            try:
                t = next(filter_iter(lambda td: td.info_hash == info_hash, self.get_torrents()))
                break
            except (BaseException, Exception):
                time.sleep(self.request_interval)
                verify_retries -= 1

        assert None is not t, 'Adding torrent was unsuccessful.'

    return t
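# Hedged usage sketch: `data` may be a url, a local file path, or raw torrent
# bytes. `rt` stands in for an already connected RTorrent instance (construction
# not shown here):
#
#     torrent = rt.load_torrent('/path/to/file.torrent', start=True, verify_retries=5)
#     if torrent:
#         print(torrent.info_hash)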
def redirect_args(self, new_url, exclude=(None,), **kwargs):
    args = '&'.join(['%s=%s' % (k, v) for (k, v) in
                     filter_iter(lambda arg: arg[1] not in exclude, iteritems(kwargs))])
    self.redirect('%s%s' % (new_url, ('', '?' + args)[bool(args)]), permanent=True)
def load_magnet(self, magneturl, info_hash, extra=None, start=False, verbose=False, verify_load=True):

    func_name = self._get_load_function('url', start, verbose)

    # load magnet
    self.execute_func(func_name, magneturl, extra)

    t = None
    if verify_load:
        info_hash = info_hash.upper()
        max_retries = 10
        while max_retries:
            try:
                t = next(filter_iter(lambda td: td.info_hash.upper() == info_hash, self.get_torrents()))
                break
            except (BaseException, Exception):
                time.sleep(self.request_interval)
                max_retries -= 1

    return t
def enabled(self):
    """
    Generator that yields the id of each enabled notifier

    :return: ID String
    :rtype: String
    """
    for n in filter_iter(lambda v: v.is_enabled(), list_values(self.notifiers)):
        yield n.id()
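# Hedged usage sketch: consuming the generator from the owning dispatcher
# object (the `notifiers_obj` name is assumed):
#
#     for notifier_id in notifiers_obj.enabled():
#         logger.log('Notifier enabled: %s' % notifier_id, logger.DEBUG)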
def has_local_id(self, info_hash):
    method = rpc.find_method('d.get_local_id')
    result = True
    try:
        func = next(filter_iter(lambda m: self.method_exists(m), (method.rpc_call,) + method.aliases))
        getattr(self.get_connection(), func)(info_hash)
    except (BaseException, Exception):
        result = False
    return result
def find_method(rpc_call):
    """Return L{Method} instance associated with given RPC call"""
    try:
        rpc_call = rpc_call.lower()
        return next(filter_iter(
            lambda m: rpc_call in map_list(lambda n: n.lower(),
                                           [m.rpc_call] + list(getattr(m, 'aliases', []))),
            rtorrent.methods + rtorrent.torrent.methods + rtorrent.file.methods
            + rtorrent.tracker.methods + rtorrent.peer.methods))
    except (IndexError, StopIteration):
        # an exhausted next() raises StopIteration, not IndexError
        return -1
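# Usage note grounded in the body above: find_method resolves either a canonical
# name or an alias, and returns -1 (not None) on a miss, so callers should test
# explicitly before use:
#
#     method = rpc.find_method('d.get_local_id')
#     if -1 == method:
#         ...  # unknown RPC call; handle before dereferencing method.rpc_call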
def is_available(self, rt_obj):
    if rt_obj.get_client_version_tuple() >= self.min_version:
        try:
            self.varname = get_varname(next(filter_iter(
                lambda f: rt_obj.method_exists(f),
                (self.rpc_call,) + tuple(getattr(self, 'aliases', '')))))
            return True
        except (IndexError, StopIteration):
            # an exhausted next() raises StopIteration, not IndexError
            pass
    return False
def update(self):
    """Refresh rTorrent client info

    @note: All fields are stored as attributes to self.

    @return: None
    """
    mc = rpc.Multicall(self)

    for method in filter_iter(lambda m: m.is_retriever() and m.is_available(self), methods):
        mc.add(method)

    mc.call()
def cleanup_old_db_backups(filename):
    try:
        d, filename = ek.ek(os.path.split, filename)
        if not d:
            d = sickbeard.DATA_DIR
        for f in filter_iter(
                lambda fn: fn.is_file() and filename in fn.name
                and re.search(r'\.db(\.v\d+)?\.r\d+$', fn.name),
                ek.ek(scandir, d)):
            try:
                ek.ek(os.unlink, f.path)
            except (BaseException, Exception):
                pass
    except (BaseException, Exception):
        pass
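# Illustrative filenames, derived from the regex above rather than a documented
# naming scheme: for filename 'sickbeard.db', backups such as
# 'sickbeard.db.v41.r123' or 'sickbeard.db.r99' would be unlinked, while the
# live 'sickbeard.db' itself never matches the trailing '.r<digits>' pattern.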
def update(self):
    """Refresh tracker data

    @note: All fields are stored as attributes to self.

    @return: None
    """
    mc = rpc.Multicall(self)

    for method in filter_iter(lambda fx: fx.is_retriever() and fx.is_available(self._rt_obj), methods):
        mc.add(method, self.rpc_id)

    mc.call()
def _tinf(self, ids=None, use_props=True, err=False):
    # type: (Optional[list], bool, bool) -> list
    """
    Fetch client task information
    :param ids: Optional id(s) to get task info for. None to get all task info
    :param use_props: Optional override forces retrieval of torrents info instead of torrent generic properties
    :param err: Optional return error dict instead of empty array
    :return: Zero or more task object(s) from response
    """
    result = []
    rids = (ids if isinstance(ids, (list, type(None))) else [x.strip() for x in ids.split(',')]) or [None]
    getinfo = use_props and None is not ids
    params = {}
    cmd = ('torrents/info', 'query/torrents')[not self.api_ns]
    if not getinfo:
        label = sickbeard.TORRENT_LABEL.replace(' ', '_')
        if label and not ids:
            params['category'] = label
    for rid in rids:
        if getinfo:
            if self.api_ns:
                cmd = 'torrents/properties'
                params['hash'] = rid
            else:
                cmd = 'query/propertiesGeneral/%s' % rid
        elif rid:
            params['hashes'] = rid
        try:
            tasks = self._client_request(cmd, params=params, timeout=60, json=True)
            result += tasks and (isinstance(tasks, list) and tasks or (isinstance(tasks, dict) and [tasks])) \
                or ([], [{'state': 'error', 'hash': rid}])[err]
        except (BaseException, Exception):
            if getinfo:
                result += [dict(error=True, id=rid)]
    for t in filter_iter(lambda d: isinstance(d.get('name'), string_types) and d.get('name'),
                         (result, [])[getinfo]):
        t['name'] = unquote_plus(t.get('name'))
    return result
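# Hedged usage sketch of the accepted `ids` forms (hash values are placeholders):
#
#     tasks = self._tinf()                             # all tasks; category-filtered when a label is set
#     props = self._tinf('hash1, hash2')               # comma-separated string; per-hash properties
#     infos = self._tinf(['hash1'], use_props=False)   # torrents info records for a list of ids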
def update(self):
    """Refresh torrent data

    @note: All fields are stored as attributes to self.

    @return: None
    """
    mc = rpc.Multicall(self)

    for method in filter_iter(lambda m: m.is_retriever() and m.is_available(self._rt_obj), methods):
        mc.add(method, self.rpc_id)

    mc.call()

    # custom functions (only call private methods, since they only check
    # local variables and are therefore faster)
    self._call_custom_methods()
def _xem_get_ids(infosrc_name, xem_origin):
    """
    :param infosrc_name: TV info source name
    :type infosrc_name: AnyStr
    :param xem_origin: XEM origin key of the TV info source
    :type xem_origin: AnyStr
    :return: list of show ids that have a XEM scene mapping
    :rtype: List
    """
    xem_ids = []

    url = 'http://thexem.de/map/havemap?origin=%s' % xem_origin

    task = 'Fetching show ids with%s xem scene mapping%s for origin'
    logger.log(u'%s %s' % (task % ('', 's'), infosrc_name))
    parsed_json = helpers.get_url(url, parse_json=True, timeout=90)
    if not isinstance(parsed_json, dict) or not parsed_json:
        logger.log(u'Failed %s %s, Unable to get URL: %s'
                   % (task.lower() % ('', 's'), infosrc_name, url), logger.ERROR)
    else:
        if 'success' == parsed_json.get('result', '') and 'data' in parsed_json:
            xem_ids = list(set(filter_iter(
                lambda prodid: 0 < prodid,
                map_iter(lambda pid: helpers.try_int(pid), parsed_json['data']))))
            if 0 == len(xem_ids):
                logger.log(u'Failed %s %s, no data items parsed from URL: %s'
                           % (task.lower() % ('', 's'), infosrc_name, url), logger.WARNING)

    logger.log(u'Finished %s %s' % (task.lower() % (' %s' % len(xem_ids), helpers.maybe_plural(xem_ids)),
                                    infosrc_name))
    return xem_ids
def execute_func(self, func_name, param=None, extra=None):
    param = ([param], param)[isinstance(param, list)]
    for x in (extra or []):
        try:
            call, arg = x.split('=')
            method = rpc.find_method(call)
            method_name = next(filter_iter(lambda m: self.method_exists(m),
                                           (method.rpc_call,) + method.aliases))
            param += ['%s=%s' % (method_name, arg)]
        except (BaseException, Exception):
            pass
    method = getattr(self.get_connection(), func_name)
    if not self.use_target:
        try:
            method(*param)
        except (BaseException, Exception):
            self.use_target = True
    if self.use_target:
        method('', *param)
def _anidb_exceptions_fetcher():
    global anidb_exception_dict

    if should_refresh('anidb'):
        logger.log(u'Checking for AniDB scene exception updates')
        for cur_show_obj in filter_iter(lambda _s: _s.is_anime and TVINFO_TVDB == _s.tvid, sickbeard.showList):
            try:
                anime = create_anidb_obj(name=cur_show_obj.name, tvdbid=cur_show_obj.prodid, autoCorrectName=True)
            except (BaseException, Exception):
                continue
            if anime.name and anime.name != cur_show_obj.name:
                anidb_exception_dict[(cur_show_obj.tvid, cur_show_obj.prodid)] = [{anime.name: -1}]

        set_last_refresh('anidb')

    return anidb_exception_dict
def _tinf(self, ids=None, err=False):
    # type: (Optional[list], bool) -> list
    """
    Fetch client task information
    :param ids: Optional id(s) to get task info for. None to get all task info
    :param err: Optional return error dict instead of empty array
    :return: Zero or more task object(s) from response
    """
    result = []
    rids = (ids if isinstance(ids, (list, type(None))) else [x.strip() for x in ids.split(',')]) or [None]
    getinfo = None is not ids
    for rid in rids:
        try:
            if not self._testmode:
                # noinspection PyTypeChecker
                tasks = self._client_request(('list', 'getinfo')[getinfo], t_id=rid,
                                             t_params=dict(additional='detail,file,transfer'))['data']['tasks']
            else:
                # noinspection PyUnresolvedReferences
                tasks = (filter_list(lambda d: d.get('id') == rid, self._testdata), self._testdata)[not rid]
            result += tasks and (isinstance(tasks, list) and tasks or (isinstance(tasks, dict) and [tasks])) \
                or ([], [{'error': True, 'id': rid}])[err]
        except (BaseException, Exception):
            if getinfo:
                result += [dict(error=True, id=rid)]
    for t in filter_iter(lambda d: isinstance(d.get('title'), string_types) and d.get('title'), result):
        t['title'] = unquote_plus(t.get('title'))
    return result
def _get_proper_list(aired_since_shows, recent_shows, recent_anime, proper_dict=None):
    """
    :param aired_since_shows: date since aired
    :type aired_since_shows: datetime.datetime
    :param recent_shows: list of recent shows
    :type recent_shows: List[Tuple[int, int]]
    :param recent_anime: list of recent anime shows
    :type recent_anime: List[Tuple[int, int]]
    :param proper_dict: dict with provider keys containing Proper objects
    :type proper_dict: dict
    :return: list of propers
    :rtype: List[sickbeard.classes.Proper]
    """
    propers = {}

    my_db = db.DBConnection()
    # for each provider get a list of arbitrary Propers
    orig_thread_name = threading.currentThread().name
    for cur_provider in filter_iter(lambda p: p.is_active(), sickbeard.providers.sortedProviderList()):
        if not recent_anime and cur_provider.anime_only:
            continue

        if None is not proper_dict:
            found_propers = proper_dict.get(cur_provider.get_id(), [])
            if not found_propers:
                continue
        else:
            threading.currentThread().name = '%s :: [%s]' % (orig_thread_name, cur_provider.name)

            logger.log('Searching for new PROPER releases')

            try:
                found_propers = cur_provider.find_propers(search_date=aired_since_shows, shows=recent_shows,
                                                          anime=recent_anime)
            except AuthException as e:
                logger.log('Authentication error: %s' % ex(e), logger.ERROR)
                continue
            except (BaseException, Exception) as e:
                logger.log('Error while searching %s, skipping: %s' % (cur_provider.name, ex(e)), logger.ERROR)
                logger.log(traceback.format_exc(), logger.ERROR)
                continue
            finally:
                threading.currentThread().name = orig_thread_name

        # if they haven't been added by a different provider then add the Proper to the list
        for cur_proper in found_propers:
            name = _generic_name(cur_proper.name)
            if name in propers:
                continue

            try:
                np = NameParser(False, try_scene_exceptions=True, show_obj=cur_proper.parsed_show_obj,
                                indexer_lookup=False)
                parse_result = np.parse(cur_proper.name)
            except (InvalidNameException, InvalidShowException, Exception):
                continue

            # get the show object
            cur_proper.parsed_show_obj = (cur_proper.parsed_show_obj
                                          or helpers.find_show_by_id(parse_result.show_obj.tvid_prodid))
            if None is cur_proper.parsed_show_obj:
                logger.log('Skip download; cannot find show with ID [%s] from %s'
                           % (cur_proper.prodid, sickbeard.TVInfoAPI(cur_proper.tvid).name), logger.ERROR)
                continue

            cur_proper.tvid = cur_proper.parsed_show_obj.tvid
            cur_proper.prodid = cur_proper.parsed_show_obj.prodid

            if not (-1 != cur_proper.prodid and parse_result.series_name and parse_result.episode_numbers
                    and (cur_proper.tvid, cur_proper.prodid) in recent_shows + recent_anime):
                continue

            # only get anime Proper if it has release group and version
            if parse_result.is_anime and not parse_result.release_group and -1 == parse_result.version:
                logger.log('Ignored Proper with no release group and version in name [%s]'
                           % cur_proper.name, logger.DEBUG)
                continue

            if not show_name_helpers.pass_wordlist_checks(cur_proper.name, parse=False, indexer_lookup=False):
                logger.log('Ignored unwanted Proper [%s]' % cur_proper.name, logger.DEBUG)
                continue

            re_x = dict(re_prefix='.*', re_suffix='.*')
            result = show_name_helpers.contains_any(cur_proper.name,
                                                    cur_proper.parsed_show_obj.rls_ignore_words, **re_x)
            if None is not result and result:
                logger.log('Ignored Proper containing ignore word [%s]' % cur_proper.name, logger.DEBUG)
                continue

            result = show_name_helpers.contains_any(cur_proper.name,
                                                    cur_proper.parsed_show_obj.rls_require_words, **re_x)
            if None is not result and not result:
                logger.log('Ignored Proper for not containing any required word [%s]'
                           % cur_proper.name, logger.DEBUG)
                continue

            cur_size = getattr(cur_proper, 'size', None)
            if failed_history.has_failed(cur_proper.name, cur_size, cur_provider.name):
                continue

            cur_proper.season = parse_result.season_number if None is not parse_result.season_number else 1
            cur_proper.episode = parse_result.episode_numbers[0]

            # check if we actually want this Proper (if it's the right quality)
            sql_result = my_db.select(
                'SELECT release_group, status, version, release_name'
                ' FROM tv_episodes'
                ' WHERE indexer = ? AND showid = ?'
                ' AND season = ? AND episode = ?'
                ' LIMIT 1',
                [cur_proper.tvid, cur_proper.prodid, cur_proper.season, cur_proper.episode])
            if not sql_result:
                continue

            # only keep the Proper if we already retrieved the same quality ep (don't get better/worse ones)
            # check if we want this release: same quality as current, current has correct status
            # restrict other release group releases to Proper's
            old_status, old_quality = Quality.splitCompositeStatus(int(sql_result[0]['status']))
            cur_proper.quality = Quality.nameQuality(cur_proper.name, parse_result.is_anime)
            cur_proper.is_repack, cur_proper.properlevel = Quality.get_proper_level(
                parse_result.extra_info_no_name(), parse_result.version, parse_result.is_anime,
                check_is_repack=True)
            cur_proper.proper_level = cur_proper.properlevel  # local non global value
            old_release_group = sql_result[0]['release_group']
            try:
                same_release_group = parse_result.release_group.lower() == old_release_group.lower()
            except (BaseException, Exception):
                same_release_group = parse_result.release_group == old_release_group
            if old_status not in SNATCHED_ANY + [DOWNLOADED, ARCHIVED] \
                    or cur_proper.quality != old_quality \
                    or (cur_proper.is_repack and not same_release_group):
                continue

            np = NameParser(False, try_scene_exceptions=True, show_obj=cur_proper.parsed_show_obj,
                            indexer_lookup=False)
            try:
                extra_info = np.parse(sql_result[0]['release_name']).extra_info_no_name()
            except (BaseException, Exception):
                extra_info = None

            # don't take Proper of the same level we already downloaded
            old_proper_level, old_extra_no_name, old_name = \
                get_old_proper_level(cur_proper.parsed_show_obj, cur_proper.tvid, cur_proper.prodid,
                                     cur_proper.season, parse_result.episode_numbers,
                                     old_status, cur_proper.quality, extra_info,
                                     parse_result.version, parse_result.is_anime)
            if cur_proper.proper_level <= old_proper_level:
                continue

            is_web = (old_quality in (Quality.HDWEBDL, Quality.FULLHDWEBDL, Quality.UHD4KWEB)
                      or (old_quality == Quality.SDTV
                          and isinstance(sql_result[0]['release_name'], string_types)
                          and re.search(r'\Wweb.?(dl|rip|.([hx]\W?26[45]|hevc))\W',
                                        sql_result[0]['release_name'], re.I)))

            if is_web:
                old_name = (old_name, sql_result[0]['release_name'])[old_name in ('', None)]
                old_webdl_type = get_webdl_type(old_extra_no_name, old_name)
                new_webdl_type = get_webdl_type(parse_result.extra_info_no_name(), cur_proper.name)
                if old_webdl_type != new_webdl_type:
                    logger.log('Ignored Proper webdl source [%s], does not match existing webdl source'
                               ' [%s] for [%s]' % (old_webdl_type, new_webdl_type, cur_proper.name),
                               logger.DEBUG)
                    continue

            # for webdls, prevent Propers from different groups
            log_same_grp = 'Ignored Proper from release group [%s] does not match existing group [%s] for [%s]' \
                           % (parse_result.release_group, old_release_group, cur_proper.name)
            if sickbeard.PROPERS_WEBDL_ONEGRP and is_web and not same_release_group:
                logger.log(log_same_grp, logger.DEBUG)
                continue

            # check if we actually want this Proper (if it's the right release group and a higher version)
            if parse_result.is_anime:
                old_version = int(sql_result[0]['version'])
                if not (-1 < old_version < parse_result.version):
                    continue
                if not same_release_group:
                    logger.log(log_same_grp, logger.DEBUG)
                    continue
                found_msg = 'Found anime Proper v%s to replace v%s' % (parse_result.version, old_version)
            else:
                found_msg = 'Found Proper [%s]' % cur_proper.name

            # make sure the episode has been downloaded before
            history_limit = datetime.datetime.today() - datetime.timedelta(days=30)
            # noinspection SqlResolve
            history_results = my_db.select(
                'SELECT resource FROM history'
                ' WHERE indexer = ? AND showid = ?'
                ' AND season = ? AND episode = ? AND quality = ? AND date >= ?'
                ' AND (%s)' % ' OR '.join(['action LIKE "%%%02d"' % x
                                           for x in SNATCHED_ANY + [DOWNLOADED, ARCHIVED]]),
                [cur_proper.tvid, cur_proper.prodid,
                 cur_proper.season, cur_proper.episode, cur_proper.quality,
                 history_limit.strftime(history.dateFormat)])

            # skip if the episode has never downloaded, because a previous quality is required to match the Proper
            if not len(history_results):
                logger.log('Ignored Proper cannot find a recent history item for [%s]'
                           % cur_proper.name, logger.DEBUG)
                continue

            # make sure that none of the existing history downloads are the same Proper as the download candidate
            clean_proper_name = _generic_name(helpers.remove_non_release_groups(
                cur_proper.name, cur_proper.parsed_show_obj.is_anime))
            is_same = False
            for hitem in history_results:
                # if the result exists in history already we need to skip it
                if clean_proper_name == _generic_name(helpers.remove_non_release_groups(
                        ek.ek(os.path.basename, hitem['resource']))):
                    is_same = True
                    break
            if is_same:
                logger.log('Ignored Proper already in history [%s]' % cur_proper.name)
                continue

            logger.log(found_msg, logger.DEBUG)

            # finish populating the Proper instance
            # cur_proper.show_obj = cur_proper.parsed_show_obj.prodid
            cur_proper.provider = cur_provider
            cur_proper.extra_info = parse_result.extra_info
            cur_proper.extra_info_no_name = parse_result.extra_info_no_name
            cur_proper.release_group = parse_result.release_group
            cur_proper.is_anime = parse_result.is_anime
            cur_proper.version = parse_result.version

            propers[name] = cur_proper

        cur_provider.log_result('Propers', len(propers), '%s' % cur_provider.name)

    return list_values(propers)
    'torrentday', 'torrenting', 'torrentleech', 'tvchaosuk', 'xspeeds', 'zooqle',
    # anime
    'tokyotoshokan',
]

for module in __all__:
    try:
        m = importlib.import_module('.' + module, 'sickbeard.providers')
        globals().update({n: getattr(m, n) for n in m.__all__} if hasattr(m, '__all__')
                         else dict(filter_iter(lambda t: '_' != t[0][0], iteritems(m.__dict__))))
    except ImportError as e:
        if 'custom' != module[0:6]:
            raise e
def xem_refresh(tvid, prodid, force=False):
    """
    Refresh data from xem for a tv show

    :param tvid: TV info source id
    :type tvid: int
    :param prodid: show id at the TV info source
    :type prodid: int
    :param force: force a refresh regardless of the last refresh time
    :type force: bool
    """
    if None is prodid:
        return

    tvid, prodid = int(tvid), int(prodid)
    tvinfo = sickbeard.TVInfoAPI(tvid)

    if 'xem_origin' not in tvinfo.config or prodid not in xem_ids_list.get(tvid, []):
        return

    xem_origin = tvinfo.config['xem_origin']

    # XEM API URL
    url = 'http://thexem.de/map/all?id=%s&origin=%s&destination=scene' % (prodid, xem_origin)

    max_refresh_age_secs = 86400  # 1 day

    my_db = db.DBConnection()
    rows = my_db.select(
        'SELECT last_refreshed'
        ' FROM xem_refresh'
        ' WHERE indexer = ? AND indexer_id = ?',
        [tvid, prodid])
    if rows:
        last_refresh = int(rows[0]['last_refreshed'])
        refresh = int(time.mktime(datetime.datetime.today().timetuple())) > last_refresh + max_refresh_age_secs
    else:
        refresh = True

    if refresh or force:
        logger.log(u'Looking up XEM scene mapping for show %s on %s' % (prodid, tvinfo.name), logger.DEBUG)

        # mark refreshed
        my_db.upsert('xem_refresh',
                     dict(last_refreshed=int(time.mktime(datetime.datetime.today().timetuple()))),
                     dict(indexer=tvid, indexer_id=prodid))

        try:
            parsed_json = sickbeard.helpers.get_url(url, parse_json=True, timeout=90)
            if not parsed_json or '' == parsed_json:
                logger.log(u'No XEM data for show %s on %s' % (prodid, tvinfo.name), logger.MESSAGE)
                return

            if 'success' in parsed_json['result']:
                cl = map_list(
                    lambda entry: [
                        'UPDATE tv_episodes'
                        ' SET scene_season = ?, scene_episode = ?, scene_absolute_number = ?'
                        ' WHERE indexer = ? AND showid = ?'
                        ' AND season = ? AND episode = ?',
                        [entry.get('scene%s' % ('', '_2')['scene_2' in entry]).get(v)
                         for v in ('season', 'episode', 'absolute')]
                        + [tvid, prodid]
                        + [entry.get(xem_origin).get(v) for v in ('season', 'episode')]],
                    filter_iter(lambda x: 'scene' in x, parsed_json['data']))

                if 0 < len(cl):
                    my_db = db.DBConnection()
                    my_db.mass_action(cl)
            else:
                logger.log(u'Empty lookup result - no XEM data for show %s on %s'
                           % (prodid, tvinfo.name), logger.DEBUG)
        except (BaseException, Exception) as e:
            logger.log(u'Exception refreshing XEM data for show ' + str(prodid) + ' on '
                       + tvinfo.name + ': ' + ex(e), logger.WARNING)
            logger.log(traceback.format_exc(), logger.ERROR)
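# Illustrative shape of one parsed_json['data'] entry, as implied by the mapping
# code above (the 'tvdb' key stands in for xem_origin; entries may carry
# 'scene_2' instead of 'scene'; values are made up):
#
#     {'tvdb': {'season': 1, 'episode': 3, 'absolute': 3},
#      'scene': {'season': 1, 'episode': 4, 'absolute': 4}}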
def _search_provider(self, search_params, **kwargs):

    results = []
    if not self._authorised():
        return results

    items = {'Cache': [], 'Season': [], 'Episode': [], 'Propers': []}

    rc = dict([(k, re.compile('(?i)' + v)) for (k, v) in iteritems({
        'info': r'.*?details\s*-\s*', 'get': 'download'})])
    log = ''
    if self.filter:
        non_marked = 'f0' in self.filter
        # if search_any, use unselected to exclude, else use selected to keep
        filters = ([f for f in self.may_filter if f in self.filter],
                   [f for f in self.may_filter if f not in self.filter])[non_marked]
        filters += (((all([x in filters for x in ('free', 'double')]) and ['freedouble'] or [])
                     + (all([x in filters for x in ('half', 'double')]) and ['halfdouble'] or [])),
                    ((not all([x not in filters for x in ('free', 'double')]) and ['freedouble'] or [])
                     + (not all([x not in filters for x in ('half', 'double')]) and ['halfdouble'] or []))
                    )[non_marked]
        rc['filter'] = re.compile('(?i)^(%s)$' % '|'.join(
            ['%s' % f for f in filters if (f in self.may_filter and self.may_filter[f][1]) or f]))
        log = '%sing (%s) ' % (('keep', 'skipp')[non_marked],
                               ', '.join([f in self.may_filter and self.may_filter[f][0] or f
                                          for f in filters]))
    for mode in search_params:
        if mode in ['Season', 'Episode']:
            show_type = self.show_obj.air_by_date and 'Air By Date' \
                or self.show_obj.is_sports and 'Sports' or self.show_obj.is_anime and 'Anime' or None
            if show_type:
                logger.log(u'Provider does not carry shows of type: [%s], skipping' % show_type,
                           logger.DEBUG)
                return results

        for search_string in search_params[mode]:
            search_string = unidecode(search_string)
            search_url = self.urls['search'] % ('+'.join(search_string.split()),
                                                self._categories_string(mode, ''))

            html = self.get_url(search_url)
            if self.should_skip():
                return results

            cnt = len(items[mode])
            try:
                if not html or self._has_no_results(html):
                    raise generic.HaltParseException

                with BS4Parser(html, parse_only=dict(
                        table={'class': (lambda at: at and 'table' in at)})) as tbl:
                    tbl_rows = [] if not tbl else tbl.find_all('tr')

                    if 2 > len(tbl_rows):
                        raise generic.HaltParseException

                    head = None
                    for tr in tbl_rows[1:]:
                        cells = tr.find_all('td')
                        if 5 > len(cells) or (self.confirmed and tr.find(
                                'i', title=re.compile('(?i)unverified'))):
                            continue
                        if any(self.filter):
                            marked = ','.join([x.attrs.get('title', '').lower() for x in tr.find_all(
                                'i', attrs={'class': ['fa-star', 'fa-diamond', 'fa-star-half-o']})])
                            munged = ''.join(filter_iter(marked.__contains__, ['free', 'half', 'double']))
                            # noinspection PyUnboundLocalVariable
                            if ((non_marked and rc['filter'].search(munged))
                                    or (not non_marked and not rc['filter'].search(munged))):
                                continue
                        try:
                            head = head if None is not head else self._header_row(tr)
                            seeders, leechers, size = [try_int(n, n) for n in [
                                cells[head[x]].get_text().strip() for x in ('seed', 'leech', 'size')]]
                            if self._reject_item(seeders, leechers):
                                continue

                            title = rc['info'].sub('', tr.find('a', attrs={'title': rc['info']})['title'])
                            download_url = self._link(tr.find('a', href=rc['get'])['href'])
                        except (AttributeError, TypeError, ValueError, IndexError):
                            continue

                        if title and download_url:
                            items[mode].append((title, download_url, seeders, self._bytesizer(size)))

            except generic.HaltParseException:
                pass
            except (BaseException, Exception):
                logger.log(u'Failed to parse. Traceback: %s' % traceback.format_exc(), logger.ERROR)

            self._log_search(mode, len(items[mode]) - cnt, log + search_url)

        results = self._sort_seeding(mode, results + items[mode])

    return results
def retrieve_exceptions():
    """
    Looks up the exceptions on github, parses them into a dict, and inserts them into the
    scene_exceptions table in cache.db. Also clears the scene name cache.
    """
    global exception_dict, anidb_exception_dict, xem_exception_dict

    # exceptions are stored on github pages
    for tvid in sickbeard.TVInfoAPI().sources:
        if should_refresh(sickbeard.TVInfoAPI(tvid).name):
            logger.log(u'Checking for scene exception updates for %s' % sickbeard.TVInfoAPI(tvid).name)

            url = sickbeard.TVInfoAPI(tvid).config['scene_url']
            url_data = helpers.get_url(url)

            if None is url_data:
                # when None is url_data, there was trouble connecting to github
                logger.log(u'Check scene exceptions update failed. Unable to get URL: %s' % url, logger.ERROR)
                continue
            else:
                set_last_refresh(sickbeard.TVInfoAPI(tvid).name)

                # each exception is on one line with the format indexer_id: 'show name 1', 'show name 2', etc
                # (see the illustrative line after this function)
                for cur_line in url_data.splitlines():
                    prodid, sep, aliases = cur_line.partition(':')

                    if not aliases:
                        continue

                    prodid = int(prodid)

                    # regex out the list of shows, taking \' into account
                    # alias_list = [re.sub(r'\\(.)', r'\1', x) for x in re.findall(r"'(.*?)(?<!\\)',?", aliases)]
                    alias_list = [{re.sub(r'\\(.)', r'\1', x): -1}
                                  for x in re.findall(r"'(.*?)(?<!\\)',?", aliases)]
                    exception_dict[(tvid, prodid)] = alias_list
                    del alias_list

                del url_data

    # XEM scene exceptions
    _xem_exceptions_fetcher()
    for xem_ex in xem_exception_dict:
        if xem_ex in exception_dict:
            exception_dict[xem_ex] = exception_dict[xem_ex] + xem_exception_dict[xem_ex]
        else:
            exception_dict[xem_ex] = xem_exception_dict[xem_ex]

    # AniDB scene exceptions
    _anidb_exceptions_fetcher()
    for anidb_ex in anidb_exception_dict:
        if anidb_ex in exception_dict:
            exception_dict[anidb_ex] = exception_dict[anidb_ex] + anidb_exception_dict[anidb_ex]
        else:
            exception_dict[anidb_ex] = anidb_exception_dict[anidb_ex]

    # Custom exceptions
    custom_exception_dict, cnt_updated_numbers, min_remain_iv = _custom_exceptions_fetcher()
    for custom_ex in custom_exception_dict:
        if custom_ex in exception_dict:
            exception_dict[custom_ex] = exception_dict[custom_ex] + custom_exception_dict[custom_ex]
        else:
            exception_dict[custom_ex] = custom_exception_dict[custom_ex]

    changed_exceptions = False

    # write all the exceptions we got off the net into the database
    my_db = db.DBConnection()
    cl = []
    for cur_tvid_prodid in exception_dict:

        # get a list of the existing exceptions for this ID
        existing_exceptions = [{x['show_name']: x['season']} for x in my_db.select(
            'SELECT show_name, season'
            ' FROM scene_exceptions'
            ' WHERE indexer = ? AND indexer_id = ?',
            list(cur_tvid_prodid))]

        # if this exception isn't already in the DB then add it
        for cur_exception_dict in filter_iter(lambda e: e not in existing_exceptions,
                                              exception_dict[cur_tvid_prodid]):
            try:
                cur_exception, cur_season = next(iteritems(cur_exception_dict))
            except (BaseException, Exception):
                logger.log('scene exception error', logger.ERROR)
                logger.log(traceback.format_exc(), logger.ERROR)
                continue

            if PY2 and not isinstance(cur_exception, text_type):
                cur_exception = text_type(cur_exception, 'utf-8', 'replace')

            cl.append(['INSERT INTO scene_exceptions'
                       ' (indexer, indexer_id, show_name, season) VALUES (?,?,?,?)',
                       list(cur_tvid_prodid) + [cur_exception, cur_season]])
            changed_exceptions = True

    if cl:
        my_db.mass_action(cl)
        name_cache.buildNameCache(update_only_scene=True)

    # since this could invalidate the results of the cache we clear it out after updating
    if changed_exceptions:
        logger.log(u'Updated scene exceptions')
    else:
        logger.log(u'No scene exceptions update needed')

    # cleanup
    exception_dict.clear()
    anidb_exception_dict.clear()
    xem_exception_dict.clear()

    return changed_exceptions, cnt_updated_numbers, min_remain_iv
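# Illustrative exceptions-file line in the format parsed by retrieve_exceptions
# above (the id and names are made up):
#
#     12345: 'Show Name', 'Show Name (UK)',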
def _download_propers(proper_list):
    # type: (List[Proper]) -> None
    """
    download propers from given list

    :param proper_list: proper list
    """
    verified_propers = True
    consumed_proper = []
    downloaded_epid = set()

    _epid = operator.attrgetter('tvid', 'prodid', 'season', 'episode')

    while verified_propers:
        verified_propers = set()

        # get verified list; sort the list of unique Propers for highest proper_level, newest first
        for cur_proper in sorted(
                filter_iter(lambda p: p not in consumed_proper,
                            # allows Proper to fail or be rejected and another to be tried (with a different name)
                            filter_iter(lambda p: _epid(p) not in downloaded_epid, proper_list)),
                key=operator.attrgetter('properlevel', 'date'), reverse=True):  # type: Proper

            epid = _epid(cur_proper)

            # if the show is in our list and there hasn't been a Proper already added for that particular episode
            # then add it to our list of Propers
            if epid not in map_list(_epid, verified_propers):
                logger.log('Proper may be useful [%s]' % cur_proper.name)
                verified_propers.add(cur_proper)
            else:
                # use Proper with the highest level
                remove_propers = set()
                map_consume(lambda vp: remove_propers.add(vp),
                            filter_iter(lambda p: (epid == _epid(p)
                                                   and cur_proper.proper_level > p.proper_level),
                                        verified_propers))
                if remove_propers:
                    verified_propers -= remove_propers
                    logger.log('A more useful Proper [%s]' % cur_proper.name)
                    verified_propers.add(cur_proper)

        for cur_proper in list(verified_propers):
            consumed_proper += [cur_proper]

            # scene release checking
            scene_only = getattr(cur_proper.provider, 'scene_only', False)
            non_scene_fallback = getattr(cur_proper.provider, 'scene_loose', False) \
                or getattr(cur_proper.provider, 'scene_loose_active', False)
            scene_rej_nuked = getattr(cur_proper.provider, 'scene_rej_nuked', False)
            scene_nuked_active = getattr(cur_proper.provider, 'scene_nuked_active', False)
            if any([scene_only, non_scene_fallback, scene_rej_nuked, scene_nuked_active]) \
                    and not cur_proper.parsed_show_obj.is_anime:
                scene_or_contain = getattr(cur_proper.provider, 'scene_or_contain', '')
                scene_contains = False
                if scene_only and scene_or_contain:
                    re_extras = dict(re_prefix='.*', re_suffix='.*')
                    r = show_name_helpers.contains_any(cur_proper.name, scene_or_contain, **re_extras)
                    if None is not r and r:
                        scene_contains = True

                if scene_contains and not scene_rej_nuked:
                    reject = False
                else:
                    reject, url = search.can_reject(cur_proper.name)
                    if reject:
                        if isinstance(reject, string_types):
                            if scene_rej_nuked and not scene_nuked_active:
                                logger.log('Rejecting nuked release. Nuke reason [%s] source [%s]'
                                           % (reject, url), logger.DEBUG)
                            else:
                                logger.log('Considering nuked release. Nuke reason [%s] source [%s]'
                                           % (reject, url), logger.DEBUG)
                                reject = False
                        elif scene_contains or non_scene_fallback:
                            reject = False
                        else:
                            logger.log('Rejecting as not scene release listed at any [%s]' % url,
                                       logger.DEBUG)

                if reject:
                    continue

            # make the result object
            ep_obj = cur_proper.parsed_show_obj.get_episode(cur_proper.season, cur_proper.episode)
            result = cur_proper.provider.get_result([ep_obj], cur_proper.url)
            if None is result:
                continue

            result.name = cur_proper.name
            result.quality = cur_proper.quality
            result.version = cur_proper.version
            result.properlevel = cur_proper.proper_level
            result.is_repack = cur_proper.is_repack
            result.puid = cur_proper.puid

            # snatch it
            if search.snatch_episode(result, SNATCHED_PROPER):
                downloaded_epid.add(_epid(cur_proper))