def getWatchlist(self):

    enablers = [try_int(x) for x in split_string(self.conf('automation_urls_use'))]
    urls = split_string(self.conf('automation_urls'))

    index = -1
    movies = []
    for username in urls:

        index += 1
        if not enablers[index]:
            continue

        soup = BeautifulSoup(self.getHTMLData(self.url % (username, 1)))
        pagination = soup.find_all('li', attrs={'class': 'paginate-page'})
        number_of_pages = try_int(pagination[-1].find('a').get_text()) if pagination else 1

        # Include the last page: range() excludes its upper bound
        pages = list(range(1, number_of_pages + 1))

        for page in pages:
            soup = BeautifulSoup(self.getHTMLData(self.url % (username, page)))
            movies += self.getMoviesFromHTML(soup)

    return movies
def getIMDBids(self):

    movies = []

    urls = dict(list(zip(split_string(self.conf('automation_urls')),
                         [try_int(x) for x in split_string(self.conf('automation_urls_use'))])))

    for url in urls:
        if not urls[url]:
            continue

        rss_movies = self.getRSSData(url)

        for movie in rss_movies:
            description = self.get_text_element(movie, 'description')

            grabs = 0
            for item in movie:
                if item.attrib.get('name') == 'grabs':
                    grabs = item.attrib.get('value')
                    break

            if int(grabs) > try_int(self.conf('number_grabs')):
                title = re.match(r'.*Title: .a href.*/">(.*) \(\d{4}\).*', description).group(1)
                log.info2('%s grabs for movie: %s, enqueue...', (grabs, title))
                year = re.match(r'.*Year: (\d{4}).*', description).group(1)
                imdb = self.search(title, year)

                if imdb and self.isMinimalMovie(imdb):
                    movies.append(imdb['imdb'])

    return movies
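# Illustrative sketch (not part of the plugin): how the two regexes above pull
# the title and year out of an RSS <description>. The sample markup below is an
# assumption inferred from the patterns themselves, not a confirmed feed format.
import re

description = ('Title: <a href="http://www.imdb.com/title/tt0133093/">'
               'The Matrix (1999)</a><br />Year: 1999<br />')

title = re.match(r'.*Title: .a href.*/">(.*) \(\d{4}\).*', description).group(1)
year = re.match(r'.*Year: (\d{4}).*', description).group(1)
print(title, year)  # -> The Matrix 1999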
def parseMovie(self, movie):

    movie_data = {}
    try:

        try:
            if isinstance(movie, str):
                movie = json.loads(movie)
        except ValueError:
            log.info('No proper json to decode')
            return movie_data

        if movie.get('Response') == 'Parse Error' or movie.get('Response') == 'False':
            return movie_data

        if movie.get('Type', '').lower() != 'movie':
            return movie_data

        tmp_movie = movie.copy()
        for key in tmp_movie:
            tmp_movie_elem = tmp_movie.get(key)
            if not isinstance(tmp_movie_elem, str) or tmp_movie_elem.lower() == 'n/a':
                del movie[key]

        year = try_int(movie.get('Year', ''))

        movie_data = {
            'type': 'movie',
            'via_imdb': True,
            'titles': [movie.get('Title')] if movie.get('Title') else [],
            'original_title': movie.get('Title'),
            'images': {
                'poster': [movie.get('Poster', '')] if movie.get('Poster') and len(movie.get('Poster', '')) > 4 else [],
            },
            'rating': {
                'imdb': (try_float(movie.get('imdbRating', 0)), try_int(movie.get('imdbVotes', '').replace(',', ''))),
                #'rotten': (try_float(movie.get('tomatoRating', 0)), try_int(movie.get('tomatoReviews', '').replace(',', ''))),
            },
            'imdb': str(movie.get('imdbID', '')),
            'mpaa': str(movie.get('Rated', '')),
            'runtime': self.runtimeToMinutes(movie.get('Runtime', '')),
            'released': movie.get('Released'),
            'year': year if isinstance(year, int) else None,
            'plot': movie.get('Plot'),
            'genres': split_string(movie.get('Genre', '')),
            'directors': split_string(movie.get('Director', '')),
            'writers': split_string(movie.get('Writer', '')),
            'actors': split_string(movie.get('Actors', '')),
        }
        movie_data = dict((k, v) for k, v in list(movie_data.items()) if v)
    except:
        log.error('Failed parsing IMDB API json: %s', traceback.format_exc())

    return movie_data
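# Illustrative sketch (assumption: an OMDb-style payload, which is what the
# key names above suggest). Fields holding the string 'N/A' are stripped
# before the result dict is built, so they never reach movie_data.
sample = {
    'Response': 'True',
    'Type': 'movie',
    'Title': 'The Matrix',
    'Year': '1999',
    'imdbID': 'tt0133093',
    'imdbRating': '8.7',
    'imdbVotes': '1,900,000',
    'Poster': 'N/A',  # dropped by the n/a filter -> no poster image
}
# parseMovie(sample) would yield keys like 'titles', 'year', 'imdb' and 'rating'.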
def suggestView(self, limit=6, **kwargs):
    if self.is_disabled():
        return {'success': True, 'movies': []}

    movies = split_string(kwargs.get('movies', ''))
    ignored = split_string(kwargs.get('ignored', ''))
    seen = split_string(kwargs.get('seen', ''))

    cached_suggestion = self.getCache('suggestion_cached')
    if cached_suggestion:
        suggestions = cached_suggestion
    else:

        if not movies or len(movies) == 0:
            active_movies = fire_event('media.with_status', ['active', 'done'], types='movie', single=True)
            movies = [get_identifier(x) for x in active_movies]

        if not ignored or len(ignored) == 0:
            ignored = split_string(Env.prop('suggest_ignore', default=''))

        if not seen or len(seen) == 0:
            movies.extend(split_string(Env.prop('suggest_seen', default='')))

        suggestions = fire_event('movie.suggest', movies=movies, ignore=ignored, single=True)
        self.setCache('suggestion_cached', suggestions, timeout=6048000)  # Cache for 10 weeks

    medias = []
    for suggestion in suggestions[:int(limit)]:

        # Cache poster
        posters = suggestion.get('images', {}).get('poster', [])
        poster = [x for x in posters if 'tmdb' in x]
        posters = poster if len(poster) > 0 else posters

        cached_poster = fire_event('file.download', url=posters[0], single=True) if len(posters) > 0 else False
        files = {'image_poster': [cached_poster]} if cached_poster else {}

        medias.append({
            'status': 'suggested',
            'title': get_title(suggestion),
            'type': 'movie',
            'info': suggestion,
            'files': files,
            'identifiers': {
                'imdb': suggestion.get('imdb')
            }
        })

    return {'success': True, 'movies': medias}
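# Illustrative sketch of the poster preference above (example URLs are made
# up): tmdb-hosted images win when present, otherwise the original list is
# kept as the fallback.
posters = ['http://example.com/a.jpg', 'http://image.tmdb.org/t/p/b.jpg']
poster = [x for x in posters if 'tmdb' in x]
posters = poster if len(poster) > 0 else posters
print(posters)  # -> ['http://image.tmdb.org/t/p/b.jpg']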
def getMovie(self, url):
    name = split_string(split_string(url, '/ijw_')[-1], '/')[0]

    if name.startswith('ijw_'):
        name = name[4:]

    year_name = fire_event('scanner.name_year', name, single=True)

    return self.search(year_name.get('name'), year_name.get('year'))
def charView(self, **kwargs):

    type = split_string(kwargs.get('type', 'movie'))
    status = split_string(kwargs.get('status', None))
    release_status = split_string(kwargs.get('release_status', None))
    chars = self.availableChars(type, status, release_status)

    return {
        'success': True,
        'empty': len(chars) == 0,
        'chars': chars,
    }
def calculate(self, nzb, movie):
    """ Calculate the score of an NZB, used for sorting later """

    # Merge global and category
    preferred_words = split_string(Env.setting('preferred_words', section='searcher').lower())
    try:
        preferred_words = remove_duplicate(preferred_words + split_string(movie['category']['preferred'].lower()))
    except:
        pass

    score = nameScore(to_unicode(nzb['name']), movie['info']['year'], preferred_words)

    for movie_title in movie['info']['titles']:
        score += nameRatioScore(to_unicode(nzb['name']), to_unicode(movie_title))
        score += namePositionScore(to_unicode(nzb['name']), to_unicode(movie_title))

    score += sizeScore(nzb['size'])

    # Torrents only
    if nzb.get('seeders'):
        try:
            score += nzb.get('seeders') * 100 / 15
            score += nzb.get('leechers') * 100 / 30
        except:
            pass

    # Provider score
    score += providerScore(nzb['provider'])

    # Duplicates in name
    score += duplicateScore(nzb['name'], get_title(movie))

    # Merge global and category
    ignored_words = split_string(Env.setting('ignored_words', section='searcher').lower())
    try:
        ignored_words = remove_duplicate(ignored_words + split_string(movie['category']['ignored'].lower()))
    except:
        pass

    # Partial ignored words
    score += partialIgnoredScore(nzb['name'], get_title(movie), ignored_words)

    # Ignore single downloads from multipart
    score += halfMultipartScore(nzb['name'])

    # Extra provider specific check
    extra_score = nzb.get('extra_score')
    if extra_score:
        score += extra_score(nzb)

    # Scene / Nuke scoring
    score += sceneScore(nzb['name'])

    return score
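# Quick numeric check of the torrent health bonus above (pure arithmetic,
# no plugin code involved): each seeder is worth 100/15 ~ 6.7 points and each
# leecher 100/30 ~ 3.3, so a 30-seed / 15-leech release adds about 250 points.
seeders, leechers = 30, 15
bonus = seeders * 100 / 15 + leechers * 100 / 30
print(round(bonus))  # -> 250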
def toList(self, log_content=''):

    logs_raw = re.split(r'\[0m\n', to_unicode(log_content))

    logs = []
    re_split = r'\x1b'
    for log_line in logs_raw:
        split = re.split(re_split, log_line)
        if split and len(split) == 3:
            try:
                date, time, log_type = split_string(split[0], ' ')
                timestamp = '%s %s' % (date, time)
            except:
                timestamp = 'UNKNOWN'
                log_type = 'UNKNOWN'

            # Join everything after the timestamp/type segment
            message = ''.join(split[1:]) if len(split) > 1 else split[0]
            message = re.sub(r'\[\d+m\[', '[', message)

            logs.append({
                'time': timestamp,
                'type': log_type,
                'message': message
            })

    return logs
def ignoreView(self, imdb=None, **kwargs):

    ignored = split_string(Env.prop('charts_ignore', default=''))

    if imdb:
        ignored.append(imdb)
        Env.prop('charts_ignore', ','.join(set(ignored)))

    return {'result': True}
def deleteView(self, id='', **kwargs):

    ids = split_string(id)
    for media_id in ids:
        self.delete(media_id, delete_from=kwargs.get('delete_from', 'all'))

    return {
        'success': True,
    }
def automationView(self, force_update=False, **kwargs):

    db = get_db()

    charts = fire_event('automation.get_chart_list', merge=True)
    ignored = split_string(Env.prop('charts_ignore', default=''))

    # Create a list the movie/list.js can use
    for chart in charts:
        medias = []
        for media in chart.get('list', []):

            identifier = media.get('imdb')
            if identifier in ignored:
                continue

            try:
                try:
                    in_library = db.get('media', 'imdb-%s' % identifier)
                    if in_library:
                        continue
                except RecordNotFound:
                    pass
            except:
                pass

            # Cache poster
            posters = media.get('images', {}).get('poster', [])
            poster = [x for x in posters if 'tmdb' in x]
            posters = poster if len(poster) > 0 else posters

            cached_poster = fire_event('file.download', url=posters[0], single=True) if len(posters) > 0 else False
            files = {'image_poster': [cached_poster]} if cached_poster else {}

            medias.append({
                'status': 'chart',
                'title': get_title(media),
                'type': 'movie',
                'info': media,
                'files': files,
                'identifiers': {
                    'imdb': identifier
                }
            })

        chart['list'] = medias

    return {
        'success': True,
        'count': len(charts),
        'charts': charts,
        'ignored': ignored,
    }
def listView(self, **kwargs):

    total_movies, movies = self.list(
        types=split_string(kwargs.get('type')),
        status=split_string(kwargs.get('status')),
        release_status=split_string(kwargs.get('release_status')),
        status_or=kwargs.get('status_or') is not None,
        limit_offset=kwargs.get('limit_offset'),
        with_tags=split_string(kwargs.get('with_tags')),
        starts_with=kwargs.get('starts_with'),
        search=kwargs.get('search')
    )

    return {
        'success': True,
        'empty': len(movies) == 0,
        'total': total_movies,
        'movies': movies,
    }
def getIMDBids(self):

    movies = []

    enablers = [try_int(x) for x in split_string(self.conf('automation_urls_use'))]
    urls = split_string(self.conf('automation_urls'))

    namespace = 'http://www.w3.org/2005/Atom'
    namespace_im = 'http://itunes.apple.com/rss'

    index = -1
    for url in urls:

        index += 1

        # Guard against an enablers list shorter than the urls list
        if len(enablers) == 0 or len(enablers) <= index or not enablers[index]:
            continue

        try:
            cache_key = 'itunes.rss.%s' % md5(url)
            rss_data = self.getCache(cache_key, url)

            data = XMLTree.fromstring(rss_data)

            if data is not None:
                entry_tag = str(QName(namespace, 'entry'))
                rss_movies = self.get_elements(data, entry_tag)

                for movie in rss_movies:
                    name_tag = str(QName(namespace_im, 'name'))
                    name = self.get_text_element(movie, name_tag)

                    releaseDate_tag = str(QName(namespace_im, 'releaseDate'))
                    releaseDateText = self.get_text_element(movie, releaseDate_tag)
                    year = datetime.datetime.strptime(releaseDateText, '%Y-%m-%dT00:00:00-07:00').strftime('%Y')

                    imdb = self.search(name, year)

                    if imdb and self.isMinimalMovie(imdb):
                        movies.append(imdb['imdb'])
        except:
            log.error('Failed loading iTunes rss feed: %s %s', (url, traceback.format_exc()))

    return movies
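# Illustrative sketch: QName is simply a helper for building the
# '{namespace}tag' form that ElementTree uses to match namespaced elements.
from xml.etree.ElementTree import QName

print(str(QName('http://www.w3.org/2005/Atom', 'entry')))
# -> {http://www.w3.org/2005/Atom}entry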
def createStringIdentifier(self, file_path, folder='', exclude_filename=False):

    identifier = file_path.replace(folder, '').lstrip(os.path.sep)  # root folder
    identifier = os.path.splitext(identifier)[0]  # ext

    # Exclude file name path if needed (f.e. for DVD files)
    if exclude_filename:
        identifier = identifier[:len(identifier) - len(os.path.split(identifier)[-1])]

    # Make sure the identifier is lower case as all regex is with lower case tags
    identifier = identifier.lower()

    try:
        path_split = split_string(identifier, os.path.sep)
        # Only get filename
        identifier = path_split[-2] if len(path_split) > 1 and len(path_split[-2]) > len(path_split[-1]) else path_split[-1]
    except:
        pass

    # multipart
    identifier = self.removeMultipart(identifier)

    # remove cptag
    identifier = self.removeCPTag(identifier)

    # simplify the string
    identifier = simplify_string(identifier)

    year = self.findYear(file_path)

    # groups, release tags, scenename cleaner
    identifier = re.sub(self.clean, '::', identifier).strip(':')

    # Year
    if year and identifier[:4] != year:
        split_by = ':::' if ':::' in identifier else year
        identifier = '%s %s' % (identifier.split(split_by)[0].strip(), year)
    else:
        identifier = identifier.split('::')[0]

    # Remove duplicates
    out = []
    for word in identifier.split():
        if word not in out:
            out.append(word)
    identifier = ' '.join(out)

    return simplify_string(identifier)
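# Illustrative sketch of the duplicate-word pass above: it keeps the first
# occurrence of each word while preserving order, unlike set(), which would
# scramble the title.
words = 'the matrix the matrix 1999'.split()
out = []
for word in words:
    if word not in out:
        out.append(word)
print(' '.join(out))  # -> the matrix 1999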
def containsWords(self, rel_name, rel_words, conf, media):

    # Make sure it has required words
    words = split_string(self.conf('%s_words' % conf, section='searcher').lower())
    try:
        words = remove_duplicate(words + split_string(media['category'][conf].lower()))
    except:
        pass

    req_match = 0
    for req_set in words:
        if len(req_set) >= 2 and (req_set[:1] + req_set[-1:]) == '//':
            if re.search(req_set[1:-1], rel_name):
                log.debug('Regex match: %s', req_set[1:-1])
                req_match += 1
        else:
            req = split_string(req_set, '&')
            req_match += len(list(set(rel_words) & set(req))) == len(req)

    return words, req_match > 0
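# Illustrative sketch of the matching rules above (the release name is made
# up): a '/.../'-wrapped entry is treated as a regex, and '&'-joined words
# only count when ALL of them appear in the release.
import re

rel_name = 'some.movie.2014.1080p.bluray.x264'
rel_words = rel_name.split('.')

for req_set in ['/blu-?ray/', '1080p&x264', '720p&x264']:
    if len(req_set) >= 2 and (req_set[:1] + req_set[-1:]) == '//':
        matched = bool(re.search(req_set[1:-1], rel_name))
    else:
        req = req_set.split('&')
        matched = len(set(rel_words) & set(req)) == len(req)
    print(req_set, matched)  # -> True, True, False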
def isMinimalMovie(self, movie):
    if not movie.get('rating'):
        log.info('Ignoring %s, no rating available.', (movie['original_title']))
        return False

    if movie['rating'] and movie['rating'].get('imdb'):
        movie['votes'] = movie['rating']['imdb'][1]
        movie['rating'] = movie['rating']['imdb'][0]

    for minimal_type in ['year', 'rating', 'votes']:
        type_value = movie.get(minimal_type, 0)
        type_min = self.getMinimal(minimal_type)
        if type_value < type_min:
            log.info('%s too low for %s, need %s has %s', (minimal_type, movie['original_title'], type_min, type_value))
            return False

    movie_genres = [genre.lower() for genre in movie['genres']]
    required_genres = split_string(self.getMinimal('required_genres').lower())
    ignored_genres = split_string(self.getMinimal('ignored_genres').lower())

    req_match = 0
    for req_set in required_genres:
        req = split_string(req_set, '&')
        req_match += len(list(set(movie_genres) & set(req))) == len(req)

    if self.getMinimal('required_genres') and req_match == 0:
        log.info2('Required genre(s) missing for %s', movie['original_title'])
        return False

    for ign_set in ignored_genres:
        ign = split_string(ign_set, '&')
        if len(list(set(movie_genres) & set(ign))) == len(ign):
            log.info2('%s has blacklisted genre(s): %s', (movie['original_title'], ign))
            return False

    return True
def _searchOnTitle(self, title, media, quality, results):
    search_url = self.urls['search']

    # Create search parameters
    search_params = self.buildUrl(title, media, quality)

    min_seeds = try_int(self.conf('minimal_seeds'))
    if min_seeds:
        search_params += ' seed > %s' % (min_seeds - 1)

    rss_data = self.getRSSData(search_url % search_params)

    if rss_data:
        try:
            for result in rss_data:

                name = self.get_text_element(result, 'title')
                detail_url = self.get_text_element(result, 'link')
                description = self.get_text_element(result, 'description')

                magnet = split_string(detail_url, '/')[-1]
                magnet_url = 'magnet:?xt=urn:btih:%s&dn=%s&tr=%s' % (magnet.upper(), try_url_encode(name), try_url_encode('udp://tracker.openbittorrent.com/announce'))

                reg = re.search(r'Size: (?P<size>\d+) (?P<unit>[KMG]B) Seeds: (?P<seeds>[\d,]+) Peers: (?P<peers>[\d,]+)', six.text_type(description))
                size = reg.group('size')
                unit = reg.group('unit')
                seeds = reg.group('seeds').replace(',', '')
                peers = reg.group('peers').replace(',', '')

                # Size is stored in MB: GB scales up, KB rounds down to 0
                multiplier = 1
                if unit == 'GB':
                    multiplier = 1000
                elif unit == 'KB':
                    multiplier = 0

                results.append({
                    'id': magnet,
                    'name': six.text_type(name),
                    'url': magnet_url,
                    'detail_url': detail_url,
                    'size': try_int(size) * multiplier,
                    'seeders': try_int(seeds),
                    'leechers': try_int(peers),
                })
        except:
            log.error('Failed getting results from %s: %s', (self.getName(), traceback.format_exc()))
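# Illustrative sketch (the sample description line is an assumption inferred
# from the regex): parsing size/seeds/peers out of a feed description and
# normalising the size to MB.
import re

description = 'Size: 2 GB Seeds: 1,234 Peers: 56'
reg = re.search(r'Size: (?P<size>\d+) (?P<unit>[KMG]B) Seeds: (?P<seeds>[\d,]+) Peers: (?P<peers>[\d,]+)', description)

multiplier = {'GB': 1000, 'MB': 1, 'KB': 0}[reg.group('unit')]
print(int(reg.group('size')) * multiplier)       # -> 2000 (MB)
print(int(reg.group('seeds').replace(',', '')))  # -> 1234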
def migrate(self):

    url = self.conf('url')
    if url:
        host_split = split_string(url.split('://')[-1], split_on='/')

        self.conf('ssl', value=url.startswith('https'))
        self.conf('host', value=host_split[0].strip())
        self.conf('rpc_url', value='/'.join(host_split[1:]))

        self.deleteConf('url')
def getWatchlist(self):

    enablers = [try_int(x) for x in split_string(self.conf('automation_ids_use'))]
    ids = split_string(self.conf('automation_ids'))

    index = -1
    movies = []
    for user_id in ids:

        index += 1
        if not enablers[index]:
            continue

        data = self.getJsonData(self.url % user_id, decode_from='iso-8859-1')

        for movie in data:
            movies.append({
                'title': movie['movie']['title'],
                'year': movie['movie']['year']
            })

    return movies
def ignoreView(self, imdb=None, limit=6, remove_only=False, mark_seen=False, **kwargs):

    ignored = split_string(Env.prop('suggest_ignore', default=''))
    seen = split_string(Env.prop('suggest_seen', default=''))

    new_suggestions = []
    if imdb:
        if mark_seen:
            seen.append(imdb)
            Env.prop('suggest_seen', ','.join(set(seen)))
        elif not remove_only:
            ignored.append(imdb)
            Env.prop('suggest_ignore', ','.join(set(ignored)))

        new_suggestions = self.updateSuggestionCache(ignore_imdb=imdb, limit=limit, ignored=ignored, seen=seen)

    if len(new_suggestions) <= limit:
        return {'result': False}

    # Only return new (last) item
    media = {
        'status': 'suggested',
        'title': get_title(new_suggestions[limit]),
        'type': 'movie',
        'info': new_suggestions[limit],
        'identifiers': {
            'imdb': new_suggestions[limit].get('imdb')
        }
    }

    return {'result': True, 'movie': media}
def getIMDBids(self):

    ids = split_string(self.conf('automation_ids'))

    if len(ids) == 0:
        return []

    movies = []
    for movie in self.getWatchlist():
        imdb_id = self.search(movie.get('title'), movie.get('year'), imdb_only=True)
        movies.append(imdb_id)

    return movies
def markAsRead(self, ids=None, **kwargs):
    ids = split_string(ids) if ids else None

    try:
        db = get_db()
        for x in db.all('notification_unread', with_doc=True):
            if not ids or x['_id'] in ids:
                x['doc']['read'] = True
                db.update(x['doc'])
        return {'success': True}
    except:
        log.error('Failed mark as read: %s', traceback.format_exc())

    return {'success': False}
def refresh(self, id='', **kwargs):
    handlers = []
    ids = split_string(id)

    for x in ids:
        refresh_handler = self.createRefreshHandler(x)
        if refresh_handler:
            handlers.append(refresh_handler)

    fire_event('notify.frontend', type='media.busy', data={'_id': ids})
    fire_event_async('schedule.queue', handlers=handlers)

    return {
        'success': True,
    }
def edit(self, id='', **kwargs):
    try:
        db = get_db()

        ids = split_string(id)
        for media_id in ids:

            try:
                m = db.get('id', media_id)
                m['profile_id'] = kwargs.get('profile_id') or m['profile_id']

                cat_id = kwargs.get('category_id')
                if cat_id is not None:
                    m['category_id'] = cat_id if len(cat_id) > 0 else m['category_id']

                # Remove releases
                for rel in fire_event('release.for_media', m['_id'], single=True):
                    if rel['status'] == 'available':
                        db.delete(rel)

                # Default title
                if kwargs.get('default_title'):
                    m['title'] = kwargs.get('default_title')

                db.update(m)

                fire_event('media.restatus', m['_id'], single=True)

                m = db.get('id', media_id)

                movie_dict = fire_event('media.get', m['_id'], single=True)
                fire_event_async('movie.searcher.single', movie_dict, on_complete=self.createNotifyFront(media_id))
            except:
                print(traceback.format_exc())
                log.error('Can\'t edit non-existing media')

        return {
            'success': True,
        }
    except:
        log.error('Failed editing media: %s', traceback.format_exc())

    return {
        'success': False,
    }
def getHosts(self):

    uses = split_string(str(self.conf('use')), clean=False)
    hosts = split_string(self.conf('host'), clean=False)
    api_keys = split_string(self.conf('api_key'), clean=False)
    extra_score = split_string(self.conf('extra_score'), clean=False)
    custom_tags = split_string(self.conf('custom_tag'), clean=False)
    custom_categories = split_string(self.conf('custom_categories'), clean=False)

    host_list = []
    for nr in range(len(hosts)):

        try:
            key = api_keys[nr]
        except:
            key = ''

        try:
            host = hosts[nr]
        except:
            host = ''

        try:
            score = try_int(extra_score[nr])
        except:
            score = 0

        try:
            custom_tag = custom_tags[nr]
        except:
            custom_tag = ''

        try:
            custom_category = custom_categories[nr].replace(' ', ',')
        except:
            custom_category = ''

        host_list.append({
            'use': uses[nr],
            'host': host,
            'api_key': key,
            'extra_score': score,
            'custom_tag': custom_tag,
            'custom_category': custom_category
        })

    return host_list
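# Illustrative sketch of the parallel-column settings format assumed above
# (hostnames are made up): each comma-separated setting is a column, and
# row N of every column describes host N; short columns fall back to ''.
hosts = 'a.example,b.example'.split(',')
api_keys = 'key-a'.split(',')  # shorter list than hosts

for nr in range(len(hosts)):
    try:
        key = api_keys[nr]
    except IndexError:
        key = ''
    print(hosts[nr], repr(key))  # -> a.example 'key-a' / b.example ''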
def notify(self, message='', data=None, listener=None):
    if not data:
        data = {}

    nma = pynma.PyNMA()
    keys = split_string(self.conf('api_key'))
    nma.addkey(keys)
    nma.developerkey(self.conf('dev_key'))

    response = nma.push(
        application=self.default_title,
        event=message.split(' ')[0],
        description=message,
        priority=self.conf('priority'),
        batch_mode=len(keys) > 1
    )

    successful = 0
    for key in keys:
        if not response[str(key)]['code'] == six.u('200'):
            log.error('Could not send notification to NotifyMyAndroid (%s). %s', (key, response[key]['message']))
        else:
            successful += 1

    return successful == len(keys)
def listView(self, limit_offset=None, **kwargs):

    db = get_db()

    if limit_offset:
        splt = split_string(limit_offset)
        limit = try_int(splt[0])
        offset = try_int(0 if len(splt) == 1 else splt[1])
        results = db.all('notification', limit=limit, offset=offset, with_doc=True)
    else:
        results = db.all('notification', limit=200, with_doc=True)

    notifications = []
    for n in results:
        notifications.append(n['doc'])

    return {
        'success': True,
        'empty': len(notifications) == 0,
        'notifications': notifications
    }
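# Illustrative sketch of the 'limit,offset' parameter format parsed above
# (the format is inferred from the parsing code): a single value means
# "limit only", a pair means "limit,offset".
for limit_offset in ('20', '20,40'):
    splt = limit_offset.split(',')
    limit = int(splt[0])
    offset = int(0 if len(splt) == 1 else splt[1])
    print(limit, offset)  # -> 20 0, then 20 40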
def getDevices(self):
    return split_string(self.conf('devices'))
def getLanguages(self):

    languages = split_string(Env.setting('languages', section='core'))
    if len(languages):
        return languages

    return ['en']
def list(self, types=None, status=None, release_status=None, status_or=False, limit_offset=None, with_tags=None, starts_with=None, search=None):

    db = get_db()

    # Make a list from string
    if status and not isinstance(status, (list, tuple)):
        status = [status]
    if release_status and not isinstance(release_status, (list, tuple)):
        release_status = [release_status]
    if types and not isinstance(types, (list, tuple)):
        types = [types]
    if with_tags and not isinstance(with_tags, (list, tuple)):
        with_tags = [with_tags]

    # query media ids
    if types:
        all_media_ids = set()
        for media_type in types:
            all_media_ids = all_media_ids.union(set([x['_id'] for x in db.get_many('media_by_type', media_type)]))
    else:
        all_media_ids = set([x['_id'] for x in db.all('media')])

    media_ids = list(all_media_ids)
    filter_by = {}

    # Filter on movie status
    if status and len(status) > 0:
        filter_by['media_status'] = set()
        for media_status in fire_event('media.with_status', status, with_doc=False, single=True):
            filter_by['media_status'].add(media_status.get('_id'))

    # Filter on release status
    if release_status and len(release_status) > 0:
        filter_by['release_status'] = set()
        for release_status in fire_event('release.with_status', release_status, with_doc=False, single=True):
            filter_by['release_status'].add(release_status.get('media_id'))

    # Add search filters
    if starts_with:
        starts_with = to_unicode(starts_with.lower())[0]
        starts_with = starts_with if starts_with in ascii_lowercase else '#'
        filter_by['starts_with'] = [x['_id'] for x in db.get_many('media_startswith', starts_with)]

    # Add tag filter
    if with_tags:
        filter_by['with_tags'] = set()
        for tag in with_tags:
            for x in db.get_many('media_tag', tag):
                filter_by['with_tags'].add(x['_id'])

    # Filter with search query
    if search:
        filter_by['search'] = [x['_id'] for x in db.get_many('media_search_title', search)]

    if status_or and 'media_status' in filter_by and 'release_status' in filter_by:
        filter_by['status'] = list(filter_by['media_status']) + list(filter_by['release_status'])
        del filter_by['media_status']
        del filter_by['release_status']

    # Filter by combining ids
    for x in filter_by:
        media_ids = [n for n in media_ids if n in filter_by[x]]

    total_count = len(media_ids)
    if total_count == 0:
        return 0, []

    offset = 0
    limit = -1
    if limit_offset:
        splt = split_string(limit_offset) if isinstance(limit_offset, str) else limit_offset
        limit = try_int(splt[0])
        offset = try_int(0 if len(splt) == 1 else splt[1])

    # List movies based on title order
    medias = []
    for m in db.all('media_title'):
        media_id = m['_id']
        if media_id not in media_ids:
            continue
        if offset > 0:
            offset -= 1
            continue

        media = fire_event('media.get', media_id, single=True)

        # Skip if no media has been found
        if not media:
            continue

        # Merge releases with movie dict
        medias.append(media)

        # remove from media ids
        media_ids.remove(media_id)

        if len(media_ids) == 0 or len(medias) == limit:
            break

    return total_count, medias
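# Illustrative sketch of the id-combining step above (made-up ids): each
# filter contributes a set of ids, and a media item must appear in every one
# to survive, i.e. the filters are ANDed together.
media_ids = ['m1', 'm2', 'm3', 'm4']
filter_by = {
    'media_status': {'m1', 'm2', 'm3'},
    'with_tags': {'m2', 'm3', 'm4'},
}
for x in filter_by:
    media_ids = [n for n in media_ids if n in filter_by[x]]
print(media_ids)  # -> ['m2', 'm3']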