def suggestView(self, limit=6, **kwargs):
    """Return up to `limit` movie suggestions, cached for 10 weeks.

    Query params (comma separated imdb ids): movies (seed list),
    ignored, seen. Empty params fall back to the active/done wanted
    list and the stored suggest_ignore / suggest_seen properties.
    """
    movies = splitString(kwargs.get('movies', ''))
    ignored = splitString(kwargs.get('ignored', ''))
    seen = splitString(kwargs.get('seen', ''))

    suggestions = self.getCache('suggestion_cached')
    if not suggestions:
        if not movies:
            active_movies = fireEvent('media.with_status', ['active', 'done'], single=True)
            movies = [getIdentifier(m) for m in active_movies]

        if not ignored:
            ignored = splitString(Env.prop('suggest_ignore', default=''))

        if not seen:
            # Already-seen titles also seed the suggestion engine
            movies.extend(splitString(Env.prop('suggest_seen', default='')))

        suggestions = fireEvent('movie.suggest', movies=movies, ignore=ignored, single=True)
        self.setCache('suggestion_cached', suggestions, timeout=6048000)  # Cache for 10 weeks

    return {
        'success': True,
        'count': len(suggestions),
        'suggestions': suggestions[:int(limit)]
    }
def getIMDBids(self):
    """Page through every enabled IMDB watchlist and collect imdb ids.

    List/user urls are first normalized to the compact sorted view,
    then paged through (IMDB serves 225 entries per page).
    Returns a de-duplicated list of imdb ids.
    """
    movies = []

    watchlist_enablers = [tryInt(x) for x in splitString(self.conf('automation_urls_use'))]
    watchlist_urls = splitString(self.conf('automation_urls'))

    index = -1
    for watchlist_url in watchlist_urls:

        try:
            # Get list ID
            ids = re.findall('(?:list/|list_id=)([a-zA-Z0-9\-_]{11})', watchlist_url)
            if len(ids) == 1:
                watchlist_url = 'http://www.imdb.com/list/%s/?view=compact&sort=created:asc' % ids[0]
            else:
                # Try find user id with watchlist
                userids = re.findall('(ur\d{7,9})', watchlist_url)
                if len(userids) == 1:
                    watchlist_url = 'http://www.imdb.com/user/%s/watchlist?view=compact&sort=created:asc' % userids[0]
        except:
            log.error('Failed getting id from watchlist: %s', traceback.format_exc())

        index += 1
        # Guard against fewer "use" flags than urls (used to raise IndexError)
        if index >= len(watchlist_enablers) or not watchlist_enablers[index]:
            continue

        start = 0
        while True:
            try:
                w_url = '%s&start=%s' % (watchlist_url, start)
                imdbs = self.getFromURL(w_url)

                for imdb in imdbs:
                    if imdb not in movies:
                        movies.append(imdb)

                    if self.shuttingDown():
                        break

                log.debug('Found %s movies on %s', (len(imdbs), w_url))

                # Stop paging on shutdown too; previously the break only
                # exited the inner loop and fetching kept going.
                if self.shuttingDown() or len(imdbs) < 225:
                    break

                start = len(movies)

            except:
                log.error('Failed loading IMDB watchlist: %s %s', (watchlist_url, traceback.format_exc()))
                break

    return movies
def toList(self, log_content=''):
    """Parse raw ANSI-colored log text into a list of entry dicts.

    Each entry has 'time', 'type' and 'message' keys; headers that
    cannot be parsed fall back to 'UNKNOWN'.
    """
    logs = []

    for raw in toUnicode(log_content).split('[0m\n'):
        split = splitString(raw, '\x1b')
        if not split:
            continue

        try:
            # First chunk is "<date> <time> <level>"
            date, time, log_type = splitString(split[0], ' ')
            timestamp = '%s %s' % (date, time)
        except:
            timestamp = 'UNKNOWN'
            log_type = 'UNKNOWN'

        message = ''.join(split[1]) if len(split) > 1 else split[0]
        # Strip leftover color escape remnants
        message = re.sub('\[\d+m\[', '[', message)

        logs.append({
            'time': timestamp,
            'type': log_type,
            'message': message
        })

    return logs
def getWatchlist(self):
    """Scrape watched movies for every enabled Letterboxd username.

    Reads the pagination of the first page to learn the page count,
    then scrapes each page. Returns the combined movie list.
    """
    enablers = [tryInt(x) for x in splitString(self.conf('automation_urls_use'))]
    urls = splitString(self.conf('automation_urls'))

    index = -1
    movies = []
    for username in urls:

        index += 1
        if not enablers[index]:
            continue

        soup = BeautifulSoup(self.getHTMLData(self.url % (username, 1)))

        pagination = soup.find_all('li', attrs={'class': 'paginate-page'})
        number_of_pages = tryInt(pagination[-1].find('a').get_text()) if pagination else 1

        # range() excludes its stop value: use number_of_pages + 1 so the
        # last page is scraped too (the old range(1, n) skipped it, and
        # returned nothing at all for single-page watchlists).
        pages = range(1, number_of_pages + 1)

        for page in pages:
            soup = BeautifulSoup(self.getHTMLData(self.url % (username, page)))
            movies += self.getMoviesFromHTML(soup)

    return movies
def getIMDBids(self):
    """Return imdb ids found in every enabled IMDB watchlist feed."""
    movies = []

    watchlist_enablers = [tryInt(x) for x in splitString(self.conf('automation_urls_use'))]
    watchlist_urls = splitString(self.conf('automation_urls'))

    for index, watchlist_url in enumerate(watchlist_urls):
        if not watchlist_enablers[index]:
            continue

        try:
            log.debug('Started IMDB watchlists: %s', watchlist_url)
            rss_data = self.getHTMLData(watchlist_url)
            imdbs = getImdb(rss_data, multiple=True) if rss_data else []

            for imdb in imdbs:
                movies.append(imdb)

                if self.shuttingDown():
                    break
        except:
            log.error('Failed loading IMDB watchlist: %s %s', (watchlist_url, traceback.format_exc()))

    return movies
def toList(self, log_content=''):
    """Convert raw ANSI-colored log output into [{'time','type','message'}, ...]."""
    entries = []

    for raw in toUnicode(log_content).split('[0m\n'):
        parts = splitString(raw, '\x1b')
        if not parts:
            continue

        try:
            date, time, log_type = splitString(parts[0], ' ')
            timestamp = '%s %s' % (date, time)
        except:
            # Header didn't match "<date> <time> <level>"
            timestamp, log_type = 'UNKNOWN', 'UNKNOWN'

        message = ''.join(parts[1]) if len(parts) > 1 else parts[0]
        message = re.sub('\[\d+m\[', '[', message)  # drop stray color codes

        entries.append({'time': timestamp, 'type': log_type, 'message': message})

    return entries
def getIMDBids(self):
    """Return imdb ids from all enabled automation RSS urls (responses cached)."""
    if self.isDisabled():
        return

    movies = []

    enablers = [tryInt(x) for x in splitString(self.conf('automation_urls_use'))]
    urls = splitString(self.conf('automation_urls'))

    for index, url in enumerate(urls):
        if not enablers[index]:
            continue

        try:
            cache_key = 'imdb.rss.%s' % md5(url)
            rss_data = self.getCache(cache_key, url)
            for imdb in getImdb(rss_data, multiple = True):
                movies.append(imdb)
        except:
            log.error('Failed loading IMDB watchlist: %s %s', (url, traceback.format_exc()))

    return movies
def getHosts(self):
    """Build a config dict per notification host.

    The settings are parallel comma separated lists; missing api_key
    entries default to '' and missing extra_score entries to 0.
    """
    uses = splitString(str(self.conf('use')), clean=False)
    hosts = splitString(self.conf('host'), clean=False)
    api_keys = splitString(self.conf('api_key'), clean=False)
    extra_score = splitString(self.conf('extra_score'), clean=False)

    host_list = []  # renamed from `list`: don't shadow the builtin
    for nr in range(len(hosts)):
        host_list.append({
            'use': uses[nr],
            'host': hosts[nr],
            # Parallel lists can be shorter than `hosts`; use defaults then
            'api_key': api_keys[nr] if nr < len(api_keys) else '',
            'extra_score': tryInt(extra_score[nr]) if len(extra_score) > nr else 0,
        })

    return host_list
def isMinimalMovie(self, movie):
    """Check a movie dict against the configured minimal requirements.

    Flattens the imdb rating tuple into 'rating'/'votes', validates the
    year/rating/votes minimums, then the required and ignored genre
    sets ('&' inside a set means those genres must occur together).
    """
    if not movie.get('rating'):
        return False

    # Flatten (rating, votes) tuple into separate fields
    if movie['rating'] and movie['rating'].get('imdb'):
        movie['votes'] = movie['rating']['imdb'][1]
        movie['rating'] = movie['rating']['imdb'][0]

    for field in ('year', 'rating', 'votes'):
        value = movie.get(field, 0)
        threshold = self.getMinimal(field)
        if value < threshold:
            log.info('%s too low for %s, need %s has %s', (field, movie['original_title'], threshold, value))
            return False

    movie_genres = [genre.lower() for genre in movie['genres']]
    required_genres = splitString(self.getMinimal('required_genres').lower())
    ignored_genres = splitString(self.getMinimal('ignored_genres').lower())

    req_match = 0
    for req_set in required_genres:
        req = splitString(req_set, '&')
        # Counts as a match only when every genre of the '&'-set is present
        req_match += len(list(set(movie_genres) & set(req))) == len(req)

    if self.getMinimal('required_genres') and req_match == 0:
        log.info2('Required genre(s) missing for %s', movie['original_title'])
        return False

    for ign_set in ignored_genres:
        ign = splitString(ign_set, '&')
        if len(list(set(movie_genres) & set(ign))) == len(ign):
            log.info2('%s has blacklisted genre(s): %s', (movie['original_title'], ign))
            return False

    return True
def listView(self):
    """API: list movies filtered by the request parameters."""
    params = getParams()

    total_movies, movies = self.list(
        status = splitString(params.get('status', None)),
        release_status = splitString(params.get('release_status', None)),
        limit_offset = params.get('limit_offset', None),
        starts_with = params.get('starts_with', None),
        search = params.get('search', None),
        order = params.get('order', None)
    )

    return jsonified({
        'success': True,
        'empty': len(movies) == 0,
        'total': total_movies,
        'movies': movies,
    })
def getIMDBids(self):
    """Resolve imdb ids for every entry of the enabled automation RSS feeds."""
    movies = []

    enablers = [tryInt(x) for x in splitString(self.conf('automation_urls_use'))]

    for index, rss_url in enumerate(splitString(self.conf('automation_urls'))):
        if not enablers[index]:
            continue

        rss_movies = self.getRSSData(rss_url, headers = {'Referer': ''})

        for movie in rss_movies:
            # Parse "<name> <year>" out of the feed title, then search imdb
            nameyear = fireEvent('scanner.name_year', self.getTextElement(movie, 'title'), single = True)
            imdb = self.search(nameyear.get('name'), nameyear.get('year'), imdb_only = True)

            if imdb:
                movies.append(imdb)

    return movies
def getHosts(self):
    """Assemble per-host notification settings from parallel config lists.

    Entries missing from the shorter lists fall back to "" (strings)
    or 0 (extra_score).
    """
    uses = splitString(str(self.conf("use")), clean=False)
    hosts = splitString(self.conf("host"), clean=False)
    api_keys = splitString(self.conf("api_key"), clean=False)
    extra_score = splitString(self.conf("extra_score"), clean=False)
    custom_tags = splitString(self.conf("custom_tag"), clean=False)

    host_list = []  # renamed from `list`: don't shadow the builtin
    for nr in range(len(hosts)):
        host_list.append({
            "use": uses[nr],
            "host": hosts[nr],
            "api_key": api_keys[nr] if nr < len(api_keys) else "",
            "extra_score": tryInt(extra_score[nr]) if nr < len(extra_score) else 0,
            "custom_tag": custom_tags[nr] if nr < len(custom_tags) else "",
        })

    return host_list
def getWatchlist(self):
    """Fetch title/year pairs from each enabled user's watchlist JSON feed."""
    enablers = [tryInt(x) for x in splitString(self.conf('automation_ids_use'))]
    ids = splitString(self.conf('automation_ids'))

    movies = []
    for index, user_id in enumerate(ids):
        if not enablers[index]:
            continue

        data = self.getJsonData(self.url % user_id, decode_from='iso-8859-1')

        for entry in data:
            movies.append({
                'title': entry['movie']['title'],
                'year': entry['movie']['year']
            })

    return movies
def suggestView(self, **kwargs):
    """Return movie suggestions, seeding from the active/done wanted list.

    Results are cached for 10 weeks under the 'suggestion_cached' key.
    """
    movies = splitString(kwargs.get('movies', ''))
    ignored = splitString(kwargs.get('ignored', ''))
    limit = kwargs.get('limit', 6)

    if not movies or len(movies) == 0:
        db = get_session()
        active_movies = db.query(Movie) \
            .filter(or_(*[Movie.status.has(identifier = s) for s in ['active', 'done']])).all()
        movies = [x.library.identifier for x in active_movies]

    if not ignored or len(ignored) == 0:
        ignored = splitString(Env.prop('suggest_ignore', default = ''))

    cached_suggestion = self.getCache('suggestion_cached')
    if cached_suggestion:
        suggestions = cached_suggestion
    else:
        suggestions = fireEvent('movie.suggest', movies = movies, ignore = ignored, single = True)
        # Store under the same plain key getCache() reads above; the old
        # md5(ss(...)) key never matched, so the cache was never hit.
        self.setCache('suggestion_cached', suggestions, timeout = 6048000)  # Cache for 10 weeks

    return {
        'success': True,
        'count': len(suggestions),
        'suggestions': suggestions[:limit]
    }
def suggestView(self, limit = 6, **kwargs):
    """Return up to `limit` cached or freshly computed movie suggestions."""
    movies = splitString(kwargs.get('movies', ''))
    ignored = splitString(kwargs.get('ignored', ''))
    seen = splitString(kwargs.get('seen', ''))

    suggestions = self.getCache('suggestion_cached')

    if not suggestions:
        if not movies:
            db = get_session()
            active_movies = db.query(Movie) \
                .options(joinedload_all('library')) \
                .filter(or_(*[Movie.status.has(identifier = s) for s in ['active', 'done']])).all()
            movies = [x.library.identifier for x in active_movies]

        if not ignored:
            ignored = splitString(Env.prop('suggest_ignore', default = ''))

        if not seen:
            # Seen titles also seed the suggestion engine
            movies.extend(splitString(Env.prop('suggest_seen', default = '')))

        suggestions = fireEvent('movie.suggest', movies = movies, ignore = ignored, single = True)
        self.setCache('suggestion_cached', suggestions, timeout = 6048000)  # Cache for 10 weeks

    return {
        'success': True,
        'count': len(suggestions),
        'suggestions': suggestions[:int(limit)]
    }
def getHosts(self):
    """Build the per-host notification settings from parallel config lists.

    Shorter lists fall back to '' (api_key/custom_tag) or 0 (extra_score).
    """
    uses = splitString(str(self.conf('use')), clean = False)
    hosts = splitString(self.conf('host'), clean = False)
    api_keys = splitString(self.conf('api_key'), clean = False)
    extra_score = splitString(self.conf('extra_score'), clean = False)
    custom_tags = splitString(self.conf('custom_tag'), clean = False)

    host_list = []  # renamed from `list`: don't shadow the builtin
    for nr in range(len(hosts)):
        host_list.append({
            'use': uses[nr],
            'host': hosts[nr],
            'api_key': api_keys[nr] if nr < len(api_keys) else '',
            'extra_score': tryInt(extra_score[nr]) if nr < len(extra_score) else 0,
            'custom_tag': custom_tags[nr] if nr < len(custom_tags) else ''
        })

    return host_list
def listView(self, **kwargs):
    """API: list media filtered by type/status plus paging and search options."""
    total_movies, movies = self.list(
        types = splitString(kwargs.get('types')),
        status = splitString(kwargs.get('status')),
        release_status = splitString(kwargs.get('release_status')),
        limit_offset = kwargs.get('limit_offset'),
        starts_with = kwargs.get('starts_with'),
        search = kwargs.get('search'),
        order = kwargs.get('order')
    )

    return {
        'success': True,
        'empty': len(movies) == 0,
        'total': total_movies,
        'movies': movies,
    }
def getFromURL(self, url):
    """Scrape imdb ids from one page of an IMDB list url.

    First tries the compact-list markup directly, then falls back to
    parsing the page body with BeautifulSoup; returns [] when the
    layout is unrecognized.
    """
    log.debug('Getting IMDBs from: %s', url)

    html = self.getHTMLData(url)

    try:
        split = splitString(html, split_on = '<div class="list compact">')[1]
        html = splitString(split, split_on = '<div class="pages">')[0]
    except:
        try:
            split = splitString(html, split_on = '<div id="main">')
            if len(split) < 2:
                log.error('Failed parsing IMDB page "%s", unexpected html.', url)
                return []

            html = BeautifulSoup(split[1])
            for css_class in ['list compact', 'lister', 'list detail sub-list']:
                container = html.find('div', attrs = {'class': css_class})
                if container:
                    html = ''.join([str(x) for x in container.contents])
                    break
        except:
            log.error('Failed parsing IMDB page "%s": %s', (url, traceback.format_exc()))

    html = ss(html)

    return getImdb(html, multiple = True) if html else []
def suggestView(self, limit = 6, **kwargs):
    """Serve movie suggestions from cache, computing and caching on a miss.

    kwargs may carry comma separated imdb id lists: movies, ignored, seen.
    """
    movies = splitString(kwargs.get('movies', ''))
    ignored = splitString(kwargs.get('ignored', ''))
    seen = splitString(kwargs.get('seen', ''))

    cached = self.getCache('suggestion_cached')
    if cached:
        suggestions = cached
    else:
        if not movies:
            # Seed with every active/done movie's identifier
            active_movies = fireEvent('media.with_status', ['active', 'done'], single = True)
            movies = [getIdentifier(x) for x in active_movies]

        if not ignored:
            ignored = splitString(Env.prop('suggest_ignore', default = ''))

        if not seen:
            movies.extend(splitString(Env.prop('suggest_seen', default = '')))

        suggestions = fireEvent('movie.suggest', movies = movies, ignore = ignored, single = True)
        self.setCache('suggestion_cached', suggestions, timeout = 6048000)  # Cache for 10 weeks

    return {
        'success': True,
        'count': len(suggestions),
        'suggestions': suggestions[:int(limit)]
    }
def isMinimalMovie(self, movie):
    """Validate a movie dict against the configured minimal requirements.

    Checks year/rating/votes thresholds, then required and ignored
    genre sets; '&' combines genres that must all be present.
    """
    if not movie.get('rating'):
        return False

    # Flatten the (rating, votes) imdb tuple into separate fields
    if movie['rating'] and movie['rating'].get('imdb'):
        movie['votes'] = movie['rating']['imdb'][1]
        movie['rating'] = movie['rating']['imdb'][0]

    for field in ('year', 'rating', 'votes'):
        value = movie.get(field, 0)
        threshold = self.getMinimal(field)
        if value < threshold:
            log.info('%s too low for %s, need %s has %s', (field, movie['original_title'], threshold, value))
            return False

    movie_genres = [genre.lower() for genre in movie['genres']]
    required_genres = splitString(self.getMinimal('required_genres').lower())
    ignored_genres = splitString(self.getMinimal('ignored_genres').lower())

    req_match = 0
    for req_set in required_genres:
        req = splitString(req_set, '&')
        req_match += len(list(set(movie_genres) & set(req))) == len(req)

    if self.getMinimal('required_genres') and req_match == 0:
        log.info2("Required genre(s) missing for %s" % movie['original_title'])
        return False

    for ign_set in ignored_genres:
        ign = splitString(ign_set, '&')
        if len(list(set(movie_genres) & set(ign))) == len(ign):
            log.info2("%s has blacklisted genre(s): %s" % (movie['original_title'], ign))
            return False

    return True
def getIMDBids(self):
    """Collect imdb ids from each enabled watchlist url's HTML."""
    movies = []

    enablers = [tryInt(x) for x in splitString(self.conf('automation_urls_use'))]

    for index, url in enumerate(splitString(self.conf('automation_urls'))):
        if not enablers[index]:
            continue

        try:
            page = self.getHTMLData(url)
            imdbs = getImdb(page, multiple=True) if page else []
            movies.extend(imdbs)
        except:
            log.error('Failed loading IMDB watchlist: %s %s', (url, traceback.format_exc()))

    return movies
def getIMDBids(self):
    """Gather imdb ids from all enabled IMDB watchlist feeds."""
    movies = []

    enablers = [tryInt(x) for x in splitString(self.conf('automation_urls_use'))]
    urls = splitString(self.conf('automation_urls'))

    for index, watchlist_url in enumerate(urls):
        if not enablers[index]:
            continue

        try:
            log.debug('Started IMDB watchlists: %s', watchlist_url)
            data = self.getHTMLData(watchlist_url)

            for imdb in (getImdb(data, multiple = True) if data else []):
                movies.append(imdb)

                if self.shuttingDown():
                    break
        except:
            log.error('Failed loading IMDB watchlist: %s %s', (watchlist_url, traceback.format_exc()))

    return movies
def getFromURL(self, url):
    """Extract imdb ids from a single IMDB list page.

    Tries the compact list markup first; on failure falls back to a
    BeautifulSoup scan of the main content div. Unknown layouts yield [].
    """
    log.debug("Getting IMDBs from: %s", url)

    html = self.getHTMLData(url)

    try:
        # Fast path: slice the compact list section out of the raw html
        section = splitString(html, split_on='<div class="list compact">')[1]
        html = splitString(section, split_on='<div class="pages">')[0]
    except:
        try:
            parts = splitString(html, split_on='<div id="main">')
            if len(parts) < 2:
                log.error('Failed parsing IMDB page "%s", unexpected html.', url)
                return []

            html = BeautifulSoup(parts[1])
            for css_class in ["list compact", "lister", "list detail sub-list"]:
                found = html.find("div", attrs={"class": css_class})
                if found:
                    html = "".join([str(x) for x in found.contents])
                    break
        except:
            log.error('Failed parsing IMDB page "%s": %s', (url, traceback.format_exc()))

    html = ss(html)

    return getImdb(html, multiple=True) if html else []
def ignoreView(self, imdb=None, limit=6, remove_only=False, mark_seen=False, **kwargs):
    """Mark a suggestion as seen or ignored and refresh the suggestion cache.

    Returns the updated ignore count plus the single suggestion that
    backfills the removed slot.
    """
    ignored = splitString(Env.prop('suggest_ignore', default=''))
    seen = splitString(Env.prop('suggest_seen', default=''))

    refreshed = []
    if imdb:
        if mark_seen:
            seen.append(imdb)
            Env.prop('suggest_seen', ','.join(set(seen)))
        elif not remove_only:
            # Not seen and not a plain removal: blacklist it
            ignored.append(imdb)
            Env.prop('suggest_ignore', ','.join(set(ignored)))

        refreshed = self.updateSuggestionCache(ignore_imdb=imdb, limit=limit, ignored=ignored, seen=seen)

    return {
        'result': True,
        'ignore_count': len(ignored),
        'suggestions': refreshed[limit - 1:limit]
    }
def getIMDBids(self):
    """Return imdb ids scraped from every enabled automation url."""
    movies = []

    enablers = [tryInt(x) for x in splitString(self.conf('automation_urls_use'))]
    urls = splitString(self.conf('automation_urls'))

    for index, url in enumerate(urls):
        if not enablers[index]:
            continue

        try:
            rss_data = self.getHTMLData(url)
            for imdb in getImdb(rss_data, multiple = True):
                movies.append(imdb)
        except:
            log.error('Failed loading IMDB watchlist: %s %s', (url, traceback.format_exc()))

    return movies
def charView(self, **kwargs):
    """API: return the starting characters available for the current filters."""
    # Renamed local from `type` to avoid shadowing the builtin
    media_type = splitString(kwargs.get("type", "movie"))
    status = splitString(kwargs.get("status", None))
    release_status = splitString(kwargs.get("release_status", None))

    chars = self.availableChars(media_type, status, release_status)

    return {
        "success": True,
        "empty": len(chars) == 0,
        "chars": chars
    }
def charView(self):
    """API: return available first characters for the movie list filter."""
    params = getParams()

    chars = self.availableChars(
        splitString(params.get("status", None)),
        splitString(params.get("release_status", None)))

    return jsonified({"success": True, "empty": len(chars) == 0, "chars": chars})
def suggestView(self, limit=6, **kwargs):
    """Return suggested movies as media dicts, with locally cached posters.

    Serves from the 10-week suggestion cache when present; otherwise
    seeds the suggester with active/done movies minus ignored/seen ids.
    """
    if self.isDisabled():
        return {'success': True, 'movies': []}

    movies = splitString(kwargs.get('movies', ''))
    ignored = splitString(kwargs.get('ignored', ''))
    seen = splitString(kwargs.get('seen', ''))

    suggestions = self.getCache('suggestion_cached')
    if not suggestions:
        if not movies:
            active_movies = fireEvent('media.with_status', ['active', 'done'], types='movie', single=True)
            movies = [getIdentifier(x) for x in active_movies]

        if not ignored:
            ignored = splitString(Env.prop('suggest_ignore', default=''))

        if not seen:
            movies.extend(splitString(Env.prop('suggest_seen', default='')))

        suggestions = fireEvent('movie.suggest', movies=movies, ignore=ignored, single=True)
        self.setCache('suggestion_cached', suggestions, timeout=6048000)  # Cache for 10 weeks

    medias = []
    for suggestion in suggestions[:int(limit)]:

        # Prefer tmdb-hosted posters and cache the first one locally
        posters = suggestion.get('images', {}).get('poster', [])
        tmdb_posters = [x for x in posters if 'tmdb' in x]
        posters = tmdb_posters or posters

        cached_poster = fireEvent('file.download', url=posters[0], single=True) if posters else False
        files = {'image_poster': [cached_poster]} if cached_poster else {}

        medias.append({
            'status': 'suggested',
            'title': getTitle(suggestion),
            'type': 'movie',
            'info': suggestion,
            'files': files,
            'identifiers': {
                'imdb': suggestion.get('imdb')
            }
        })

    return {'success': True, 'movies': medias}
def parseMovie(self, movie):
    """Convert an OMDB API response (json string or dict) to internal movie data.

    Returns {} for unparseable json, error responses or non-movie types.
    Falsy values are stripped from the result.
    """
    movie_data = {}
    try:

        try:
            if isinstance(movie, (str, unicode)):
                movie = json.loads(movie)
        except ValueError:
            log.info('No proper json to decode')
            return movie_data

        if movie.get('Response') == 'Parse Error' or movie.get('Response') == 'False':
            return movie_data

        if movie.get('Type').lower() != 'movie':
            return movie_data

        tmp_movie = movie.copy()
        for key in tmp_movie:
            # Guard non-string values: they have no .lower() and used to
            # raise, aborting the whole parse via the broad except below.
            tmp_movie_elem = tmp_movie.get(key)
            if not isinstance(tmp_movie_elem, (str, unicode)) or tmp_movie_elem.lower() == 'n/a':
                del movie[key]

        year = tryInt(movie.get('Year', ''))

        movie_data = {
            'type': 'movie',
            'via_imdb': True,
            'titles': [movie.get('Title')] if movie.get('Title') else [],
            'original_title': movie.get('Title'),
            'images': {
                'poster': [movie.get('Poster', '')] if movie.get('Poster') and len(movie.get('Poster', '')) > 4 else [],
            },
            'rating': {
                'imdb': (tryFloat(movie.get('imdbRating', 0)), tryInt(movie.get('imdbVotes', '').replace(',', ''))),
                #'rotten': (tryFloat(movie.get('tomatoRating', 0)), tryInt(movie.get('tomatoReviews', '').replace(',', ''))),
            },
            'imdb': str(movie.get('imdbID', '')),
            'mpaa': str(movie.get('Rated', '')),
            'runtime': self.runtimeToMinutes(movie.get('Runtime', '')),
            'released': movie.get('Released'),
            'year': year if isinstance(year, int) else None,
            'plot': movie.get('Plot'),
            'genres': splitString(movie.get('Genre', '')),
            'directors': splitString(movie.get('Director', '')),
            'writers': splitString(movie.get('Writer', '')),
            'actors': splitString(movie.get('Actors', '')),
        }

        # Keep only truthy values
        movie_data = dict((k, v) for k, v in movie_data.items() if v)
    except:
        log.error('Failed parsing IMDB API json: %s', traceback.format_exc())

    return movie_data
def getMovie(self, url):
    """Extract the movie name from an '/ijw_<name>/' style url and search it."""
    # Take the path segment right after '/ijw_'
    tail = splitString(url, '/ijw_')[-1]
    name = splitString(tail, '/')[0]
    if name.startswith('ijw_'):
        name = name[4:]

    parsed = fireEvent('scanner.name_year', name, single = True)

    return self.search(parsed.get('name'), parsed.get('year'))
def parseMovie(self, movie):
    """Map an OMDB API response (json string or dict) onto internal movie data.

    Returns {} for invalid json, API error responses or non-movie types;
    falsy values are stripped from the result.
    """
    movie_data = {}
    try:

        try:
            if isinstance(movie, (str, unicode)):
                movie = json.loads(movie)
        except ValueError:
            log.info('No proper json to decode')
            return movie_data

        if movie.get('Response') == 'Parse Error' or movie.get('Response') == 'False':
            return movie_data

        if movie.get('Type').lower() != 'movie':
            return movie_data

        # Drop non-string values and 'N/A' placeholders before mapping
        for key in movie.copy():
            value = movie.get(key)
            if not isinstance(value, (str, unicode)) or value.lower() == 'n/a':
                del movie[key]

        year = tryInt(movie.get('Year', ''))

        movie_data = {
            'type': 'movie',
            'via_imdb': True,
            'titles': [movie.get('Title')] if movie.get('Title') else [],
            'original_title': movie.get('Title'),
            'images': {
                'poster': [movie.get('Poster', '')] if movie.get('Poster') and len(movie.get('Poster', '')) > 4 else [],
            },
            'rating': {
                'imdb': (tryFloat(movie.get('imdbRating', 0)), tryInt(movie.get('imdbVotes', '').replace(',', ''))),
                #'rotten': (tryFloat(movie.get('tomatoRating', 0)), tryInt(movie.get('tomatoReviews', '').replace(',', ''))),
            },
            'imdb': str(movie.get('imdbID', '')),
            'mpaa': str(movie.get('Rated', '')),
            'runtime': self.runtimeToMinutes(movie.get('Runtime', '')),
            'released': movie.get('Released'),
            'year': year if isinstance(year, int) else None,
            'plot': movie.get('Plot'),
            'genres': splitString(movie.get('Genre', '')),
            'directors': splitString(movie.get('Director', '')),
            'writers': splitString(movie.get('Writer', '')),
            'actors': splitString(movie.get('Actors', '')),
            'languages': fillingLanguages(splitString(movie.get('Language', '')))
        }

        # Keep only truthy values
        movie_data = dict((k, v) for k, v in movie_data.items() if v)
    except:
        log.error('Failed parsing IMDB API json: %s', traceback.format_exc())

    return movie_data
def parseMovie(self, movie):
    """Parse an OMDB API response (json string or dict) into movie data.

    The 'plot' entry is dropped for non-English installs, since OMDB
    only provides English plots (tmdb supplies a localized one).
    """
    movie_data = {}
    try:

        try:
            if isinstance(movie, (str, unicode)):
                movie = json.loads(movie)
        except ValueError:
            log.info('No proper json to decode')
            return movie_data

        if movie.get('Response') == 'Parse Error' or movie.get('Response') == 'False':
            return movie_data

        tmp_movie = movie.copy()
        for key in tmp_movie:
            # Guard non-string values: they have no .lower() and used to
            # raise, aborting the whole parse via the broad except below.
            tmp_movie_elem = tmp_movie.get(key)
            if not isinstance(tmp_movie_elem, (str, unicode)) or tmp_movie_elem.lower() == 'n/a':
                del movie[key]

        year = tryInt(movie.get('Year', ''))

        movie_data = {
            'via_imdb': True,
            'titles': [movie.get('Title')] if movie.get('Title') else [],
            'original_title': movie.get('Title', ''),
            'images': {
                'poster': [movie.get('Poster', '')] if movie.get('Poster') and len(movie.get('Poster', '')) > 4 else [],
            },
            'rating': {
                'imdb': (tryFloat(movie.get('imdbRating', 0)), tryInt(movie.get('imdbVotes', '').replace(',', ''))),
                #'rotten': (tryFloat(movie.get('tomatoRating', 0)), tryInt(movie.get('tomatoReviews', '').replace(',', ''))),
            },
            'imdb': str(movie.get('imdbID', '')),
            'runtime': self.runtimeToMinutes(movie.get('Runtime', '')),
            'released': movie.get('Released', ''),
            'year': year if isinstance(year, int) else None,
            'plot': movie.get('Plot', ''),
            'genres': splitString(movie.get('Genre', '')),
            'directors': splitString(movie.get('Director', '')),
            'writers': splitString(movie.get('Writer', '')),
            'actors': splitString(movie.get('Actors', '')),
        }

        # Remove plot as it is always in english (and tmdb provides a localized plot)
        if not Env.setting('language').startswith('en'):
            del movie_data['plot']

    except:
        log.error('Failed parsing IMDB API json: %s', traceback.format_exc())

    return movie_data
def getIMDBids(self):
    """Page through every enabled IMDB watchlist and collect imdb ids.

    List/user urls are first normalized to the compact sorted view;
    IMDB serves 225 entries per page.
    """
    movies = []

    watchlist_enablers = [tryInt(x) for x in splitString(self.conf("automation_urls_use"))]
    watchlist_urls = splitString(self.conf("automation_urls"))

    index = -1
    for watchlist_url in watchlist_urls:

        try:
            # Get list ID
            ids = re.findall("(?:list/|list_id=)([a-zA-Z0-9\-_]{11})", watchlist_url)
            if len(ids) == 1:
                watchlist_url = "http://www.imdb.com/list/%s/?view=compact&sort=created:asc" % ids[0]
            else:
                # Try find user id with watchlist
                userids = re.findall("(ur\d{7,9})", watchlist_url)
                if len(userids) == 1:
                    watchlist_url = "http://www.imdb.com/user/%s/watchlist?view=compact&sort=created:asc" % userids[0]
        except:
            log.error("Failed getting id from watchlist: %s", traceback.format_exc())

        index += 1
        # Avoid IndexError when fewer "use" flags than urls are configured
        if index >= len(watchlist_enablers) or not watchlist_enablers[index]:
            continue

        start = 0
        while True:
            try:
                w_url = "%s&start=%s" % (watchlist_url, start)
                imdbs = self.getFromURL(w_url)

                for imdb in imdbs:
                    if imdb not in movies:
                        movies.append(imdb)

                    if self.shuttingDown():
                        break

                log.debug("Found %s movies on %s", (len(imdbs), w_url))

                # Stop when shutting down or on the last (partial) page
                if self.shuttingDown() or len(imdbs) < 225:
                    break

                start = len(movies)
            except:
                log.error("Failed loading IMDB watchlist: %s %s", (watchlist_url, traceback.format_exc()))
                break

    return movies
def suggestView(self, limit = 6, **kwargs):
    """Return suggestions as media dicts, caching results and posters.

    An empty list is returned immediately when the plugin is disabled.
    """
    if self.isDisabled():
        return {
            'success': True,
            'movies': []
        }

    movies = splitString(kwargs.get('movies', ''))
    ignored = splitString(kwargs.get('ignored', ''))
    seen = splitString(kwargs.get('seen', ''))

    cached = self.getCache('suggestion_cached')
    if cached:
        suggestions = cached
    else:
        if not movies:
            active_movies = fireEvent('media.with_status', ['active', 'done'], types = 'movie', single = True)
            movies = [getIdentifier(x) for x in active_movies]

        if not ignored:
            ignored = splitString(Env.prop('suggest_ignore', default = ''))

        if not seen:
            movies.extend(splitString(Env.prop('suggest_seen', default = '')))

        suggestions = fireEvent('movie.suggest', movies = movies, ignore = ignored, single = True)
        self.setCache('suggestion_cached', suggestions, timeout = 6048000)  # Cache for 10 weeks

    medias = []
    for suggestion in suggestions[:int(limit)]:

        # Cache poster, preferring tmdb-hosted images
        posters = suggestion.get('images', {}).get('poster', [])
        tmdb_only = [x for x in posters if 'tmdb' in x]
        posters = tmdb_only if len(tmdb_only) > 0 else posters

        cached_poster = fireEvent('file.download', url = posters[0], single = True) if len(posters) > 0 else False
        files = {'image_poster': [cached_poster]} if cached_poster else {}

        medias.append({
            'status': 'suggested',
            'title': getTitle(suggestion),
            'type': 'movie',
            'info': suggestion,
            'files': files,
            'identifiers': {
                'imdb': suggestion.get('imdb')
            }
        })

    return {
        'success': True,
        'movies': medias
    }
def charView(self, **kwargs):
    """API: return the available first characters for the current filters."""
    chars = self.availableChars(
        splitString(kwargs.get('status', None)),
        splitString(kwargs.get('release_status', None)))

    return {
        'success': True,
        'empty': len(chars) == 0,
        'chars': chars,
    }
def charView(self):
    """API: list the starting characters present in the filtered movie list."""
    params = getParams()
    status_filter = splitString(params.get('status', None))
    release_filter = splitString(params.get('release_status', None))

    chars = self.availableChars(status_filter, release_filter)

    return jsonified({
        'success': True,
        'empty': len(chars) == 0,
        'chars': chars,
    })
def getIMDBids(self):
    """Collect imdb ids for the movies in every enabled iTunes Atom feed.

    Parses the Atom/iTunes namespaced feed, searches each title+year
    and keeps only movies passing the minimal requirements.
    """
    movies = []

    enablers = [tryInt(x) for x in splitString(self.conf('automation_urls_use'))]
    urls = splitString(self.conf('automation_urls'))

    namespace = 'http://www.w3.org/2005/Atom'
    namespace_im = 'http://itunes.apple.com/rss'

    index = -1
    for url in urls:

        index += 1
        # Skip urls with no enabler flag or a falsy one. The old check
        # `len(enablers) < index` was off by one: enablers[index] raised
        # IndexError when index == len(enablers).
        if index >= len(enablers) or not enablers[index]:
            continue

        try:
            cache_key = 'itunes.rss.%s' % md5(url)
            rss_data = self.getCache(cache_key, url)

            data = XMLTree.fromstring(rss_data)

            if data is not None:
                entry_tag = str(QName(namespace, 'entry'))
                rss_movies = self.getElements(data, entry_tag)

                for movie in rss_movies:
                    name_tag = str(QName(namespace_im, 'name'))
                    name = self.getTextElement(movie, name_tag)

                    releaseDate_tag = str(QName(namespace_im, 'releaseDate'))
                    releaseDateText = self.getTextElement(movie, releaseDate_tag)
                    # Feed dates carry a fixed -07:00 offset
                    year = datetime.datetime.strptime(releaseDateText, '%Y-%m-%dT00:00:00-07:00').strftime("%Y")

                    imdb = self.search(name, year)

                    if imdb and self.isMinimalMovie(imdb):
                        movies.append(imdb['imdb'])

        except:
            log.error('Failed loading iTunes rss feed: %s %s', (url, traceback.format_exc()))

    return movies
def parseMovie(self, movie):
    """Convert an OMDB API response (json string or dict) to internal movie data.

    Returns {} for unparseable json or API error responses; falsy
    values are stripped from the result.
    """
    movie_data = {}
    try:

        try:
            if isinstance(movie, (str, unicode)):
                movie = json.loads(movie)
        except ValueError:
            log.info("No proper json to decode")
            return movie_data

        if movie.get("Response") == "Parse Error" or movie.get("Response") == "False":
            return movie_data

        tmp_movie = movie.copy()
        for key in tmp_movie:
            # Guard non-string values: they have no .lower() and used to
            # raise, aborting the whole parse via the broad except below.
            tmp_movie_elem = tmp_movie.get(key)
            if not isinstance(tmp_movie_elem, (str, unicode)) or tmp_movie_elem.lower() == "n/a":
                del movie[key]

        year = tryInt(movie.get("Year", ""))

        movie_data = {
            "type": "movie",
            "via_imdb": True,
            "titles": [movie.get("Title")] if movie.get("Title") else [],
            "original_title": movie.get("Title"),
            "images": {
                "poster": [movie.get("Poster", "")] if movie.get("Poster") and len(movie.get("Poster", "")) > 4 else []
            },
            "rating": {
                "imdb": (tryFloat(movie.get("imdbRating", 0)), tryInt(movie.get("imdbVotes", "").replace(",", ""))),
                #'rotten': (tryFloat(movie.get('tomatoRating', 0)), tryInt(movie.get('tomatoReviews', '').replace(',', ''))),
            },
            "imdb": str(movie.get("imdbID", "")),
            "mpaa": str(movie.get("Rated", "")),
            "runtime": self.runtimeToMinutes(movie.get("Runtime", "")),
            "released": movie.get("Released"),
            "year": year if isinstance(year, int) else None,
            "plot": movie.get("Plot"),
            "genres": splitString(movie.get("Genre", "")),
            "directors": splitString(movie.get("Director", "")),
            "writers": splitString(movie.get("Writer", "")),
            "actors": splitString(movie.get("Actors", "")),
        }

        # items() works on both Python 2 and 3 (was py2-only iteritems)
        movie_data = dict((k, v) for k, v in movie_data.items() if v)
    except:
        log.error("Failed parsing IMDB API json: %s", traceback.format_exc())

    return movie_data
def parseMovie(self, movie):
    """Parse an OMDB API response (json string or dict) into movie data.

    The 'plot' entry is removed for non-English installs since OMDB
    only provides English plots.
    """
    movie_data = {}
    try:

        try:
            if isinstance(movie, (str, unicode)):
                movie = json.loads(movie)
        except ValueError:
            log.info('No proper json to decode')
            return movie_data

        if movie.get('Response') == 'Parse Error' or movie.get('Response') == 'False':
            return movie_data

        tmp_movie = movie.copy()
        for key in tmp_movie:
            # Guard non-string values: they have no .lower() and used to
            # raise, aborting the whole parse via the broad except below.
            tmp_movie_elem = tmp_movie.get(key)
            if not isinstance(tmp_movie_elem, (str, unicode)) or tmp_movie_elem.lower() == 'n/a':
                del movie[key]

        year = tryInt(movie.get('Year', ''))

        movie_data = {
            'via_imdb': True,
            'titles': [movie.get('Title')] if movie.get('Title') else [],
            'original_title': movie.get('Title', ''),
            'images': {
                'poster': [movie.get('Poster', '')] if movie.get('Poster') and len(movie.get('Poster', '')) > 4 else [],
            },
            'rating': {
                'imdb': (tryFloat(movie.get('imdbRating', 0)), tryInt(movie.get('imdbVotes', '').replace(',', ''))),
                #'rotten': (tryFloat(movie.get('tomatoRating', 0)), tryInt(movie.get('tomatoReviews', '').replace(',', ''))),
            },
            'imdb': str(movie.get('imdbID', '')),
            'runtime': self.runtimeToMinutes(movie.get('Runtime', '')),
            'released': movie.get('Released', ''),
            'year': year if isinstance(year, int) else None,
            'plot': movie.get('Plot', ''),
            'genres': splitString(movie.get('Genre', '')),
            'directors': splitString(movie.get('Director', '')),
            'writers': splitString(movie.get('Writer', '')),
            'actors': splitString(movie.get('Actors', '')),
        }

        # Remove plot as it is always in english (and tmdb provides a localized plot)
        if not Env.setting('language').startswith('en'):
            del movie_data['plot']

    except:
        log.error('Failed parsing IMDB API json: %s', traceback.format_exc())

    return movie_data
def calculate(self, nzb, movie):
    """ Calculate the score of a NZB, used for sorting later """

    release_name = toUnicode(nzb['name'])

    # Preferred words: global searcher setting merged with the category's own list
    preferred_words = splitString(Env.setting('preferred_words', section = 'searcher').lower())
    try:
        preferred_words = list(set(preferred_words + splitString(movie['category']['preferred'].lower())))
    except:
        pass

    total = nameScore(release_name, movie['library']['year'], preferred_words)

    for title_entry in movie['library']['titles']:
        known_title = toUnicode(title_entry['title'])
        total += nameRatioScore(release_name, known_title)
        total += namePositionScore(release_name, known_title)

    total += sizeScore(nzb['size'])

    # Torrents only: reward well-seeded releases
    if nzb.get('seeders'):
        try:
            total += nzb.get('seeders') * 100 / 15
            total += nzb.get('leechers') * 100 / 30
        except:
            pass

    # Provider score
    total += providerScore(nzb['provider'])

    # Duplicates in name
    total += duplicateScore(nzb['name'], getTitle(movie['library']))

    # Ignored words: global searcher setting merged with the category's own list
    ignored_words = splitString(Env.setting('ignored_words', section = 'searcher').lower())
    try:
        ignored_words = list(set(ignored_words + splitString(movie['category']['ignored'].lower())))
    except:
        pass

    # Partial ignored words
    total += partialIgnoredScore(nzb['name'], getTitle(movie['library']), ignored_words)

    # Ignore single downloads from multipart
    total += halfMultipartScore(nzb['name'])

    # Extra provider specific check
    extra_score = nzb.get('extra_score')
    if extra_score:
        total += extra_score(nzb)

    # Scene / Nuke scoring
    total += sceneScore(nzb['name'])

    return total
def listView(self, **kwargs):
    """HTTP handler: list media items matching the filter params in the request."""

    # Translate the raw request kwargs into self.list() arguments
    total_movies, movies = self.list(
        types = splitString(kwargs.get("type")),
        status = splitString(kwargs.get("status")),
        release_status = splitString(kwargs.get("release_status")),
        status_or = kwargs.get("status_or") is not None,
        limit_offset = kwargs.get("limit_offset"),
        with_tags = splitString(kwargs.get("with_tags")),
        starts_with = kwargs.get("starts_with"),
        search = kwargs.get("search"),
    )

    return {
        "success": True,
        "empty": len(movies) == 0,
        "total": total_movies,
        "movies": movies,
    }
def getHosts(self):
    """Build the list of configured hosts.

    Reads the comma-separated 'use', 'host' and 'api_key' settings and zips
    them into one dict per host.

    Fixes over the original:
    - no longer shadows the builtin `list`
    - a 'use'/'api_key' list shorter than the host list no longer raises
      IndexError; missing entries default to '' instead.

    Returns:
        list of dicts with keys 'use', 'host' and 'api_key'.
    """
    uses = splitString(str(self.conf('use')))
    hosts = splitString(self.conf('host'))
    api_keys = splitString(self.conf('api_key'))

    host_list = []
    for nr in range(len(hosts)):
        host_list.append({
            'use': uses[nr] if nr < len(uses) else '',
            'host': hosts[nr],
            'api_key': api_keys[nr] if nr < len(api_keys) else '',
        })

    return host_list
def refresh(self):
    """Trigger a forced library refresh for every movie id in the request (legacy API)."""
    db = get_session()

    for movie_id in splitString(getParam('id')):
        movie = db.query(Movie).filter_by(id = movie_id).first()
        if not movie:
            continue

        # Find the currently selected (default) title for the frontend message
        default_title = ''
        for title in movie.library.titles:
            if title.default:
                default_title = title.title

        fireEvent('notify.frontend', type = 'movie.busy.%s' % movie_id, data = True, message = 'Updating "%s"' % default_title)
        fireEventAsync('library.update', identifier = movie.library.identifier, default_title = default_title, force = True, on_complete = self.createOnComplete(movie_id))

    return jsonified({
        'success': True,
    })
def listView(self, limit_offset = None, **kwargs):
    """List stored notifications.

    Args:
        limit_offset: optional 'limit' or 'limit,offset' string; defaults to
            the newest 200 rows when absent.

    Returns:
        dict with 'success', 'empty' and the serialized 'notifications'.
    """
    db = get_session()

    q = db.query(Notif)

    if limit_offset:
        splt = splitString(limit_offset)
        limit = splt[0]
        # Was `len(splt) is 1`: identity comparison with an int literal only
        # works by CPython's small-int caching accident; use equality.
        offset = 0 if len(splt) == 1 else splt[1]
        q = q.limit(limit).offset(offset)
    else:
        q = q.limit(200)

    results = q.all()
    notifications = []
    for n in results:
        ndict = n.to_dict()
        ndict['type'] = 'notification'
        notifications.append(ndict)

    return {
        'success': True,
        'empty': len(notifications) == 0,
        'notifications': notifications
    }
def notify(self, message = '', data = None, listener = None):
    """Push `message` to every configured NotifyMyAndroid API key.

    Args:
        message: notification text; its first word is used as the event type.
        data: extra payload, unused here (kept for the notifier interface).
            Changed from a mutable default `{}` to None to avoid the shared
            default-argument pitfall.
        listener: unused (kept for the notifier interface).

    Returns:
        True when every key reported HTTP 200, None when disabled.
    """
    if self.isDisabled():
        return

    nma = pynma.PyNMA()
    keys = splitString(self.conf('api_key'))
    nma.addkey(keys)
    nma.developerkey(self.conf('dev_key'))

    # hacky fix for the event type, as it seems to be part of the message now
    self.event = message.split(' ')[0]

    response = nma.push(application = self.default_title, event = self.event,
                        description = message, priority = self.conf('priority'),
                        batch_mode = len(keys) > 1)

    successful = 0
    for key in keys:
        if not response[str(key)]['code'] == u'200':
            log.error(
                'Could not send notification to NotifyMyAndroid (%s). %s',
                (key, response[key]['message']))
        else:
            successful += 1

    return successful == len(keys)
def listView(self):
    """List stored notifications (legacy API), honoring an optional
    'limit' or 'limit,offset' request parameter; defaults to 200 rows.

    Returns:
        jsonified dict with 'success', 'empty' and 'notifications'.
    """
    db = get_session()
    limit_offset = getParam('limit_offset', None)

    q = db.query(Notif)

    if limit_offset:
        splt = splitString(limit_offset)
        limit = splt[0]
        # Was `len(splt) is 1`: identity check on an int literal relies on
        # CPython small-int caching; equality is the correct comparison.
        offset = 0 if len(splt) == 1 else splt[1]
        q = q.limit(limit).offset(offset)
    else:
        q = q.limit(200)

    results = q.all()
    notifications = []
    for n in results:
        ndict = n.to_dict()
        ndict['type'] = 'notification'
        notifications.append(ndict)

    return jsonified({
        'success': True,
        'empty': len(notifications) == 0,
        'notifications': notifications
    })
def refresh(self, id = '', **kwargs):
    """Force a library refresh for each movie id in the comma-separated `id` param."""
    db = get_session()

    for movie_id in splitString(id):
        movie = db.query(Movie).filter_by(id = movie_id).first()
        if not movie:
            continue

        # Current selected (default) title
        default_title = ''
        for title in movie.library.titles:
            if title.default:
                default_title = title.title

        fireEvent('notify.frontend', type = 'movie.busy.%s' % movie_id, data = True)
        fireEventAsync('library.update', identifier = movie.library.identifier, default_title = default_title, force = True, on_complete = self.createOnComplete(movie_id))

    db.expire_all()
    return {
        'success': True,
    }
def getLanguages(self):
    """Return the configured languages, falling back to Italian.

    NOTE(review): the original body began with an unconditional
    `return ['it']`, which made the settings lookup below unreachable dead
    code (likely a debugging leftover). That early return has been removed
    so the 'languages' core setting is honored again; ['it'] remains the
    fallback when nothing is configured.
    """
    languages = splitString(Env.setting('languages', section = 'core'))
    if len(languages):
        return languages

    return ['it']
def deleteView(self, id = "", **kwargs):
    """Delete every media item named in the comma-separated `id` param."""
    for media_id in splitString(id):
        # 'delete_from' controls which lists the item is removed from
        self.delete(media_id, delete_from = kwargs.get("delete_from", "all"))

    return {"success": True}
def notify(self, message = '', data = None, listener = None):
    """Send `message` to every configured XBMC host, optionally triggering a
    library scan when `data` carries a 'destination_dir'.

    Args:
        message: notification text shown on the XBMC GUI.
        data: optional dict; when it has 'destination_dir' a VideoLibrary.Scan
            is requested too (on all hosts, or only the first when the
            'only_first' setting is on).
        listener: unused here (kept for the notifier interface).

    Returns:
        True only when every issued call reported success.
    """
    if not data: data = {}
    hosts = splitString(self.conf('host'))

    # successful counts OK responses; max_successful counts calls issued,
    # so the final equality check means "everything succeeded".
    successful = 0
    max_successful = 0
    for host in hosts:

        # Probe each host once to learn whether it speaks the JSON-RPC API
        if self.use_json_notifications.get(host) is None:
            self.getXBMCJSONversion(host, message = message)

        if self.use_json_notifications.get(host):
            calls = [
                ('GUI.ShowNotification', {'title': self.default_title, 'message': message, 'image': self.getNotificationImage('small')}),
            ]

            # Scan request: all hosts, or just the first when 'only_first' is set
            if data and data.get('destination_dir') and (not self.conf('only_first') or hosts.index(host) == 0):
                param = {}
                # Scan only the destination dir when the host can see that path
                # (remote scan enabled, or host resolves to this machine);
                # otherwise fall back to a full library scan.
                if not self.conf('force_full_scan') and (self.conf('remote_dir_scan') or socket.getfqdn('localhost') == socket.getfqdn(host.split(':')[0])):
                    param = {'directory': data['destination_dir']}
                calls.append(('VideoLibrary.Scan', param))

            max_successful += len(calls)
            response = self.request(host, calls)
        else:
            # Legacy (pre-JSON) notification path
            response = self.notifyXBMCnoJSON(host, {'title': self.default_title, 'message': message})

            if data and data.get('destination_dir') and (not self.conf('only_first') or hosts.index(host) == 0):
                response += self.request(host, [('VideoLibrary.Scan', {})])
                max_successful += 1

            max_successful += 1

        try:
            # Tally per-call results; log but keep going on individual errors
            for result in response:
                if result.get('result') and result['result'] == 'OK':
                    successful += 1
                elif result.get('error'):
                    log.error('XBMC error; %s: %s (%s)', (result['id'], result['error']['message'], result['error']['code']))

        except:
            log.error('Failed parsing results: %s', traceback.format_exc())

    return successful == max_successful
def notify(self, message = '', data = None, listener = None):
    """Send `message` to all configured NotifyMyAndroid API keys.

    Args:
        message: notification text; the first word doubles as the event type.
        data: unused payload (kept for the notifier interface). Changed from
            a mutable default `{}` to None to avoid shared-state bugs.
        listener: unused (kept for the notifier interface).

    Returns:
        True when all keys report HTTP 200.
    """
    nma = pynma.PyNMA()
    keys = splitString(self.conf('api_key'))
    nma.addkey(keys)
    nma.developerkey(self.conf('dev_key'))

    # hacky fix for the event type, as it seems to be part of the message now
    self.event = message.split(' ')[0]

    response = nma.push(
        application = self.default_title,
        event = self.event,
        description = message,
        priority = self.conf('priority'),
        batch_mode = len(keys) > 1
    )

    successful = 0
    for key in keys:
        if not response[str(key)]['code'] == u'200':
            log.error('Could not send notification to NotifyMyAndroid (%s). %s', (key, response[key]['message']))
        else:
            successful += 1

    return successful == len(keys)
def getIMDBids(self):
    """Collect IMDB ids from the enabled RSS automation feeds.

    Only urls flagged in 'automation_urls_use' are read. A movie qualifies
    when its grab count exceeds the 'number_grabs' setting and passes the
    minimal-movie check.

    Fix: the title/year regexes used `.group(1)` directly, raising
    AttributeError (and aborting the whole run) whenever a feed description
    didn't match; unmatched items are now logged and skipped.

    Returns:
        list of IMDB id strings.
    """
    movies = []

    # Map url -> enabled flag from the two parallel comma-separated settings
    urls = dict(zip(splitString(self.conf('automation_urls')), [tryInt(x) for x in splitString(self.conf('automation_urls_use'))]))

    for url in urls:
        if not urls[url]:
            continue

        rss_movies = self.getRSSData(url)

        for movie in rss_movies:
            description = self.getTextElement(movie, 'description')

            # Grab count lives in a child element's name/value attributes
            grabs = 0
            for item in movie:
                if item.attrib.get('name') == 'grabs':
                    grabs = item.attrib.get('value')
                    break

            if int(grabs) > tryInt(self.conf('number_grabs')):
                title_match = re.match(r'.*Title: .a href.*/">(.*) \(\d{4}\).*', description)
                year_match = re.match(r'.*Year: (\d{4}).*', description)
                if not title_match or not year_match:
                    # Feed description format changed; skip instead of crashing
                    log.debug('Unexpected description format, skipping: %s', description)
                    continue

                title = title_match.group(1)
                log.info2('%s grabs for movie: %s, enqueue...', (grabs, title))
                year = year_match.group(1)
                imdb = self.search(title, year)

                if imdb and self.isMinimalMovie(imdb):
                    movies.append(imdb['imdb'])

    return movies