def updateStatus(self, release_id, status = None):
    """Set a release document to the given status and notify the frontend.

    :param release_id: id of the release document in the database
    :param status: new status string; no-op (returns False) when falsy
    :return: True when the status was applied or already current, False on
             missing status or any database error.
    """
    if not status:
        return False

    try:
        db = get_db()
        release = db.get('id', release_id)

        if release and release.get('status') != status:

            # Prefer the basename of the first movie file as display name
            display_name = None
            if release.get('files'):
                for file_kind in release.get('files', {}):
                    if file_kind == 'movie':
                        for release_file in release['files'][file_kind]:
                            display_name = os.path.basename(release_file)
                            break

            # Fall back to the scraped release info name
            if not display_name and release.get('info'):
                display_name = release['info'].get('name')

            # Persist the new status
            log.debug('Marking release %s as %s', (display_name, status))
            release['status'] = status
            release['last_edit'] = int(time.time())
            db.update(release)

            # No dedicated release-update event exists, so push the doc to the frontend directly
            fireEvent('notify.frontend', type = 'release.update_status', data = release)

        return True
    except:
        log.error('Failed: %s', traceback.format_exc())

    return False
def __init__(self):
    """Register the release API endpoints, event handlers and the
    periodic cleanup job on the plugin event bus."""

    # HTTP API endpoints for the frontend
    addApiView('release.manual_download', self.manualDownload, docs = {
        'desc': 'Send a release manually to the downloaders',
        'params': {
            'id': {'type': 'id', 'desc': 'ID of the release object in release-table'}
        }
    })
    addApiView('release.delete', self.deleteView, docs = {
        'desc': 'Delete releases',
        'params': {
            'id': {'type': 'id', 'desc': 'ID of the release object in release-table'}
        }
    })
    addApiView('release.ignore', self.ignore, docs = {
        'desc': 'Toggle ignore, for bad or wrong releases',
        'params': {
            'id': {'type': 'id', 'desc': 'ID of the release object in release-table'}
        }
    })

    # Internal events other plugins fire to interact with releases
    addEvent('release.add', self.add)
    addEvent('release.download', self.download)
    addEvent('release.try_download_result', self.tryDownloadResult)
    addEvent('release.create_from_search', self.createFromSearch)
    addEvent('release.delete', self.delete)
    addEvent('release.clean', self.clean)
    addEvent('release.update_status', self.updateStatus)
    addEvent('release.with_status', self.withStatus)
    addEvent('release.for_media', self.forMedia)

    # Clean releases that didn't have activity in the last week
    addEvent('app.load', self.cleanDone, priority = 1000)
    fireEvent('schedule.interval', 'movie.clean_releases', self.cleanDone, hours = 12)
def updateSuggestionCache(self, ignore_imdb = None, limit = 6, ignored = None, seen = None):
    """Refresh the cached movie-suggestion list.

    :param ignore_imdb: IMDB id to drop from the currently cached suggestions
    :param limit: minimum number of suggestions to keep cached
    :param ignored: extra IMDB ids to exclude from new suggestions
    :param seen: IMDB ids to treat as already-seen library entries
    :return: the new suggestion list that was written to the cache
    """

    # Combine with previous suggestion_cache
    cached_suggestion = self.getCache('suggestion_cached') or []
    new_suggestions = []
    ignored = [] if not ignored else ignored
    seen = [] if not seen else seen

    if ignore_imdb:
        # Keep every cached suggestion except the ignored one, de-duplicated by imdb id
        suggested_imdbs = []
        for cs in cached_suggestion:
            if cs.get('imdb') != ignore_imdb and cs.get('imdb') not in suggested_imdbs:
                suggested_imdbs.append(cs.get('imdb'))
                new_suggestions.append(cs)

    # Get new results and add them
    if len(new_suggestions) - 1 < limit:
        active_movies = fireEvent('media.with_status', ['active', 'done'], single = True)
        movies = [getIdentifier(x) for x in active_movies]
        movies.extend(seen)

        # Never re-suggest what was already in the cache
        ignored.extend([x.get('imdb') for x in cached_suggestion])
        suggestions = fireEvent('movie.suggest', movies = movies, ignore = removeDuplicate(ignored), single = True)

        if suggestions:
            new_suggestions.extend(suggestions)

    # NOTE(review): 3024000 s = 35 days cache lifetime — confirm intended duration
    self.setCache('suggestion_cached', new_suggestions, timeout = 3024000)

    return new_suggestions
def manualDownload(self, id = None, **kwargs):
    """API handler: send a single stored release to the downloaders.

    :param id: database id of the release document
    :return: dict with a 'success' boolean for the frontend
    """

    db = get_db()

    try:
        release = db.get('id', id)
        item = release['info']
        movie = db.get('id', release['media_id'])

        fireEvent('notify.frontend', type = 'release.manual_download', data = True, message = 'Snatching "%s"' % item['name'])

        # Get matching provider
        provider = fireEvent('provider.belongs_to', item['url'], provider = item.get('provider'), single = True)

        # Magnet links need no download callable; everything else gets the
        # provider's (possibly authenticated) download method attached
        if item.get('protocol') != 'torrent_magnet':
            item['download'] = provider.loginDownload if provider.urls.get('login') else provider.download

        success = self.download(data = item, media = movie, manual = True)

        if success:
            fireEvent('notify.frontend', type = 'release.manual_download', data = True, message = 'Successfully snatched "%s"' % item['name'])

        return {
            'success': success == True
        }
    except:
        log.error('Couldn\'t find release with id: %s: %s', (id, traceback.format_exc()))

    return {
        'success': False
    }
def containsOtherQuality(self, nzb, movie_year = None, preferred_quality = None):
    """Check whether a release looks like a different quality than wanted.

    :param nzb: release dict with at least 'name' and optionally 'size' (MB)
    :param movie_year: release year of the movie, used for the old-movie size heuristic
    :param preferred_quality: quality profile dict ('identifier', 'allow' keys)
    :return: False when only the preferred quality was found, otherwise a dict
             of found-but-not-allowed quality identifiers (empty dict == nothing found).
    """
    if not preferred_quality: preferred_quality = {}

    found = {}

    # Try guessing via quality tags
    guess = fireEvent('quality.guess', files = [nzb.get('name')], size = nzb.get('size', None), single = True)
    if guess:
        found[guess['identifier']] = True

    # Hack for older movies that don't contain quality tag
    name = nzb['name']
    size = nzb.get('size', 0)
    year_name = fireEvent('scanner.name_year', name, single = True)
    if len(found) == 0 and movie_year < datetime.datetime.now().year - 3 and not year_name.get('year', None):
        if size > 20000:  # Assume bd50
            log.info('Quality was missing in name, assuming it\'s a BR-Disk based on the size: %s', size)
            found['bd50'] = True
        elif size > 3000:  # Assume dvdr
            log.info('Quality was missing in name, assuming it\'s a DVD-R based on the size: %s', size)
            found['dvdr'] = True
        else:  # Assume dvdrip
            log.info('Quality was missing in name, assuming it\'s a DVD-Rip based on the size: %s', size)
            found['dvdrip'] = True

    # Allow other qualities
    # FIX: .get('allow') returned None when the key was missing, making the
    # for-loop raise TypeError; default to an empty list instead.
    for allowed in preferred_quality.get('allow', []) or []:
        if found.get(allowed):
            del found[allowed]

    # FIX: preferred_quality['identifier'] raised KeyError when the profile
    # dict was empty (the default); use .get() so the check degrades safely.
    if found.get(preferred_quality.get('identifier')) and len(found) == 1:
        return False

    return found
def searchSingle(self, message = None, group = None):
    """Search and download a trailer for a freshly renamed movie group.

    :param message: unused event payload slot
    :param group: scanner group dict (files, filename, destination_dir, …)
    :return: None when disabled or a trailer already exists in the group,
             False when no trailers were found, True otherwise.
    """
    if not group: group = {}
    if self.isDisabled() or len(group['files']['trailer']) > 0: return

    trailers = fireEvent('trailer.search', group = group, merge = True)
    if not trailers or trailers == []:
        log.info('No trailers found for: %s', getTitle(group))
        return False

    # Try trailer URLs for the configured quality until one downloads cleanly
    for trailer in trailers.get(self.conf('quality'), []):
        ext = getExt(trailer)
        # Extensions longer than 5 chars are assumed bogus; default to mp4
        filename = self.conf('name').replace('<filename>', group['filename']) + ('.%s' % ('mp4' if len(ext) > 5 else ext))
        destination = os.path.join(group['destination_dir'], filename)
        if not os.path.isfile(destination):
            trailer_file = fireEvent('file.download', url = trailer, dest = destination, urlopen_kwargs = {'headers': {'User-Agent': 'Quicktime'}}, single = True)
            if trailer_file and os.path.getsize(trailer_file) < (1024 * 1024):  # Don't trust small trailers (1MB), try next one
                os.unlink(trailer_file)
                continue
        else:
            log.debug('Trailer already exists: %s', destination)

        group['renamed_files'].append(destination)

        # Download first and break
        break

    return True
def search(self, q = '', types = None, **kwargs):
    """Search media info providers for a query string.

    :param q: search query; may be (or contain) an IMDB identifier
    :param types: optional media type or collection of media types to restrict to
    :return: dict of {'success': True} merged with per-type result lists
    """

    # Normalise `types` into a plain list (or leave it None)
    if isinstance(types, (str, unicode)):
        types = [types]
    elif isinstance(types, (list, tuple, set)):
        types = list(types)

    imdb_identifier = getImdb(q)

    if types:
        # Restricted search: query each requested type separately
        result = {}
        for media_type in types:
            if imdb_identifier:
                result[media_type] = fireEvent('%s.info' % media_type, identifier = imdb_identifier)
            else:
                result[media_type] = fireEvent('%s.search' % media_type, q = q)
    elif imdb_identifier:
        # Direct IMDB lookup, wrapped in the same {type: [results]} shape
        result = fireEvent('movie.info', identifier = imdb_identifier, merge = True)
        result = {result['type']: [result]}
    else:
        # Free-text search across all info providers
        result = fireEvent('info.search', q = q, merge = True)

    return mergeDicts({
        'success': True,
    }, result)
def searchAllView(self):
    """Trigger the all-view searcher for every registered media type.

    :return: dict mapping each media type to its searcher's all_view result
    """
    return dict(
        (media_type, fireEvent('%s.searcher.all_view' % media_type))
        for media_type in fireEvent('media.types')
    )
def append(self, result):
    """Validate a raw search result, score it and append it to the list.

    :param result: raw provider result dict (must contain an 'id')
    """

    new_result = self.fillResult(result)
    # Weight > 0 means the release matches media/quality requirements
    is_correct = fireEvent('searcher.correct_release', new_result, self.media, self.quality,
                           imdb_results = self.kwargs.get('imdb_results', False), single = True)

    if is_correct and new_result['id'] not in self.result_ids:
        is_correct_weight = float(is_correct)

        # Base score from the score plugin, then scaled by the correctness weight
        new_result['score'] += fireEvent('score.calculate', new_result, self.media, single = True)

        old_score = new_result['score']
        new_result['score'] = int(old_score * is_correct_weight)

        log.info2('Found correct release with weight %.02f, old_score(%d) now scaled to score(%d)', (
            is_correct_weight,
            old_score,
            new_result['score']
        ))

        self.found(new_result)
        self.result_ids.append(result['id'])

        super(ResultList, self).append(new_result)
def forceDefaults(self): db = get_db() # Fill qualities and profiles if they are empty somehow.. if db.count(db.all, 'profile') == 0: if db.count(db.all, 'quality') == 0: fireEvent('quality.fill', single = True) self.fill() # Get all active movies without profile try: medias = fireEvent('media.with_status', 'active', single = True) profile_ids = [x.get('_id') for x in self.all()] default_id = profile_ids[0] for media in medias: if media.get('profile_id') not in profile_ids: media['profile_id'] = default_id db.update(media) except: log.error('Failed: %s', traceback.format_exc())
def addMovies(self):
    """Collect movies from all automation providers and add the new ones
    to the library, then kick off a search for each addition.

    :return: True (always; partial progress survives a shutdown)
    """

    imdb_ids = fireEvent('automation.get_movies', merge = True)
    added_ids = []

    # First pass: add any movie we haven't added through automation before
    for imdb_id in imdb_ids:
        if self.shuttingDown():
            break

        prop_name = 'automation.added.%s' % imdb_id
        if Env.prop(prop_name, default = False):
            continue

        new_movie = fireEvent('movie.add', params = {'identifier': imdb_id}, force_readd = False,
                              search_after = False, update_after = True, single = True)
        if new_movie:
            added_ids.append(new_movie['_id'])
            # Remember the addition so automation never re-adds it
            Env.prop(prop_name, True)

    # Second pass: trigger a release search for each newly added movie
    for media_id in added_ids:
        if self.shuttingDown():
            break

        media = fireEvent('media.get', media_id, single = True)
        if media:
            fireEvent('movie.searcher.single', media)

    return True
def run(self):
    """Load every discovered module by priority, registering its plugins
    and collecting default settings; save settings once at the end."""

    did_save = 0

    for priority in sorted(self.modules):
        for module_name, plugin in sorted(self.modules[priority].items()):
            # Load module
            try:
                # Names starting with '__' are internal, never loadable plugins
                if plugin.get('name')[:2] == '__':
                    continue

                m = self.loadModule(module_name)
                if m is None:
                    continue

                # Save default settings for plugin/provider
                did_save += self.loadSettings(m, module_name, save = False)

                self.loadPlugins(m, plugin.get('type'), plugin.get('name'))
            except ImportError as e:
                # todo:: subclass ImportError for missing requirements.
                # NOTE(review): e.message is Python 2 only; on Python 3 this
                # attribute access would itself raise — confirm target runtime.
                if e.message.lower().startswith("missing"):
                    log.error(e.message)
                    pass

                # todo:: this needs to be more descriptive.
                # NOTE(review): this logs even for the "missing requirement"
                # case above (no continue) — confirm that is intended.
                log.error('Import error, remove the empty folder: %s', plugin.get('module'))
                log.debug('Can\'t import %s: %s', (module_name, traceback.format_exc()))
            except:
                log.error('Can\'t import %s: %s', (module_name, traceback.format_exc()))

    if did_save:
        fireEvent('settings.save')
def __init__(self):
    """Register the library-manage schedule, events and API endpoints."""

    fireEvent('scheduler.interval', identifier = 'manage.update_library', handle = self.updateLibrary, hours = 2)

    addEvent('manage.update', self.updateLibrary)
    addEvent('manage.diskspace', self.getDiskSpace)

    # Add files after renaming
    def after_rename(message = None, group = None):
        if not group: group = {}
        return self.scanFilesToLibrary(folder = group['destination_dir'], files = group['renamed_files'], release_download = group['release_download'])
    addEvent('renamer.after', after_rename, priority = 110)

    addApiView('manage.update', self.updateLibraryView, docs = {
        'desc': 'Update the library by scanning for new movies',
        'params': {
            'full': {'desc': 'Do a full update or just recently changed/added movies.'},
        }
    })

    addApiView('manage.progress', self.getProgress, docs = {
        'desc': 'Get the progress of current manage update',
        'return': {'type': 'object', 'example': """{
'progress': False || object, total & to_go,
}"""},
    })

    # Scan the library at startup unless running in dev mode
    if not Env.get('dev') and self.conf('startup_scan'):
        addEvent('app.load', self.updateLibraryQuick)

    addEvent('app.load', self.setCrons)

    # Enable / disable interval
    addEvent('setting.save.manage.library_refresh_interval.after', self.setCrons)
def withStatus(self, status, types = None, with_doc = True):
    """Yield media entries matching the given status(es).

    :param status: single status string or list/tuple of statuses
    :param types: optional media type(s) to restrict to (only applied with with_doc)
    :param with_doc: yield full documents instead of raw status-index rows
    :yields: media documents (with_doc) or media_status index entries
    """

    db = get_db()

    if types and not isinstance(types, (list, tuple)):
        types = [types]

    status = list(status if isinstance(status, (list, tuple)) else [status])

    for s in status:
        for ms in db.get_many('media_status', s):
            if with_doc:
                try:
                    doc = db.get('id', ms['_id'])

                    if types and doc.get('type') not in types:
                        continue

                    yield doc
                except (RecordDeleted, RecordNotFound):
                    # Index entry outlived its document; skip it
                    log.debug('Record not found, skipping: %s', ms['_id'])
                except (ValueError, EOFError):
                    # Unreadable document: hand it to the corruption handler
                    fireEvent('database.delete_corrupted', ms.get('_id'), traceback_error = traceback.format_exc(0))
            else:
                yield ms
def setCrons(self):
    """(Re)install the library refresh schedule from the configured interval.

    A non-positive interval removes the schedule without re-adding it.

    :return: True
    """
    fireEvent('schedule.remove', 'manage.update_library')

    hours = tryInt(self.conf('library_refresh_interval'))
    if hours <= 0:
        return True

    fireEvent('schedule.interval', 'manage.update_library', self.updateLibrary, hours = hours, single = True)

    return True
def notifyFront():
    """Closure: push a '<type>.update' notification for the enclosing
    scope's `media_id` to the frontend; errors are logged, never raised."""
    try:
        media = fireEvent('media.get', media_id, single = True)
        if media:
            event_name = '%s.update' % media.get('type')
            fireEvent('notify.frontend', type = event_name, data = media)
    except:
        log.error('Failed creating onComplete: %s', traceback.format_exc())
def search(self, name, year = None):
    """Look up a movie by title (and optional year) and return its full info.

    :param name: movie title to search for
    :param year: optional release year to narrow the search
    :return: merged movie info dict, or None when nothing was found

    FIX: previously a missing year produced the literal query
    '<name> None', polluting the search; the year is now only appended
    when present.
    """
    query = '%s %s' % (name, year) if year else name
    result = fireEvent('movie.search', q = query, limit = 1, merge = True)

    if len(result) > 0:
        movie = fireEvent('movie.info', identifier = result[0].get('imdb'), extended = False, merge = True)
        return movie

    return None
def bookmark(self, host = None, **kwargs):
    """Render the bookmarklet javascript template.

    :param host: host URL the userscript should talk back to
    :return: rendered 'bookmark.js_tmpl' content
    """
    includes = fireEvent('userscript.get_includes', merge = True)
    excludes = fireEvent('userscript.get_excludes', merge = True)

    return self.renderTemplate(__file__, 'bookmark.js_tmpl',
        includes = includes,
        excludes = excludes,
        host = host,
    )
def afterUpdate():
    """Closure: after a managed movie's info update, notify the frontend
    it was added. Uses `self`, `folder` and `identifier` from the
    enclosing scope. Suppresses the per-movie message during bulk adds
    (more than 5 items in the folder)."""
    if not self.in_progress or self.shuttingDown():
        return

    total = self.in_progress[folder]['total']
    movie_dict = fireEvent('media.get', identifier, single = True)

    if movie_dict:
        fireEvent('notify.frontend', type = 'movie.added', data = movie_dict, message = None if total > 5 else 'Added "%s" to manage.' % getTitle(movie_dict))
def getMetaData(self, group, folder = '', release_download = None):
    """Collect quality/codec/resolution metadata for a scanned movie group.

    :param group: scanner group dict with group['files']['movie'] file list
    :param folder: base folder, stripped from filenames before group detection
    :param release_download: optional snatch info; its quality overrides the guess
    :return: dict with titles, video/audio codecs, size, quality, group, source, …
    """

    data = {}
    files = list(group['files']['movie'])

    for cur_file in files:
        if not self.filesizeBetween(cur_file, self.file_sizes['movie']): continue  # Ignore smaller files

        if not data.get('audio'):  # Only get metadata from first media file
            meta = self.getMeta(cur_file)

            try:
                data['titles'] = meta.get('titles', [])
                data['video'] = meta.get('video', self.getCodec(cur_file, self.codecs['video']))
                data['audio'] = meta.get('audio', self.getCodec(cur_file, self.codecs['audio']))
                data['audio_channels'] = meta.get('audio_channels', 2.0)
                if meta.get('resolution_width'):
                    data['resolution_width'] = meta.get('resolution_width')
                    data['resolution_height'] = meta.get('resolution_height')
                    data['aspect'] = round(float(meta.get('resolution_width')) / meta.get('resolution_height', 1), 2)
                else:
                    # Fall back to guessing resolution from the filename
                    data.update(self.getResolution(cur_file))
            except:
                log.debug('Error parsing metadata: %s %s', (cur_file, traceback.format_exc()))
                pass

        # Total size accumulates over all (large-enough) movie files
        data['size'] = data.get('size', 0) + self.getFileSize(cur_file)

    data['quality'] = None
    quality = fireEvent('quality.guess', size = data.get('size'), files = files, extra = data, single = True)

    # Use the quality that we snatched but check if it matches our guess
    if release_download and release_download.get('quality'):
        data['quality'] = fireEvent('quality.single', release_download.get('quality'), single = True)
        data['quality']['is_3d'] = release_download.get('is_3d', 0)
        if data['quality']['identifier'] != quality['identifier']:
            log.info('Different quality snatched than detected for %s: %s vs. %s. Assuming snatched quality is correct.', (files[0], data['quality']['identifier'], quality['identifier']))
        if data['quality']['is_3d'] != quality['is_3d']:
            log.info('Different 3d snatched than detected for %s: %s vs. %s. Assuming snatched 3d is correct.', (files[0], data['quality']['is_3d'], quality['is_3d']))

    # No snatched quality: use the guess, else default by disc type
    if not data['quality']:
        data['quality'] = quality

        if not data['quality']:
            data['quality'] = fireEvent('quality.single', 'dvdr' if group['is_dvd'] else 'dvdrip', single = True)

    data['quality_type'] = 'HD' if data.get('resolution_width', 0) >= 1280 or data['quality'].get('hd') else 'SD'

    # Strip any cp(tt…) imdb marker before deriving group/source from the name
    filename = re.sub(self.cp_imdb, '', files[0])
    data['group'] = self.getGroup(filename[len(folder):])
    data['source'] = self.getSourceMedia(filename)
    if data['quality'].get('is_3d', 0):
        data['3d_type'] = self.get3dType(filename)

    return data
def namePositionScore(nzb_name, movie_name):
    """Score a release name by how well the movie title/year are positioned.

    +10 when the release starts with the movie name, +10 more when the
    year follows directly, -20 when no quality tag is found at all, and
    -10 per unexpected word between the year and the quality tag.

    :param nzb_name: raw release name
    :param movie_name: expected movie title
    :return: integer score (may be negative)
    """
    score = 0

    nzb_words = re.split('\W+', simplifyString(nzb_name))
    qualities = fireEvent('quality.all', single = True)

    # If the name is wrapped in quotes, score only the quoted part
    try:
        nzb_name = re.search(r'([\'"])[^\1]*\1', nzb_name).group(0)
    except:
        pass

    name_year = fireEvent('scanner.name_year', nzb_name, single = True)

    # Give points for movies beginning with the correct name
    split_by = simplifyString(movie_name)
    name_split = []
    if len(split_by) > 0:
        name_split = simplifyString(nzb_name).split(split_by)
        if name_split[0].strip() == '':
            score += 10

    # If year is second in line, give more points
    if len(name_split) > 1 and name_year:
        after_name = name_split[1].strip()
        if tryInt(after_name[:4]) == name_year.get('year', None):
            score += 10
            after_name = after_name[4:]

        # Give -point to crap between year and quality
        found_quality = None
        for quality in qualities:
            # Main in words
            if quality['identifier'] in nzb_words:
                found_quality = quality['identifier']

            # Alt in words
            for alt in quality['alternative']:
                if alt in nzb_words:
                    found_quality = alt
                    break

        if not found_quality:
            return score - 20

        # Words allowed between year and quality come from the name_scores table
        allowed = []
        for value in name_scores:
            name, sc = value.split(':')
            allowed.append(name)
        inbetween = re.split('\W+', after_name.split(found_quality)[0].strip())

        score -= (10 * len(set(inbetween) - set(allowed)))

    return score
def suggestView(self, limit = 6, **kwargs):
    """API handler: return (and cache) movie suggestions for the dashboard.

    :param limit: maximum number of suggestions to return
    :param kwargs: may carry comma-separated 'movies', 'ignored', 'seen' lists
    :return: dict with 'success' and a 'movies' list shaped for the frontend
    """
    if self.isDisabled():
        return {
            'success': True,
            'movies': []
        }

    movies = splitString(kwargs.get('movies', ''))
    ignored = splitString(kwargs.get('ignored', ''))
    seen = splitString(kwargs.get('seen', ''))

    cached_suggestion = self.getCache('suggestion_cached')
    if cached_suggestion:
        suggestions = cached_suggestion
    else:
        # No explicit movie list: base suggestions on the user's library
        if not movies or len(movies) == 0:
            active_movies = fireEvent('media.with_status', ['active', 'done'], types = 'movie', single = True)
            movies = [getIdentifier(x) for x in active_movies]

        if not ignored or len(ignored) == 0:
            ignored = splitString(Env.prop('suggest_ignore', default = ''))
        if not seen or len(seen) == 0:
            movies.extend(splitString(Env.prop('suggest_seen', default = '')))

        suggestions = fireEvent('movie.suggest', movies = movies, ignore = ignored, single = True)
        self.setCache('suggestion_cached', suggestions, timeout = 6048000)  # Cache for 10 weeks

    medias = []
    for suggestion in suggestions[:int(limit)]:

        # Cache poster
        posters = suggestion.get('images', {}).get('poster', [])
        # Prefer tmdb-hosted posters when available
        poster = [x for x in posters if 'tmdb' in x]
        posters = poster if len(poster) > 0 else posters

        cached_poster = fireEvent('file.download', url = posters[0], single = True) if len(posters) > 0 else False
        files = {'image_poster': [cached_poster] } if cached_poster else {}

        medias.append({
            'status': 'suggested',
            'title': getTitle(suggestion),
            'type': 'movie',
            'info': suggestion,
            'files': files,
            'identifiers': {
                'imdb': suggestion.get('imdb')
            }
        })

    return {
        'success': True,
        'movies': medias
    }
def automationView(self, force_update = False, **kwargs):
    """API handler: return chart lists from automation providers, with
    in-library and user-ignored titles filtered out.

    :param force_update: unused here; accepted for API compatibility
    :return: dict with success flag, chart count, charts and ignored ids
    """

    db = get_db()
    charts = fireEvent('automation.get_chart_list', merge = True)
    ignored = splitString(Env.prop('charts_ignore', default = ''))

    # Create a list the movie/list.js can use
    for chart in charts:
        medias = []
        for media in chart.get('list', []):

            identifier = media.get('imdb')
            if identifier in ignored:
                continue

            # Skip movies that are already in the library
            try:
                try:
                    in_library = db.get('media', 'imdb-%s' % identifier)
                    if in_library:
                        continue
                except RecordNotFound:
                    pass
            except:
                pass

            # Cache poster
            posters = media.get('images', {}).get('poster', [])
            # Prefer tmdb-hosted posters when available
            poster = [x for x in posters if 'tmdb' in x]
            posters = poster if len(poster) > 0 else posters

            cached_poster = fireEvent('file.download', url = posters[0], single = True) if len(posters) > 0 else False
            files = {'image_poster': [cached_poster] } if cached_poster else {}

            medias.append({
                'status': 'chart',
                'title': getTitle(media),
                'type': 'movie',
                'info': media,
                'files': files,
                'identifiers': {
                    'imdb': identifier
                }
            })

        chart['list'] = medias

    return {
        'success': True,
        'count': len(charts),
        'charts': charts,
        'ignored': ignored,
    }
def match(self, release, media, quality):
    """Parse a release name and return the first chain that the media
    type's matcher accepts.

    :param release: release dict containing at least 'name'
    :param media: media dict containing at least 'type'
    :param quality: wanted quality dict
    :return: the first correct chain, or False when none parse/match
    """
    parsed = fireEvent('matcher.parse', release['name'], single = True)

    if not parsed.chains:
        log.info2('Wrong: %s, unable to parse release name (no chains)', release['name'])
        return False

    correct_event = '%s.matcher.correct' % media['type']
    for chain in parsed.chains:
        if fireEvent(correct_event, chain, release, media, quality, single = True):
            return chain

    return False
def scanFilesToLibrary(self, folder = None, files = None, release_download = None):
    """Scan the given files/folder and register every detected media group
    as a release, linking to an existing release when a snatch id is known.

    :param folder: folder the files live in (normalised before scanning)
    :param files: explicit file list to scan
    :param release_download: optional snatch info carrying a 'release_id'
    """

    folder = os.path.normpath(folder)

    groups = fireEvent('scanner.scan', folder = folder, files = files, single = True) or {}

    for group in groups.values():
        if not group.get('media'):
            continue

        update_id = release_download.get('release_id') if release_download else None
        if update_id:
            # Update the release that was snatched for this download
            fireEvent('release.add', group = group, update_id = update_id)
        else:
            fireEvent('release.add', group = group)
def check(self, force = False):
    """Check whether an application update is available and, once per
    session, notify the user (unless auto-update is on).

    :param force: check even when the updater is disabled
    :return: True when an update is available, False otherwise,
             None when disabled and not forced
    """
    if not force and self.isDisabled():
        return

    if self.updater.check():
        # Notify only once, and only when the user must update manually
        if not self.available_notified and self.conf('notification') and not self.conf('automatic'):
            info = self.updater.info()
            version_date = datetime.fromtimestamp(info['update_version']['date'])
            fireEvent('updater.available', message = 'A new update with hash "%s" is available, this version is from %s' % (info['update_version']['hash'], version_date), data = info)
            self.available_notified = True
        return True

    return False
def __init__(self):
    """Register file download/serving handlers and the daily cache cleanup."""

    addEvent('file.download', self.download)

    # Serve files straight out of the cache directory
    addApiView('file.cache/(.*)', self.showCacheFile, static = True, docs = {
        'desc': 'Return a file from the cp_data/cache directory',
        'params': {
            'filename': {'desc': 'path/filename of the wanted file'}
        },
        'return': {'type': 'file'}
    })

    fireEvent('schedule.interval', 'file.cleanup', self.cleanup, hours = 24)

    addEvent('app.test', self.doSubfolderTest)
def addToLibrary(group, total_found, to_go):
    """Closure: scanner callback that records progress for the enclosing
    scope's `folder` and adds each identified group as a release.

    :param group: scanner group dict ('media', 'identifier', …)
    :param total_found: total number of groups the scanner found
    :param to_go: number of groups still to process
    """
    # First callback initialises the progress totals for this folder
    if self.in_progress[folder]['total'] is None:
        self.in_progress[folder].update({
            'total': total_found,
            'to_go': total_found,
        })

    # NOTE(review): progress appears to be refreshed on every callback — confirm
    self.updateProgress(folder, to_go)

    if group['media'] and group['identifier']:
        added_identifiers.append(group['identifier'])

        # Add it to release and update the info
        fireEvent('release.add', group = group, update_info = False)
        fireEvent('movie.update', identifier = group['identifier'], on_complete = self.createAfterUpdate(folder, group['identifier']))
def availableChars(self, types = None, status = None, release_status = None):
    """Return the set of first-character buckets ('#', 'a'-'z') that have
    media matching the given type/status/release-status filters.

    :param types: media type or list of types to include
    :param status: media status or list of statuses to filter on
    :param release_status: release status or list of statuses to filter on
    :return: list of single-character keys present in the filtered media
    """

    db = get_db()

    # Make a list from string
    if status and not isinstance(status, (list, tuple)):
        status = [status]
    if release_status and not isinstance(release_status, (list, tuple)):
        release_status = [release_status]
    if types and not isinstance(types, (list, tuple)):
        types = [types]

    # query media ids
    if types:
        all_media_ids = set()
        for media_type in types:
            all_media_ids = all_media_ids.union(set([x['_id'] for x in db.get_many('media_by_type', media_type)]))
    else:
        all_media_ids = set([x['_id'] for x in db.all('media')])

    media_ids = all_media_ids
    filter_by = {}

    # Filter on movie status
    if status and len(status) > 0:
        filter_by['media_status'] = set()
        for media_status in fireEvent('media.with_status', status, with_doc = False, single = True):
            filter_by['media_status'].add(media_status.get('_id'))

    # Filter on release status
    if release_status and len(release_status) > 0:
        filter_by['release_status'] = set()
        # FIX: loop variable renamed — it previously shadowed (and clobbered)
        # the release_status parameter while iterating
        for release_doc in fireEvent('release.with_status', release_status, with_doc = False, single = True):
            filter_by['release_status'].add(release_doc.get('media_id'))

    # Filter by combining ids
    # FIX: use set intersection instead of an O(n*m) list scan per filter
    for x in filter_by:
        media_ids = media_ids.intersection(filter_by[x])

    chars = set()
    for x in db.all('media_startswith'):
        if x['_id'] in media_ids:
            chars.add(x['key'])

        # '#' plus 26 letters: every possible bucket seen, stop early
        if len(chars) == 27:
            break

    return list(chars)
def getIMDBids(self):
    """Collect IMDB ids from the user-configured automation RSS feeds.

    Only feeds whose matching entry in 'automation_urls_use' is truthy are
    fetched; each feed item's title is parsed into name/year and resolved
    to an IMDB id.

    :return: list of IMDB id strings (may contain duplicates across feeds)
    """

    movies = []

    enablers = [tryInt(x) for x in splitString(self.conf("automation_urls_use"))]
    urls = splitString(self.conf("automation_urls"))

    # FIX: use enumerate instead of a manual counter, and guard against an
    # enablers list shorter than the url list (previously an IndexError)
    for index, rss_url in enumerate(urls):

        if index >= len(enablers) or not enablers[index]:
            continue

        rss_movies = self.getRSSData(rss_url, headers={"Referer": ""})

        for movie in rss_movies:
            nameyear = fireEvent("scanner.name_year", self.getTextElement(movie, "title"), single=True)
            imdb = self.search(nameyear.get("name"), nameyear.get("year"), imdb_only=True)

            if not imdb:
                continue

            movies.append(imdb)

    return movies