def __init__(self):
    """Register notification events, API views and maintenance schedules."""
    super(CoreNotifier, self).__init__()

    # Core notification events
    addEvent('notify', self.notify)
    addEvent('notify.frontend', self.frontend)

    addApiView('notification.markread', self.markAsRead, docs = {
        'desc': 'Mark notifications as read',
        'params': {
            'ids': {'desc': 'Notification id you want to mark as read. All if ids is empty.', 'type': 'int (comma separated)'},
        },
    })

    addApiView('notification.list', self.listView, docs = {
        'desc': 'Get list of notifications',
        'params': {
            'limit_offset': {'desc': 'Limit and offset the notification list. Examples: "50" or "50,30"'},
        },
        'return': {'type': 'object', 'example': """{ 'success': True, 'empty': bool, any notification returned or not, 'notifications': array, notifications found, }"""}
    })

    # Long-polling listener plus a plain request/response fallback view
    addNonBlockApiView('notification.listener', (self.addListener, self.removeListener))
    addApiView('notification.listener', self.listener)

    # Check for server messages twice a day
    fireEvent('schedule.interval', 'core.check_messages', self.checkMessages, hours = 12, single = True)

    addEvent('app.load', self.clean)
    addEvent('app.load', self.checkMessages)
def __init__(self):
    """Select the updater implementation for this install and register hooks."""
    # Desktop build > git checkout > plain source install
    if Env.get('desktop'):
        self.updater = DesktopUpdater()
    elif os.path.isdir(os.path.join(Env.get('app_dir'), '.git')):
        self.updater = GitUpdater(self.conf('git_command', default = 'git'))
    else:
        self.updater = SourceUpdater()

    # Look for updates every 6 hours and once right after startup
    fireEvent('schedule.interval', 'updater.check', self.autoUpdate, hours = 6)
    addEvent('app.load', self.autoUpdate)
    addEvent('updater.info', self.info)

    addApiView('updater.info', self.getInfo, docs = {
        'desc': 'Get updater information',
        'return': {
            'type': 'object',
            'example': """{ 'last_check': "last checked for update", 'update_version': "available update version or empty", 'version': current_cp_version }"""}
    })
    addApiView('updater.update', self.doUpdateView)
    addApiView('updater.check', self.checkView, docs = {
        'desc': 'Check for available update',
        'return': {'type': 'see updater.info'}
    })
def scanFolderToLibrary(self, folder = None, newer_than = 0, simple = True):
    """Scan a folder and add any found releases to the library.

    folder: directory to scan; returns None when missing or not a directory.
    newer_than: only consider files changed after this timestamp.
    simple: passed through to the scanner for a lighter scan.
    Returns a list of library identifiers that were updated, or None.
    """
    # Guard: os.path.normpath(None) raises TypeError with the default argument
    if not folder:
        return

    folder = os.path.normpath(folder)
    if not os.path.isdir(folder):
        return

    groups = self.scan(folder = folder, simple = simple, newer_than = newer_than)

    added_identifier = []
    # 'while True and ...' simplified; popitem empties the dict as we go
    while not self.shuttingDown():
        try:
            identifier, group = groups.popitem()
        except KeyError:
            # Nothing left to process
            break

        # Save to DB
        if group['library']:
            # Add release
            fireEvent('release.add', group = group)
            library_item = fireEvent('library.update', identifier = group['library'].get('identifier'), single = True)
            if library_item:
                added_identifier.append(library_item['identifier'])

    return added_identifier
def forceDefaults(self):
    """Ensure default qualities/profiles exist and active movies keep a valid profile."""
    db = get_db()

    # Fill qualities and profiles if they are empty somehow..
    if db.count(db.all, 'profile') == 0:
        if db.count(db.all, 'quality') == 0:
            fireEvent('quality.fill', single = True)
        self.fill()

    # Reassign the first profile to active movies whose profile disappeared
    try:
        medias = fireEvent('media.with_status', 'active', single = True)

        profile_ids = [profile.get('_id') for profile in self.all()]
        default_id = profile_ids[0]

        for media in medias:
            if media.get('profile_id') not in profile_ids:
                media['profile_id'] = default_id
                db.update(media)
    except:
        log.error('Failed: %s', traceback.format_exc())
def getMetaData(self, group):
    """Collect codec/resolution metadata for a movie group and guess its quality.

    Returns a dict with video/audio codecs, resolution, aspect, quality info
    and the release group/source parsed from the first filename.
    """
    data = {}
    files = list(group['files']['movie'])

    for cur_file in files:
        if os.path.getsize(cur_file) < self.minimal_filesize['media']:
            continue  # Ignore smaller files

        meta = self.getMeta(cur_file)

        try:
            data['video'] = meta.get('video', self.getCodec(cur_file, self.codecs['video']))
            data['audio'] = meta.get('audio', self.getCodec(cur_file, self.codecs['audio']))
            data['resolution_width'] = meta.get('resolution_width', 720)
            data['resolution_height'] = meta.get('resolution_height', 480)
            # Bug fix: force float division — Python 2 integer division
            # truncated the aspect ratio (720 / 480 == 1).
            data['aspect'] = float(meta.get('resolution_width', 720)) / meta.get('resolution_height', 480)
        except:
            log.debug('Error parsing metadata: %s %s', (cur_file, traceback.format_exc()))

        if data.get('audio'):
            break

    data['quality'] = fireEvent('quality.guess', files = files, extra = data, single = True)
    if not data['quality']:
        data['quality'] = fireEvent('quality.single', 'dvdr' if group['is_dvd'] else 'dvdrip', single = True)

    data['quality_type'] = 'HD' if data.get('resolution_width', 0) >= 1280 else 'SD'

    # Strip the ".cp(tt1234567)" imdb marker before parsing group/source.
    # Bug fix: the old class '[0-9{7}]+' also matched literal '{' and '}'.
    filename = re.sub('(.cp\(tt[0-9]+\))', '', files[0])
    data['group'] = self.getGroup(filename)
    data['source'] = self.getSourceMedia(filename)

    return data
def all_movies(self):
    """Run a search for every active movie, unless a search is already running."""
    if self.in_progress:
        log.info('Search already in progress')
        return

    self.in_progress = True

    db = get_session()
    movies = db.query(Movie).filter(
        Movie.status.has(identifier = 'active')
    ).all()

    for movie in movies:
        movie_dict = movie.to_dict({
            'profile': {'types': {'quality': {}}},
            'releases': {'status': {}, 'quality': {}},
            'library': {'titles': {}, 'files':{}},
            'files': {}
        })

        try:
            self.single(movie_dict)
        except IndexError:
            # Library data looks incomplete, force a refresh
            fireEvent('library.update', movie_dict['library']['identifier'], force = True)
        except:
            log.error('Search failed for %s: %s', (movie_dict['library']['identifier'], traceback.format_exc()))

        # Break if CP wants to shut down
        if self.shuttingDown():
            break

    #db.close()
    self.in_progress = False
def containsOtherQuality(self, nzb, movie_year = None, preferred_quality = None, single_category = False):
    """Check whether a release name contains a quality other than the preferred one.

    Returns False when only the preferred quality matched (or when nothing
    matched and single_category is set); True otherwise.
    """
    # Bug fix: mutable default argument ({}) was shared across calls
    if preferred_quality is None:
        preferred_quality = {}

    name = nzb['name']
    size = nzb.get('size', 0)
    nzb_words = re.split('\W+', simplifyString(name))

    qualities = fireEvent('quality.all', single = True)

    found = {}
    for quality in qualities:
        # Main in words
        if quality['identifier'] in nzb_words:
            found[quality['identifier']] = True

        # Alt in words
        if list(set(nzb_words) & set(quality['alternative'])):
            found[quality['identifier']] = True

    # Hack for older movies that don't contain quality tag
    year_name = fireEvent('scanner.name_year', name, single = True)
    if movie_year < datetime.datetime.now().year - 3 and not year_name.get('year', None):
        if size > 3000:  # Assume dvdr
            return 'dvdr' == preferred_quality['identifier']
        else:  # Assume dvdrip
            return 'dvdrip' == preferred_quality['identifier']

    # Allow other qualities; 'allow' may be absent, which used to crash the loop
    for allowed in preferred_quality.get('allow') or []:
        if found.get(allowed):
            del found[allowed]

    if (len(found) == 0 and single_category):
        return False

    return not (found.get(preferred_quality['identifier']) and len(found) == 1)
def run(self):
    """Import all discovered plugin modules and load their plugins/settings."""
    did_save = 0

    for priority in self.modules:
        for module_name, plugin in sorted(self.modules[priority].iteritems()):

            # Load module
            try:
                m = getattr(self.loadModule(module_name), plugin.get("name"))
                log.info("Loading %s: %s" % (plugin["type"], plugin["name"]))

                # Save default settings for plugin/provider
                did_save += self.loadSettings(m, module_name, save=False)

                self.loadPlugins(m, plugin.get("name"))
            except ImportError as e:
                # todo:: subclass ImportError for missing requirements.
                if e.message.lower().startswith("missing"):
                    log.error(e.message)
                    pass
                # todo:: this needs to be more descriptive.
                log.error("Import error, remove the empty folder: %s" % plugin.get("module"))
                log.debug("Can't import %s: %s" % (module_name, traceback.format_exc()))
            except:
                log.error("Can't import %s: %s" % (module_name, traceback.format_exc()))

    if did_save:
        fireEvent("settings.save")
def __init__(self):
    """Register library-management events, API views and the update schedule."""
    # NOTE(review): other plugins fire 'schedule.interval'; 'scheduler.interval'
    # looks like a typo — confirm against the scheduler plugin before changing.
    fireEvent('scheduler.interval', identifier = 'manage.update_library', handle = self.updateLibrary, hours = 2)

    addEvent('manage.update', self.updateLibrary)
    addEvent('manage.diskspace', self.getDiskSpace)

    # Add files after renaming
    def after_rename(message = None, group = None):
        if not group:
            group = {}
        return self.scanFilesToLibrary(folder = group['destination_dir'], files = group['renamed_files'])
    addEvent('renamer.after', after_rename, priority = 110)

    addApiView('manage.update', self.updateLibraryView, docs = {
        'desc': 'Update the library by scanning for new movies',
        'params': {
            'full': {'desc': 'Do a full update or just recently changed/added movies.'},
        }
    })

    addApiView('manage.progress', self.getProgress, docs = {
        'desc': 'Get the progress of current manage update',
        'return': {'type': 'object', 'example': """{ 'progress': False || object, total & to_go, }"""},
    })

    if not Env.get('dev') and self.conf('startup_scan'):
        addEvent('app.load', self.updateLibraryQuick)
def restatus(self, movie_id):
    """Recalculate a movie's active/done status from its profile and releases.

    Returns True when the status was recalculated and saved, False when the
    movie does not exist (or has no titles).
    """
    active_status = fireEvent('status.get', 'active', single = True)
    done_status = fireEvent('status.get', 'done', single = True)

    db = get_session()

    m = db.query(Movie).filter_by(id = movie_id).first()
    if not m or len(m.library.titles) == 0:
        log.debug('Can\'t restatus movie, doesn\'t seem to exist.')
        return False

    log.debug('Changing status for %s', (m.library.titles[0].title))
    if not m.profile:
        m.status_id = done_status.get('id')
    else:
        # Movie stays wanted until a 'finish' quality has a done release
        move_to_wanted = True

        for t in m.profile.types:
            for release in m.releases:
                # Bug fix: use '==' for value equality. 'is' compared object
                # identity and could miss equal strings/ints that aren't interned.
                if t.quality.identifier == release.quality.identifier and (release.status_id == done_status.get('id') and t.finish):
                    move_to_wanted = False

        m.status_id = active_status.get('id') if move_to_wanted else done_status.get('id')

    db.commit()
    #db.close()

    return True
def registerStatic(self, plugin_file, add_to_head = True):
    """Serve the plugin's ./static folder over HTTP; optionally add js/css to <head>.

    plugin_file: the plugin's __file__, used to locate its static directory.
    add_to_head: when True, register every .js/.css file for HTML <head> injection.
    """
    # Register plugin path
    self.plugin_path = os.path.dirname(plugin_file)
    static_folder = toUnicode(os.path.join(self.plugin_path, 'static'))

    if not os.path.isdir(static_folder):
        return

    # Get plugin_name from PluginName (CamelCase -> snake_case)
    s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', self.__class__.__name__)
    class_name = re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower()

    # View path
    path = 'static/plugin/%s/' % class_name

    # Add handler to Tornado
    Env.get('app').add_handlers(".*$", [(Env.get('web_base') + path + '(.*)', StaticFileHandler, {'path': static_folder})])

    # Register for HTML <HEAD>
    if add_to_head:
        for f in glob.glob(os.path.join(self.plugin_path, 'static', '*')):
            ext = getExt(f)
            if ext in ['js', 'css']:
                # Idiom fix: was "ext in 'js'", a substring test that only
                # worked by accident for the two filtered extensions.
                fireEvent('register_%s' % ('script' if ext == 'js' else 'style'), path + os.path.basename(f), f)
def __init__(self):
    """Register release events, API views and the periodic cleanup schedule."""
    addApiView('release.manual_download', self.manualDownload, docs = {
        'desc': 'Send a release manually to the downloaders',
        'params': {
            'id': {'type': 'id', 'desc': 'ID of the release object in release-table'}
        }
    })
    addApiView('release.delete', self.deleteView, docs = {
        'desc': 'Delete releases',
        'params': {
            'id': {'type': 'id', 'desc': 'ID of the release object in release-table'}
        }
    })
    addApiView('release.ignore', self.ignore, docs = {
        'desc': 'Toggle ignore, for bad or wrong releases',
        'params': {
            'id': {'type': 'id', 'desc': 'ID of the release object in release-table'}
        }
    })

    addEvent('release.add', self.add)
    addEvent('release.download', self.download)
    addEvent('release.try_download_result', self.tryDownloadResult)
    addEvent('release.create_from_search', self.createFromSearch)
    addEvent('release.delete', self.delete)
    addEvent('release.clean', self.clean)
    addEvent('release.update_status', self.updateStatus)
    addEvent('release.with_status', self.withStatus)
    addEvent('release.for_media', self.forMedia)

    # Clean releases that didn't have activity in the last week
    addEvent('app.load', self.cleanDone, priority = 1000)
    fireEvent('schedule.interval', 'movie.clean_releases', self.cleanDone, hours = 12)
def updateStatus(self, release_id, status = None):
    """Set a release document's status and notify the frontend.

    Returns True on success (or no change needed), False when no status is
    given or the database update failed.
    """
    if not status:
        return False

    try:
        db = get_db()

        rel = db.get('id', release_id)
        if rel and rel.get('status') != status:

            # Work out a readable name for logging: prefer a movie file name
            release_name = None
            if rel.get('files'):
                for file_type in rel.get('files', {}):
                    if file_type == 'movie':
                        for release_file in rel['files'][file_type]:
                            release_name = os.path.basename(release_file)
                        break

            if not release_name and rel.get('info'):
                release_name = rel['info'].get('name')

            #update status in Db
            log.debug('Marking release %s as %s', (release_name, status))
            rel['status'] = status
            rel['last_edit'] = int(time.time())

            db.update(rel)

            #Update all movie info as there is no release update function
            fireEvent('notify.frontend', type = 'release.update_status', data = rel)

        return True
    except:
        log.error('Failed: %s', traceback.format_exc())

    return False
def scanFilesToLibrary(self, folder = None, files = None):
    """Scan the given files and register every group with library data as a release."""
    groups = self.scan(folder = folder, files = files)

    for group in groups.itervalues():
        if group['library']:
            fireEvent('release.add', group = group)
def manualDownload(self, id = None, **kwargs):
    """API: push a single release to the downloaders by release id."""
    db = get_db()

    try:
        release = db.get('id', id)
        item = release['info']
        movie = db.get('id', release['media_id'])

        fireEvent('notify.frontend', type = 'release.manual_download', data = True, message = 'Snatching "%s"' % item['name'])

        # Get matching provider
        provider = fireEvent('provider.belongs_to', item['url'], provider = item.get('provider'), single = True)

        # Magnet links need no provider download call
        if item.get('protocol') != 'torrent_magnet':
            item['download'] = provider.loginDownload if provider.urls.get('login') else provider.download

        success = self.download(data = item, media = movie, manual = True)

        if success:
            fireEvent('notify.frontend', type = 'release.manual_download', data = True, message = 'Successfully snatched "%s"' % item['name'])

        return {
            'success': success == True
        }
    except:
        log.error('Couldn\'t find release with id: %s: %s', (id, traceback.format_exc()))
        return {
            'success': False
        }
def updateSuggestionCache(self, ignore_imdb = None, limit = 6, ignored = None, seen = None):
    """Refresh the cached movie suggestions, dropping ignored/seen entries.

    ignore_imdb: imdb id being dismissed; it is filtered out of the cache.
    Returns the new suggestion list (also stored in the cache).
    """
    # Combine with previous suggestion_cache
    cached_suggestion = self.getCache('suggestion_cached') or []
    new_suggestions = []
    ignored = [] if not ignored else ignored
    seen = [] if not seen else seen

    # Keep every cached suggestion except the dismissed one
    if ignore_imdb:
        for cs in cached_suggestion:
            if cs.get('imdb') != ignore_imdb:
                new_suggestions.append(cs)

    # Get new results and add them
    if len(new_suggestions) - 1 < limit:
        active_status, done_status = fireEvent('status.get', ['active', 'done'], single = True)

        db = get_session()
        active_movies = db.query(Movie) \
            .join(Library) \
            .with_entities(Library.identifier) \
            .filter(Movie.status_id.in_([active_status.get('id'), done_status.get('id')])).all()
        movies = [x[0] for x in active_movies]
        movies.extend(seen)

        ignored.extend([x.get('imdb') for x in cached_suggestion])

        suggestions = fireEvent('movie.suggest', movies = movies, ignore = list(set(ignored)), single = True)

        if suggestions:
            new_suggestions.extend(suggestions)

    self.setCache('suggestion_cached', new_suggestions, timeout = 3024000)

    return new_suggestions
def download(self):
    """API: send a stored release to the downloader by its database id."""
    db = get_session()
    id = getParam('id')

    rel = db.query(Relea).filter_by(id = id).first()
    if rel:
        # Flatten the release info rows into a plain dict
        item = {}
        for info in rel.info:
            item[info.identifier] = info.value

        # Get matching provider
        provider = fireEvent('provider.belongs_to', item['url'], single = True)
        item['download'] = provider.download

        fireEvent('searcher.download', data = item, movie = rel.movie.to_dict({
            'profile': {'types': {'quality': {}}},
            'releases': {'status': {}, 'quality': {}},
            'library': {'titles': {}, 'files':{}},
            'files': {}
        }))

        return jsonified({
            'success': True
        })
    else:
        log.error('Couldn\'t find release with id: %s' % id)

    return jsonified({
        'success': False
    })
def updateLibrary(self, full = True):
    """Scan all configured directories; with full=True also purge removed movies."""
    last_update = float(Env.prop('manage.last_update', default = 0))

    # Throttle: skip when disabled or updated within the last 20 seconds
    if self.isDisabled() or (last_update > time.time() - 20):
        return

    directories = self.directories()
    added_identifiers = []

    for directory in directories:
        if not os.path.isdir(directory):
            if len(directory) > 0:
                log.error('Directory doesn\'t exist: %s' % directory)
            continue

        log.info('Updating manage library: %s' % directory)
        identifiers = fireEvent('scanner.folder', folder = directory, newer_than = last_update, single = True)
        added_identifiers.extend(identifiers)

        # Break if CP wants to shut down
        if self.shuttingDown():
            break

    # If cleanup option is enabled, remove offline files from database
    if self.conf('cleanup') and full and not self.shuttingDown():

        # Get movies with done status
        done_movies = fireEvent('movie.list', status = 'done', single = True)

        for done_movie in done_movies:
            if done_movie['library']['identifier'] not in added_identifiers:
                fireEvent('movie.delete', movie_id = done_movie['id'])

    Env.prop('manage.last_update', time.time())
def check(self):
    """Compare the local git HEAD with origin; auto-update or record the new version."""
    if self.update_version or self.isDisabled():
        return

    log.info('Checking for new version on github for %s' % self.repo_name)
    if not Env.setting('development'):
        self.repo.fetch()

    current_branch = self.repo.getCurrentBranch().name

    for branch in self.repo.getRemoteByName('origin').getBranches():
        if current_branch == branch.name:

            local = self.repo.getHead()
            remote = branch.getHead()

            log.info('Versions, local:%s, remote:%s' % (local.hash[:8], remote.hash[:8]))

            if local.getDate() < remote.getDate():
                if self.conf('automatic') and not self.update_failed:
                    if self.doUpdate():
                        fireEventAsync('app.crappy_restart')
                else:
                    # Remember the pending update and optionally notify the user
                    self.update_version = {
                        'hash': remote.hash[:8],
                        'date': remote.getDate(),
                    }
                    if self.conf('notification'):
                        fireEvent('updater.available', message = 'A new update is available', data = self.getVersion())

    self.last_check = time.time()
def searchAllView(self):
    """Trigger a full search for every registered media type and collect the results."""
    return dict(
        (media_type, fireEvent('%s.searcher.all_view' % media_type))
        for media_type in fireEvent('media.types')
    )
def withStatus(self, status, types = None, with_doc = True):
    """Yield media matching the given status(es).

    Generator: yields full documents when with_doc is True (skipping deleted
    records and reporting corrupted ones), otherwise the raw index rows.
    """
    db = get_db()

    if types and not isinstance(types, (list, tuple)):
        types = [types]

    status = list(status if isinstance(status, (list, tuple)) else [status])

    for s in status:
        for ms in db.get_many('media_status', s):
            if not with_doc:
                yield ms
                continue

            try:
                doc = db.get('id', ms['_id'])

                if types and doc.get('type') not in types:
                    continue

                yield doc
            except (RecordDeleted, RecordNotFound):
                log.debug('Record not found, skipping: %s', ms['_id'])
            except (ValueError, EOFError):
                fireEvent('database.delete_corrupted', ms.get('_id'), traceback_error = traceback.format_exc(0))
def getLibraryTags(self, imdb):
    """Return in_wanted/in_library flags (holding movie info when set) for an imdb id."""
    temp = {
        'in_wanted': False,
        'in_library': False,
    }

    # Add release info from current library
    db = get_session()
    try:
        l = db.query(Library).filter_by(identifier = imdb).first()
        if l:
            # Statuses
            active_status, done_status = fireEvent('status.get', ['active', 'done'], single = True)

            for movie in l.movies:
                if movie.status_id == active_status['id']:
                    temp['in_wanted'] = fireEvent('movie.get', movie.id, single = True)

                for release in movie.releases:
                    if release.status_id == done_status['id']:
                        temp['in_library'] = fireEvent('movie.get', movie.id, single = True)
    except:
        log.error('Tried getting more info on searched movies: %s', traceback.format_exc())

    return temp
def updateStatus(self, id, status = None):
    """Update a release row's status and push the change to the frontend.

    Returns True when handled (including no-op), False when no status given.
    """
    if not status:
        return False

    db = get_session()

    rel = db.query(Relea).filter_by(id = id).first()
    if rel and status and rel.status_id != status.get('id'):

        item = {}
        for info in rel.info:
            item[info.identifier] = info.value

        # Bug fix: default to the stored name first. Previously the name was
        # only assigned in the else-branch, leaving release_name unbound when
        # the release had files but none of type 'movie'.
        release_name = item['name']
        if rel.files:
            for file_item in rel.files:
                if file_item.type.identifier == 'movie':
                    release_name = os.path.basename(file_item.path)
                    break

        #update status in Db
        log.debug('Marking release %s as %s', (release_name, status.get("label")))
        rel.status_id = status.get('id')
        rel.last_edit = int(time.time())
        db.commit()

        #Update all movie info as there is no release update function
        fireEvent('notify.frontend', type = 'release.update_status.%s' % rel.id, data = status.get('id'))

    return True
def containsOtherQuality(self, nzb, movie_year = None, preferred_quality = None):
    """Check whether a release name contains a quality other than the preferred one.

    Returns True when other qualities were found (or none matched), False when
    only the preferred quality matched.
    """
    # Bug fix: mutable default argument ({}) was shared across calls
    if preferred_quality is None:
        preferred_quality = {}

    name = nzb['name']
    size = nzb.get('size', 0)
    nzb_words = re.split('\W+', simplifyString(name))

    qualities = fireEvent('quality.all', single = True)

    found = {}
    for quality in qualities:
        # Main in words
        if quality['identifier'] in nzb_words:
            found[quality['identifier']] = True

        # Alt in words
        if list(set(nzb_words) & set(quality['alternative'])):
            found[quality['identifier']] = True

    # Hack for older movies that don't contain quality tag
    year_name = fireEvent('scanner.name_year', name, single = True)
    if len(found) == 0 and movie_year < datetime.datetime.now().year - 3 and not year_name.get('year', None):
        if size > 3000:  # Assume dvdr
            log.info('Quality was missing in name, assuming it\'s a DVD-R based on the size: %s', (size))
            found['dvdr'] = True
        else:  # Assume dvdrip
            log.info('Quality was missing in name, assuming it\'s a DVD-Rip based on the size: %s', (size))
            found['dvdrip'] = True

    # Allow other qualities; 'allow' may be absent, which used to crash the loop
    for allowed in preferred_quality.get('allow') or []:
        if found.get(allowed):
            del found[allowed]

    return not (found.get(preferred_quality['identifier']) and len(found) == 1)
def tryNextRelease(self, movie_id, manual = False):
    """Ignore every snatched release of a movie and search for the next best one."""
    snatched_status = fireEvent('status.get', 'snatched', single = True)
    ignored_status = fireEvent('status.get', 'ignored', single = True)

    try:
        db = get_session()

        # Flag all snatched releases as ignored so they won't be retried
        rels = db.query(Release).filter_by(
            status_id = snatched_status.get('id'),
            movie_id = movie_id
        ).all()

        for rel in rels:
            rel.status_id = ignored_status.get('id')
        db.commit()

        movie_dict = fireEvent('movie.get', movie_id, single = True)
        log.info('Trying next release for: %s', getTitle(movie_dict['library']))
        fireEvent('searcher.single', movie_dict)

        return True
    except:
        log.error('Failed searching for next release: %s', traceback.format_exc())
        return False
def run(self):
    """Load every discovered plugin module in priority order and save new settings."""
    did_save = 0

    for priority in sorted(self.modules):
        for module_name, plugin in sorted(self.modules[priority].iteritems()):

            # Load module
            try:
                # Skip "private" (double-underscore) plugins
                if plugin.get('name')[:2] == '__':
                    continue

                m = self.loadModule(module_name)
                if m is None:
                    continue

                log.info('Loading %s: %s', (plugin['type'], plugin['name']))

                # Save default settings for plugin/provider
                did_save += self.loadSettings(m, module_name, save = False)

                self.loadPlugins(m, plugin.get('name'))
            except ImportError as e:
                # todo:: subclass ImportError for missing requirements.
                if e.message.lower().startswith("missing"):
                    log.error(e.message)
                    pass
                # todo:: this needs to be more descriptive.
                log.error('Import error, remove the empty folder: %s', plugin.get('module'))
                log.debug('Can\'t import %s: %s', (module_name, traceback.format_exc()))
            except:
                log.error('Can\'t import %s: %s', (module_name, traceback.format_exc()))

    if did_save:
        fireEvent('settings.save')
def __init__(self):
    """Register searcher events, API views and the nightly full-search cronjob."""
    addEvent('searcher.all', self.allMovies)
    addEvent('searcher.single', self.single)
    addEvent('searcher.correct_movie', self.correctMovie)
    addEvent('searcher.download', self.download)
    addEvent('searcher.try_next_release', self.tryNextRelease)

    addApiView('searcher.try_next', self.tryNextReleaseView, docs = {
        'desc': 'Marks the snatched results as ignored and try the next best release',
        'params': {
            'id': {'desc': 'The id of the movie'},
        },
    })

    addApiView('searcher.full_search', self.allMoviesView, docs = {
        'desc': 'Starts a full search for all wanted movies',
    })

    addApiView('searcher.progress', self.getProgress, docs = {
        'desc': 'Get the progress of current full search',
        'return': {'type': 'object', 'example': """{ 'progress': False || object, total & to_go, }"""},
    })

    # Schedule cronjob
    fireEvent('schedule.cron', 'searcher.all', self.allMovies,
              day = self.conf('cron_day'), hour = self.conf('cron_hour'), minute = self.conf('cron_minute'))
def cleanDone(self):
    """Clean up releases of movies that have been done for a while.

    Available releases are deleted; snatched/downloaded ones are set to
    ignored so they stay ignored if the movie is re-added.
    """
    log.debug('Removing releases from dashboard')

    now = time.time()
    week = 262080  # NOTE(review): named 'week' but 262080s ~= 3 days — confirm intent

    done_status, available_status, snatched_status, downloaded_status, ignored_status = \
        fireEvent('status.get', ['done', 'available', 'snatched', 'downloaded', 'ignored'], single = True)

    db = get_session()

    # get movies last_edit more than a week ago
    media = db.query(Media) \
        .filter(Media.status_id == done_status.get('id'), Media.last_edit < (now - week)) \
        .all()

    for item in media:
        for rel in item.releases:
            # Remove all available releases
            if rel.status_id in [available_status.get('id')]:
                fireEvent('release.delete', id = rel.id, single = True)
            # Set all snatched and downloaded releases to ignored to make sure they are ignored when re-adding the move
            elif rel.status_id in [snatched_status.get('id'), downloaded_status.get('id')]:
                self.updateStatus(id = rel.id, status = ignored_status)

    db.expire_all()
def append(self, result):
    """Score a search result and keep it only when it passes the correctness check."""
    new_result = self.fillResult(result)

    is_correct = fireEvent('searcher.correct_release', new_result, self.media, self.quality,
                           imdb_results = self.kwargs.get('imdb_results', False), single = True)

    if is_correct and new_result['id'] not in self.result_ids:
        # Scale the calculated score by the correctness confidence weight
        is_correct_weight = float(is_correct)

        new_result['score'] += fireEvent('score.calculate', new_result, self.media, single = True)

        old_score = new_result['score']
        new_result['score'] = int(old_score * is_correct_weight)

        log.info2('Found correct release with weight %.02f, old_score(%d) now scaled to score(%d)', (
            is_correct_weight, old_score, new_result['score']
        ))

        self.found(new_result)
        self.result_ids.append(result['id'])

        super(ResultList, self).append(new_result)
def updateLibrary(self, full = False):
    """Scan all managed directories and optionally purge offline files from the DB."""
    # Throttle: skip when disabled or updated within the last 20 seconds
    if self.isDisabled() or (self.last_update > time.time() - 20):
        return

    directories = self.directories()

    for directory in directories:
        if not os.path.isdir(directory):
            if len(directory) > 0:
                log.error('Directory doesn\'t exist: %s' % directory)
            continue

        log.info('Updating manage library: %s' % directory)
        fireEvent('scanner.folder', folder = directory)

        # If cleanup option is enabled, remove offline files from database
        if self.conf('cleanup'):
            db = get_session()
            files_in_path = db.query(File).filter(File.path.like(directory + '%%')).filter_by(available = 0).all()
            [db.delete(x) for x in files_in_path]
            db.commit()
            db.remove()

        # Break if CP wants to shut down
        if self.shuttingDown():
            break

    self.last_update = time.time()
def getSoonView(self, limit_offset=None, random=False, late=False, **kwargs):
    """List active movies that are about to be (or should already have been) released.

    late=True returns movies whose release date passed more than 3 months ago
    and that still have no snatched/available/seeding/downloaded release.
    """
    db = get_db()
    now = time.time()

    # Get profiles first, determine pre or post theater
    profiles = fireEvent('profile.all', single=True)
    pre_releases = fireEvent('quality.pre_releases', single=True)

    # See what the profile contain and cache it
    profile_pre = {}
    for profile in profiles:
        contains = {}
        for q_identifier in profile.get('qualities', []):
            contains['theater' if q_identifier in pre_releases else 'dvd'] = True
        profile_pre[profile.get('_id')] = contains

    # Add limit
    limit = 12
    if limit_offset:
        splt = splitString(limit_offset) if isinstance(limit_offset, (str, unicode)) else limit_offset
        limit = tryInt(splt[0])

    # Get all active medias
    active_ids = [x['_id'] for x in fireEvent('media.with_status', 'active', with_doc=False, single=True)]

    medias = []
    if len(active_ids) > 0:

        # Order by title or randomize
        if not random:
            orders_ids = db.all('media_title')
            active_ids = [x['_id'] for x in orders_ids if x['_id'] in active_ids]
        else:
            rndm.shuffle(active_ids)

        for media_id in active_ids:
            try:
                media = db.get('id', media_id)
            except RecordDeleted:
                log.debug('Record already deleted: %s', media_id)
                continue
            except RecordNotFound:
                log.debug('Record not found: %s', media_id)
                continue

            pp = profile_pre.get(media.get('profile_id'))
            if not pp:
                continue

            eta = media['info'].get('release_date', {}) or {}
            coming_soon = False

            # Theater quality
            if pp.get('theater') and fireEvent('movie.searcher.could_be_released', True, eta, media['info']['year'], single=True):
                coming_soon = 'theater'
            elif pp.get('dvd') and fireEvent('movie.searcher.could_be_released', False, eta, media['info']['year'], single=True):
                coming_soon = 'dvd'

            if coming_soon:

                # Don't list older movies
                eta_date = eta.get(coming_soon)
                eta_3month_passed = eta_date < (now - 7862400)  # Release was more than 3 months ago

                if (not late and not eta_3month_passed) or \
                        (late and eta_3month_passed):
                    add = True

                    # Check if it doesn't have any releases
                    if late:
                        media['releases'] = fireEvent('release.for_media', media['_id'], single=True)

                        for release in media.get('releases', []):
                            if release.get('status') in ['snatched', 'available', 'seeding', 'downloaded']:
                                add = False
                                break

                    if add:
                        medias.append(media)

                    if len(medias) >= limit:
                        break

    return {
        'success': True,
        'empty': len(medias) == 0,
        'movies': medias,
    }
def __init__(self):
    """Initialize the movie media type and register its events and API views."""
    # Initialize this type
    super(MovieBase, self).__init__()
    self.initType()

    addApiView('movie.search', self.search, docs={
        'desc': 'Search the movie providers for a movie',
        'params': {
            'q': {'desc': 'The (partial) movie name you want to search for'},
        },
        'return': {'type': 'object', 'example': """{
    'success': True,
    'empty': bool, any movies returned or not,
    'movies': array, movies found,
}"""}
    })
    addApiView('movie.list', self.listView, docs={
        'desc': 'List movies in wanted list',
        'params': {
            'status': {'type': 'array or csv', 'desc': 'Filter movie by status. Example:"active,done"'},
            'release_status': {'type': 'array or csv', 'desc': 'Filter movie by status of its releases. Example:"snatched,available"'},
            'limit_offset': {'desc': 'Limit and offset the movie list. Examples: "50" or "50,30"'},
            'starts_with': {'desc': 'Starts with these characters. Example: "a" returns all movies starting with the letter "a"'},
            'search': {'desc': 'Search movie title'},
        },
        'return': {'type': 'object', 'example': """{
    'success': True,
    'empty': bool, any movies returned or not,
    'movies': array, movies found,
}"""}
    })
    addApiView('movie.get', self.getView, docs={
        'desc': 'Get a movie by id',
        'params': {
            'id': {'desc': 'The id of the movie'},
        }
    })
    addApiView('movie.refresh', self.refresh, docs={
        'desc': 'Refresh a movie by id',
        'params': {
            'id': {'desc': 'Movie ID(s) you want to refresh.', 'type': 'int (comma separated)'},
        }
    })
    addApiView('movie.available_chars', self.charView)
    addApiView('movie.add', self.addView, docs={
        'desc': 'Add new movie to the wanted list',
        'params': {
            'identifier': {'desc': 'IMDB id of the movie your want to add.'},
            'profile_id': {'desc': 'ID of quality profile you want the add the movie in. If empty will use the default profile.'},
            'title': {'desc': 'Movie title to use for searches. Has to be one of the titles returned by movie.search.'},
        }
    })
    addApiView('movie.edit', self.edit, docs={
        'desc': 'Add new movie to the wanted list',
        'params': {
            'id': {'desc': 'Movie ID(s) you want to edit.', 'type': 'int (comma separated)'},
            'profile_id': {'desc': 'ID of quality profile you want the edit the movie to.'},
            'default_title': {'desc': 'Movie title to use for searches. Has to be one of the titles returned by movie.search.'},
        }
    })
    addApiView('movie.delete', self.deleteView, docs={
        'desc': 'Delete a movie from the wanted list',
        'params': {
            'id': {'desc': 'Movie ID(s) you want to delete.', 'type': 'int (comma separated)'},
            'delete_from': {'desc': 'Delete movie from this page', 'type': 'string: all (default), wanted, manage'},
        }
    })

    addEvent('movie.add', self.add)
    addEvent('movie.delete', self.delete)
    addEvent('movie.get', self.get)
    addEvent('movie.list', self.list)
    addEvent('movie.restatus', self.restatus)

    # Clean releases that didn't have activity in the last week
    addEvent('app.load', self.cleanReleases)
    fireEvent('schedule.interval', 'movie.clean_releases', self.cleanReleases, hours=4)
def list(self, status=None, release_status=None, limit_offset=None, starts_with=None, search=None, order=None):
    """Build the wanted-movie list for the API.

    status         -- movie status name(s) to filter on (str or list).
    release_status -- release status name(s) to filter on (str or list).
    limit_offset   -- 'limit' or 'limit,offset' string (or pre-split list).
    starts_with    -- first-character filter; anything outside a-z groups
                      into the '#' bucket.
    search         -- substring match on the simple title.
    order          -- 'release_order' sorts on last release edit instead
                      of title.

    Returns (total_count, movies): movies is a list of dicts with their
    releases and release counts merged in, in query order.
    """
    db = get_session()

    # Make a list from string
    if status and not isinstance(status, (list, tuple)):
        status = [status]
    if release_status and not isinstance(release_status, (list, tuple)):
        release_status = [release_status]

    # Query movie ids only first; the heavy joinedload happens later on
    # just the ids that survive filtering/paging
    q = db.query(Movie) \
        .with_entities(Movie.id) \
        .group_by(Movie.id)

    # Filter on movie status
    if status and len(status) > 0:
        statuses = fireEvent('status.get', status, single=len(status) > 1)
        statuses = [s.get('id') for s in statuses]
        q = q.filter(Movie.status_id.in_(statuses))

    # Filter on release status
    if release_status and len(release_status) > 0:
        q = q.join(Movie.releases)
        statuses = fireEvent('status.get', release_status, single=len(release_status) > 1)
        statuses = [s.get('id') for s in statuses]
        q = q.filter(Release.status_id.in_(statuses))

    # Only join when searching / ordering
    if starts_with or search or order != 'release_order':
        q = q.join(Movie.library, Library.titles) \
            .filter(LibraryTitle.default == True)

    # Add search filters
    filter_or = []

    if starts_with:
        starts_with = toUnicode(starts_with.lower())
        if starts_with in ascii_lowercase:
            filter_or.append(LibraryTitle.simple_title.startswith(starts_with))
        else:
            # '#' bucket: titles that do NOT start with any a-z letter
            ignore = []
            for letter in ascii_lowercase:
                ignore.append(LibraryTitle.simple_title.startswith(toUnicode(letter)))
            filter_or.append(not_(or_(*ignore)))

    if search:
        filter_or.append(LibraryTitle.simple_title.like('%%' + search + '%%'))

    if len(filter_or) > 0:
        q = q.filter(or_(*filter_or))

    total_count = q.count()
    if total_count == 0:
        return 0, []

    if order == 'release_order':
        q = q.order_by(desc(Release.last_edit))
    else:
        q = q.order_by(asc(LibraryTitle.simple_title))

    if limit_offset:
        splt = splitString(limit_offset) if isinstance(limit_offset, (str, unicode)) else limit_offset
        limit = splt[0]
        # BUGFIX: was "len(splt) is 1" — identity check on an int, which
        # only works through CPython's small-int cache
        offset = 0 if len(splt) == 1 else splt[1]
        q = q.limit(limit).offset(offset)

    # Get all movie_ids in sorted order
    movie_ids = [m.id for m in q.all()]

    # List release statuses
    releases = db.query(Release) \
        .filter(Release.movie_id.in_(movie_ids)) \
        .all()

    release_statuses = dict((m, set()) for m in movie_ids)
    releases_count = dict((m, 0) for m in movie_ids)
    for release in releases:
        release_statuses[release.movie_id].add('%d,%d' % (release.status_id, release.quality_id))
        releases_count[release.movie_id] += 1

    # Get main movie data
    q2 = db.query(Movie) \
        .options(joinedload_all('library.titles')) \
        .options(joinedload_all('library.files')) \
        .options(joinedload_all('status')) \
        .options(joinedload_all('files'))
    q2 = q2.filter(Movie.id.in_(movie_ids))
    results = q2.all()

    # Create dict by movie id
    movie_dict = {}
    for movie in results:
        movie_dict[movie.id] = movie

    # List movies based on movie_ids order
    movies = []
    for movie_id in movie_ids:
        releases = []
        for r in release_statuses.get(movie_id):
            x = splitString(r)
            releases.append({'status_id': x[0], 'quality_id': x[1]})

        # Merge releases with movie dict
        movies.append(mergeDicts(movie_dict[movie_id].to_dict({
            'library': {'titles': {}, 'files': {}},
            'files': {},
        }), {
            'releases': releases,
            'releases_count': releases_count.get(movie_id),
        }))

    db.expire_all()
    return total_count, movies
def search(self, movie, quality):
    """Search NZBClub's RSS search for releases of the movie/quality.

    Returns a list of searcher result dicts ('id', 'url', 'size', ...);
    empty when the provider is disabled or nothing usable is returned.
    """
    results = []
    if self.isDisabled():
        return results

    q = '"%s %s" %s' % (simplifyString(getTitle(movie['library'])), movie['library']['year'], quality.get('identifier'))
    # Exclude the globally configured ignore-words from the query
    for ignored in Env.setting('ignored_words', 'searcher').split(','):
        q = '%s -%s' % (q, ignored.strip())

    params = {
        'q': q,
        'ig': '1',
        'rpp': 200,
        'st': 1,
        'sp': 1,
        'ns': 1,
    }

    cache_key = 'nzbclub.%s.%s.%s' % (movie['library']['identifier'], quality.get('identifier'), q)
    data = self.getCache(cache_key, self.urls['search'] % tryUrlencode(params))
    if data:
        try:
            try:
                data = XMLTree.fromstring(data)
                nzbs = self.getElements(data, 'channel/item')
            except Exception as e:
                log.debug('%s, %s', (self.getName(), e))
                return results

            for nzb in nzbs:
                nzbclub_id = tryInt(self.getTextElement(nzb, "link").split('/nzb_view/')[1].split('/')[0])
                enclosure = self.getElement(nzb, "enclosure").attrib
                size = enclosure['length']
                date = self.getTextElement(nzb, "pubDate")

                def extra_check(item):
                    # BUGFIX: this callback used the loop-closure variables
                    # 'new' and 'nzbclub_id'; closures are late-bound, so
                    # every extra_check saw the values from the *last* loop
                    # iteration (wrong cache key, wrong logged name). Use
                    # the passed-in item instead.
                    full_description = self.getCache('nzbclub.%s' % item['id'], item['detail_url'], cache_timeout=25920000)
                    for ignored in ['ARCHIVE inside ARCHIVE', 'Incomplete', 'repair impossible']:
                        if ignored in full_description:
                            log.info('Wrong: Seems to be passworded or corrupted files: %s', item['name'])
                            return False
                    return True

                new = {
                    'id': nzbclub_id,
                    'type': 'nzb',
                    'provider': self.getName(),
                    'name': toUnicode(self.getTextElement(nzb, "title")),
                    'age': self.calculateAge(int(time.mktime(parse(date).timetuple()))),
                    'size': tryInt(size) / 1024 / 1024,
                    # Spaces break some downloaders; keep them encoded
                    'url': enclosure['url'].replace(' ', '_'),
                    'download': self.download,
                    'detail_url': self.getTextElement(nzb, "link"),
                    'description': '',
                    'get_more_info': self.getMoreInfo,
                    'extra_check': extra_check
                }

                is_correct_movie = fireEvent('searcher.correct_movie', nzb=new, movie=movie, quality=quality, imdb_results=False, single=True)
                if is_correct_movie:
                    new['score'] = fireEvent('score.calculate', new, movie, single=True)
                    results.append(new)
                    self.found(new)

            return results
        except SyntaxError:
            log.error('Failed to parse XML response from NZBClub')
    return results
def update(self, identifier, default_title='', force=False):
    """Refresh a season's library record from the info providers.

    identifier    -- season identifier of the SeasonLibrary row.
    default_title -- preferred title; currently unused because every
                     title is stored as default (see XXX below).
    force         -- re-fetch info even when the row is already 'done'.

    Returns the updated library row as a dict, False when no season info
    could be fetched, or None during shutdown / when no row matches.
    """
    if self.shuttingDown():
        return

    db = get_session()
    library = db.query(SeasonLibrary).filter_by(identifier=identifier).first()
    done_status = fireEvent('status.get', 'done', single=True)

    if library:
        library_dict = library.to_dict(self.default_dict)

        do_update = True

        # Season info is fetched via the parent show's identifier
        parent_identifier = None
        if library.parent is not None:
            parent_identifier = library.parent.identifier

        # Already fully processed: skip the main-info/titles update below
        if library.status_id == done_status.get('id') and not force:
            do_update = False

        season_params = {'season_identifier': identifier}
        info = fireEvent('season.info', merge=True, identifier=parent_identifier, params=season_params)

        # Don't need those here
        try: del info['in_wanted']
        except: pass
        try: del info['in_library']
        except: pass

        if not info or len(info) == 0:
            log.error('Could not update, no movie info to work with: %s', identifier)
            return False

        # Main info
        if do_update:
            library.plot = toUnicode(info.get('plot', ''))
            library.tagline = toUnicode(info.get('tagline', ''))
            library.year = info.get('year', 0)
            library.status_id = done_status.get('id')
            library.season_number = tryInt(info.get('seasonnumber', None))
            library.info.update(info)
            db.commit()

            # Titles: wipe and re-insert from the fresh info
            [db.delete(title) for title in library.titles]
            db.commit()

            titles = info.get('titles', [])
            log.debug('Adding titles: %s', titles)
            counter = 0
            for title in titles:
                if not title:
                    continue
                title = toUnicode(title)
                t = LibraryTitle(
                    title=title,
                    simple_title=self.simplifyTitle(title),
                    # XXX: default was None; so added a quick hack since we don't really need titiles for seasons anyway
                    #default = (len(default_title) == 0 and counter == 0) or len(titles) == 1 or title.lower() == toUnicode(default_title.lower()) or (toUnicode(default_title) == u'' and toUnicode(titles[0]) == title)
                    default=True,
                )
                library.titles.append(t)
                counter += 1
            db.commit()

        # Files: download the first usable poster and attach it
        images = info.get('images', [])
        for image_type in ['poster']:
            for image in images.get(image_type, []):
                if not isinstance(image, (str, unicode)):
                    continue
                file_path = fireEvent('file.download', url=image, single=True)
                if file_path:
                    file_obj = fireEvent('file.add', path=file_path, type_tuple=('image', image_type), single=True)
                    try:
                        file_obj = db.query(File).filter_by(id=file_obj.get('id')).one()
                        library.files.append(file_obj)
                        db.commit()
                        # One poster is enough
                        break
                    except:
                        log.debug('Failed to attach to library: %s', traceback.format_exc())

        library_dict = library.to_dict(self.default_dict)

        db.expire_all()
        return library_dict
def search(self, movie, quality): results = [] if self.isDisabled(): return results q = '"%s %s" %s' % (simplifyString(getTitle(movie['library'])), movie['library']['year'], quality.get('identifier')) arguments = tryUrlencode({ 'q': q, 'age': Env.setting('retention', 'nzb'), 'sort': 'agedesc', 'minsize': quality.get('size_min'), 'maxsize': quality.get('size_max'), 'rating': 1, 'max': 250, 'more': 1, 'complete': 1, }) url = "%s?%s" % (self.urls['api'], arguments) cache_key = 'nzbindex.%s.%s' % (movie['library']['identifier'], quality.get('identifier')) data = self.getCache(cache_key, url) if data: try: try: data = XMLTree.fromstring(data) nzbs = self.getElements(data, 'channel/item') except Exception, e: log.debug('%s, %s', (self.getName(), e)) return results for nzb in nzbs: enclosure = self.getElement(nzb, 'enclosure').attrib nzbindex_id = int(self.getTextElement(nzb, "link").split('/')[4]) try: description = self.getTextElement(nzb, "description") except: description = '' def extra_check(new): if '#c20000' in new['description'].lower(): log.info('Wrong: Seems to be passworded: %s', new['name']) return False return True new = { 'id': nzbindex_id, 'type': 'nzb', 'provider': self.getName(), 'download': self.download, 'name': self.getTextElement(nzb, "title"), 'age': self.calculateAge(int(time.mktime(parse(self.getTextElement(nzb, "pubDate")).timetuple()))), 'size': tryInt(enclosure['length']) / 1024 / 1024, 'url': enclosure['url'], 'detail_url': enclosure['url'].replace('/download/', '/release/'), 'description': description, 'get_more_info': self.getMoreInfo, 'extra_check': extra_check, 'check_nzb': True, } is_correct_movie = fireEvent('searcher.correct_movie', nzb = new, movie = movie, quality = quality, imdb_results = False, single = True) if is_correct_movie: new['score'] = fireEvent('score.calculate', new, movie, single = True) results.append(new) self.found(new) return results except:
def getProgressForAll(self):
    """Return the merged progress reported by all searcher plugins."""
    return fireEvent('searcher.progress', merge=True)
def determineMedia(self, group, release_download=None):
    """Figure out which movie a scanned file group belongs to.

    Tries, in order: the downloader-supplied imdb id, a CP(tt...) tag in
    the file paths, imdb ids inside nfo files, imdb ids in any filename,
    and finally a name/year search built from the group identifiers.

    Returns the media document from the db, a bare
    {'identifier', 'info'} dict when the movie isn't in the library yet,
    or {} when no imdb id could be determined.
    """
    # Get imdb id from downloader
    imdb_id = release_download and release_download.get('imdb_id')
    if imdb_id:
        log.debug('Found movie via imdb id from it\'s download id: %s', release_download.get('imdb_id'))

    files = group['files']

    # Check for CP(imdb_id) string in the file paths
    if not imdb_id:
        for cur_file in files['movie']:
            imdb_id = self.getCPImdb(cur_file)
            if imdb_id:
                log.debug('Found movie via CP tag: %s', cur_file)
                break

    # Check and see if nfo contains the imdb-id
    nfo_file = None
    if not imdb_id:
        try:
            for nf in files['nfo']:
                imdb_id = getImdb(nf, check_inside=True)
                if imdb_id:
                    log.debug('Found movie via nfo file: %s', nf)
                    nfo_file = nf
                    break
        except:
            pass

    # Check and see if filenames contains the imdb-id
    if not imdb_id:
        try:
            for filetype in files:
                for filetype_file in files[filetype]:
                    imdb_id = getImdb(filetype_file)
                    if imdb_id:
                        # BUGFIX: logged nfo_file (often None) instead of
                        # the file that actually matched
                        log.debug('Found movie via imdb in filename: %s', filetype_file)
                        break
                # BUGFIX: break the outer loop too; continuing re-assigned
                # imdb_id on the next file type and could clobber the hit
                if imdb_id:
                    break
        except:
            pass

    # Search based on identifiers
    if not imdb_id:
        for identifier in group['identifiers']:
            if len(identifier) > 2:
                try:
                    filename = list(group['files'].get('movie'))[0]
                except:
                    filename = None

                name_year = self.getReleaseNameYear(identifier, file_name=filename if not group['is_dvd'] else None)
                if name_year.get('name') and name_year.get('year'):
                    search_q = '%(name)s %(year)s' % name_year
                    movie = fireEvent('movie.search', q=search_q, merge=True, limit=1)

                    # Try with other
                    if len(movie) == 0 and name_year.get('other') and name_year['other'].get('name') and name_year['other'].get('year'):
                        search_q2 = '%(name)s %(year)s' % name_year.get('other')
                        if search_q2 != search_q:
                            movie = fireEvent('movie.search', q=search_q2, merge=True, limit=1)

                    if len(movie) > 0:
                        imdb_id = movie[0].get('imdb')
                        log.debug('Found movie via search: %s', identifier)
                        if imdb_id:
                            break
            else:
                log.debug('Identifier too short to use for search: %s', identifier)

    if imdb_id:
        try:
            db = get_db()
            return db.get('media', 'imdb-%s' % imdb_id, with_doc=True)['doc']
        except:
            log.debug('Movie "%s" not in library, just getting info', imdb_id)
            return {
                'identifier': imdb_id,
                'info': fireEvent('movie.info', identifier=imdb_id, merge=True, extended=False)
            }

    log.error('No imdb_id found for %s. Add a NFO file with IMDB id or add the year to the filename.', group['identifiers'])
    return {}
def scan(self, folder=None, files=None, release_download=None, simple=False, newer_than=0, return_ignored=True, check_file_date=True, on_found=None):
    """Scan a folder (or explicit file list) and group files into movies.

    folder           -- base folder to walk.
    files            -- optional explicit file list; skips the walk and
                        disables the file-age check.
    release_download -- downloader info; when set, exactly one group is
                        expected (otherwise it is dropped with a warning).
    simple           -- skip slow extras like subtitle language detection.
    newer_than       -- only keep groups with files changed after this
                        unix timestamp (0 = keep all).
    return_ignored   -- include groups containing .ignore files.
    check_file_date  -- drop groups whose files look like they are still
                        being unpacked.
    on_found         -- optional callback(group, total_found, remaining)
                        fired per processed group.

    Returns {identifier: group} of processed movie groups.
    """
    folder = sp(folder)

    if not folder or not os.path.isdir(folder):
        log.error('Folder doesn\'t exists: %s', folder)
        return {}

    # Get movie "master" files
    movie_files = {}
    leftovers = []

    # Scan all files of the folder if no files are set
    if not files:
        try:
            files = []
            for root, dirs, walk_files in os.walk(folder, followlinks=True):
                files.extend([sp(os.path.join(sp(root), ss(filename))) for filename in walk_files])

                # Break if CP wants to shut down
                if self.shuttingDown():
                    break
        except:
            log.error('Failed getting files from %s: %s', (folder, traceback.format_exc()))

        log.debug('Found %s files to scan and group in %s', (len(files), folder))
    else:
        check_file_date = False
        files = [sp(x) for x in files]

    for file_path in files:
        if not os.path.exists(file_path):
            continue

        # Remove ignored files
        if self.isSampleFile(file_path):
            leftovers.append(file_path)
            continue
        elif not self.keepFile(file_path):
            continue

        is_dvd_file = self.isDVDFile(file_path)
        if self.filesizeBetween(file_path, self.file_sizes['movie']) or is_dvd_file:  # Minimal 300MB files or is DVD file

            # Normal identifier
            identifier = self.createStringIdentifier(file_path, folder, exclude_filename=is_dvd_file)
            identifiers = [identifier]

            # Identifier with quality
            quality = fireEvent('quality.guess', files=[file_path], size=self.getFileSize(file_path), single=True) if not is_dvd_file else {'identifier': 'dvdr'}
            if quality:
                identifier_with_quality = '%s %s' % (identifier, quality.get('identifier', ''))
                identifiers = [identifier_with_quality, identifier]

            if not movie_files.get(identifier):
                movie_files[identifier] = {
                    'unsorted_files': [],
                    'identifiers': identifiers,
                    'is_dvd': is_dvd_file,
                }

            movie_files[identifier]['unsorted_files'].append(file_path)
        else:
            leftovers.append(file_path)

        # Break if CP wants to shut down
        if self.shuttingDown():
            break

    # Cleanup
    del files

    # Sort reverse, this prevents "Iron man 2" from getting grouped with "Iron man" as the "Iron Man 2"
    # files will be grouped first.
    leftovers = set(sorted(leftovers, reverse=True))

    # Group files minus extension
    ignored_identifiers = []
    for identifier, group in movie_files.items():
        if identifier not in group['identifiers'] and len(identifier) > 0:
            group['identifiers'].append(identifier)

        log.debug('Grouping files: %s', identifier)

        has_ignored = 0
        for file_path in list(group['unsorted_files']):
            ext = getExt(file_path)
            wo_ext = file_path[:-(len(ext) + 1)]
            found_files = set([i for i in leftovers if wo_ext in i])
            group['unsorted_files'].extend(found_files)
            leftovers = leftovers - found_files

            has_ignored += 1 if ext == 'ignore' else 0

        if has_ignored == 0:
            for file_path in list(group['unsorted_files']):
                ext = getExt(file_path)
                has_ignored += 1 if ext == 'ignore' else 0

        if has_ignored > 0:
            ignored_identifiers.append(identifier)

        # Break if CP wants to shut down
        if self.shuttingDown():
            break

    # Create identifiers for all leftover files
    path_identifiers = {}
    for file_path in leftovers:
        identifier = self.createStringIdentifier(file_path, folder)

        if not path_identifiers.get(identifier):
            path_identifiers[identifier] = []

        path_identifiers[identifier].append(file_path)

    # Group the files based on the identifier
    delete_identifiers = []
    for identifier, found_files in path_identifiers.items():
        log.debug('Grouping files on identifier: %s', identifier)

        group = movie_files.get(identifier)
        if group:
            group['unsorted_files'].extend(found_files)
            delete_identifiers.append(identifier)

            # Remove the found files from the leftover stack
            leftovers = leftovers - set(found_files)

        # Break if CP wants to shut down
        if self.shuttingDown():
            break

    # Cleaning up used
    for identifier in delete_identifiers:
        if path_identifiers.get(identifier):
            del path_identifiers[identifier]
    del delete_identifiers

    # Group based on folder
    delete_identifiers = []
    for identifier, found_files in path_identifiers.items():
        log.debug('Grouping files on foldername: %s', identifier)

        for ff in found_files:
            new_identifier = self.createStringIdentifier(os.path.dirname(ff), folder)

            group = movie_files.get(new_identifier)
            if group:
                group['unsorted_files'].extend([ff])
                delete_identifiers.append(identifier)

                # Remove the found files from the leftover stack
                # BUGFIX: was "leftovers -= leftovers - set([ff])", which
                # removes every leftover EXCEPT ff (inverted set algebra)
                leftovers -= set([ff])

        # Break if CP wants to shut down
        if self.shuttingDown():
            break

    # leftovers should be empty
    if leftovers:
        log.debug('Some files are still left over: %s', leftovers)

    # Cleaning up used
    for identifier in delete_identifiers:
        if path_identifiers.get(identifier):
            del path_identifiers[identifier]
    del delete_identifiers

    # Make sure we remove older / still extracting files
    valid_files = {}
    while not self.shuttingDown():
        try:
            identifier, group = movie_files.popitem()
        except:
            break

        # Check if movie is fresh and maybe still unpacking, ignore files newer than 1 minute
        if check_file_date:
            files_too_new, time_string = self.checkFilesChanged(group['unsorted_files'])
            if files_too_new:
                log.info('Files seem to be still unpacking or just unpacked (created on %s), ignoring for now: %s', (time_string, identifier))

                # Delete the unsorted list
                del group['unsorted_files']
                continue

        # Only process movies newer than x
        if newer_than and newer_than > 0:
            has_new_files = False
            for cur_file in group['unsorted_files']:
                file_time = self.getFileTimes(cur_file)
                if file_time[0] > newer_than or file_time[1] > newer_than:
                    has_new_files = True
                    break

            if not has_new_files:
                log.debug('None of the files have changed since %s for %s, skipping.', (time.ctime(newer_than), identifier))

                # Delete the unsorted list
                del group['unsorted_files']
                continue

        valid_files[identifier] = group

    del movie_files

    total_found = len(valid_files)

    # Make sure only one movie was found if a download ID is provided
    if release_download and total_found == 0:
        log.info('Download ID provided (%s), but no groups found! Make sure the download contains valid media files (fully extracted).', release_download.get('imdb_id'))
    elif release_download and total_found > 1:
        log.info('Download ID provided (%s), but more than one group found (%s). Ignoring Download ID...', (release_download.get('imdb_id'), len(valid_files)))
        release_download = None

    # Determine file types
    processed_movies = {}
    while not self.shuttingDown():
        try:
            identifier, group = valid_files.popitem()
        except:
            break

        if return_ignored is False and identifier in ignored_identifiers:
            log.debug('Ignore file found, ignoring release: %s', identifier)
            total_found -= 1
            continue

        # Group extra (and easy) files first
        group['files'] = {
            'movie_extra': self.getMovieExtras(group['unsorted_files']),
            'subtitle': self.getSubtitles(group['unsorted_files']),
            'subtitle_extra': self.getSubtitlesExtras(group['unsorted_files']),
            'nfo': self.getNfo(group['unsorted_files']),
            'trailer': self.getTrailers(group['unsorted_files']),
            'leftover': set(group['unsorted_files']),
        }

        # Media files
        if group['is_dvd']:
            group['files']['movie'] = self.getDVDFiles(group['unsorted_files'])
        else:
            group['files']['movie'] = self.getMediaFiles(group['unsorted_files'])

        if len(group['files']['movie']) == 0:
            log.error('Couldn\'t find any movie files for %s', identifier)
            total_found -= 1
            continue

        log.debug('Getting metadata for %s', identifier)
        group['meta_data'] = self.getMetaData(group, folder=folder, release_download=release_download)

        # Subtitle meta
        group['subtitle_language'] = self.getSubtitleLanguage(group) if not simple else {}

        # Get parent dir from movie files
        for movie_file in group['files']['movie']:
            group['parentdir'] = os.path.dirname(movie_file)
            group['dirname'] = None

            folder_names = group['parentdir'].replace(folder, '').split(os.path.sep)
            folder_names.reverse()

            # Try and get a proper dirname, so no "A", "Movie", "Download" etc
            for folder_name in folder_names:
                if folder_name.lower() not in self.ignore_names and len(folder_name) > 2:
                    group['dirname'] = folder_name
                    break

            break

        # Leftover "sorted" files
        for file_type in group['files']:
            # BUGFIX: was "not file_type is 'leftover'" — string identity
            # instead of equality
            if file_type != 'leftover':
                group['files']['leftover'] -= set(group['files'][file_type])
                group['files'][file_type] = list(group['files'][file_type])
        group['files']['leftover'] = list(group['files']['leftover'])

        # Delete the unsorted list
        del group['unsorted_files']

        # Determine movie
        group['media'] = self.determineMedia(group, release_download=release_download)
        if not group['media']:
            log.error('Unable to determine media: %s', group['identifiers'])
        else:
            group['identifier'] = getIdentifier(group['media']) or group['media']['info'].get('imdb')

        processed_movies[identifier] = group

        # Notify parent & progress on something found
        if on_found:
            on_found(group, total_found, len(valid_files))

        # Wait for all the async events calm down a bit
        while threading.activeCount() > 100 and not self.shuttingDown():
            log.debug('Too many threads active, waiting a few seconds')
            time.sleep(10)

    if len(processed_movies) > 0:
        log.info('Found %s movies in the folder %s', (len(processed_movies), folder))
    else:
        log.debug('Found no movies in the folder %s', folder)
    return processed_movies
def download(self, id=None, **kwargs):
    """API handler: manually push a stored release to the downloader.

    id -- primary key of the release row (Relea) to snatch.

    Returns {'success': bool}. Emits 'notify.frontend' progress messages
    and flips the release status to 'snatched' on success (unless the
    release is already marked 'done').
    """
    db = get_session()

    snatched_status, done_status = fireEvent('status.get', ['snatched', 'done'], single=True)

    rel = db.query(Relea).filter_by(id=id).first()
    if rel:
        # Release info rows are key/value pairs; flatten to a plain dict
        item = {}
        for info in rel.info:
            item[info.identifier] = info.value

        fireEvent('notify.frontend', type='release.download', data=True, message='Snatching "%s"' % item['name'])

        # Get matching provider
        provider = fireEvent('provider.belongs_to', item['url'], provider=item.get('provider'), single=True)

        # Magnet links are resolved by the downloader itself; anything else
        # goes through the provider's (possibly authenticated) download call
        if item.get('protocol', item.get('type')) != 'torrent_magnet':
            item['download'] = provider.loginDownload if provider.urls.get('login') else provider.download

        success = fireEvent('searcher.download', data=item, movie=rel.movie.to_dict({
            'profile': {'types': {'quality': {}}},
            'releases': {'status': {}, 'quality': {}},
            'library': {'titles': {}, 'files': {}},
            'files': {}
        }), manual=True, single=True)

        if success:
            db.expunge_all()
            rel = db.query(Relea).filter_by(id=id).first()  # Get release again

            # Don't downgrade a release that is already marked done
            if rel.status_id != done_status.get('id'):
                rel.status_id = snatched_status.get('id')
                db.commit()

            fireEvent('notify.frontend', type='release.download', data=True, message='Successfully snatched "%s"' % item['name'])
        return {'success': success}
    else:
        log.error('Couldn\'t find release with id: %s', id)

    return {'success': False}
def search(self, movie, quality):
    """Search SceneAccess for torrent releases of the movie/quality.

    Scrapes the site's search results table (requires a logged-in
    session). Returns a list of searcher result dicts; empty when the
    provider is disabled, login fails or nothing matches.
    """
    results = []
    if self.isDisabled():
        return results

    # The category id appears twice in the search URL (site URL format)
    url = self.urls['search'] % (self.getCatId(quality['identifier'])[0], self.getCatId(quality['identifier'])[0])

    q = '%s %s' % (movie['library']['identifier'], quality.get('identifier'))
    arguments = tryUrlencode({
        'search': q,
        'method': 1,
    })
    url = "%s&%s" % (url, arguments)

    # Do login for the cookies
    if not self.login_opener and not self.login():
        return results

    cache_key = 'sceneaccess.%s.%s' % (movie['library']['identifier'], quality.get('identifier'))
    data = self.getCache(cache_key, url, opener=self.login_opener)

    if data:
        html = BeautifulSoup(data)

        try:
            resultsTable = html.find('table', attrs={'id': 'torrents-table'})
            if resultsTable is None:
                return results

            entries = resultsTable.find_all('tr', attrs={'class': 'tt_row'})
            for result in entries:
                # Per-row cells: name/download/leechers columns
                link = result.find('td', attrs={'class': 'ttr_name'}).find('a')
                url = result.find('td', attrs={'class': 'td_dl'}).find('a')
                leechers = result.find('td', attrs={'class': 'ttr_leechers'}).find('a')
                id = link['href'].replace('details?id=', '')

                new = {
                    'id': id,
                    'type': 'torrent',
                    'check_nzb': False,
                    'description': '',
                    'provider': self.getName(),
                    'name': link['title'],
                    'url': self.urls['download'] % url['href'],
                    'detail_url': self.urls['detail'] % id,
                    'size': self.parseSize(result.find('td', attrs={'class': 'ttr_size'}).contents[0]),
                    'seeders': tryInt(result.find('td', attrs={'class': 'ttr_seeders'}).find('a').string),
                    'leechers': tryInt(leechers.string) if leechers else 0,
                    'download': self.loginDownload,
                    'get_more_info': self.getMoreInfo,
                }

                new['score'] = fireEvent('score.calculate', new, movie, single=True)
                is_correct_movie = fireEvent('searcher.correct_movie', nzb=new, movie=movie, quality=quality, imdb_results=False, single=True)
                if is_correct_movie:
                    results.append(new)
                    self.found(new)

            return results
        except:
            # Broad catch: any change in the site's HTML lands here
            log.error('Failed getting results from %s: %s', (self.getName(), traceback.format_exc()))

    return []
def getMetaData(self, group, folder='', release_download=None):
    """Collect technical metadata for a scanned movie file group.

    group            -- group from scan(); reads group['files']['movie']
                        and group['is_dvd'].
    folder           -- base folder, stripped from the path when guessing
                        the release group name.
    release_download -- downloader info; when it carries a quality, that
                        quality overrides the guessed one.

    Returns a dict with keys like 'video', 'audio', 'audio_channels',
    'size', 'quality', 'quality_type', 'group', 'source' (and '3d_type'
    for 3D releases).
    """
    data = {}
    files = list(group['files']['movie'])

    for cur_file in files:
        if not self.filesizeBetween(cur_file, self.file_sizes['movie']):
            continue  # Ignore smaller files

        if not data.get('audio'):  # Only get metadata from first media file
            meta = self.getMeta(cur_file)

            try:
                data['titles'] = meta.get('titles', [])
                data['video'] = meta.get('video', self.getCodec(cur_file, self.codecs['video']))
                data['audio'] = meta.get('audio', self.getCodec(cur_file, self.codecs['audio']))
                data['audio_channels'] = meta.get('audio_channels', 2.0)
                if meta.get('resolution_width'):
                    data['resolution_width'] = meta.get('resolution_width')
                    data['resolution_height'] = meta.get('resolution_height')
                    data['aspect'] = round(float(meta.get('resolution_width')) / meta.get('resolution_height', 1), 2)
                else:
                    # Fall back to guessing resolution from the filename
                    data.update(self.getResolution(cur_file))
            except:
                log.debug('Error parsing metadata: %s %s', (cur_file, traceback.format_exc()))
                pass

        # Total size accumulates over all media files in the group
        data['size'] = data.get('size', 0) + self.getFileSize(cur_file)

    data['quality'] = None
    quality = fireEvent('quality.guess', size=data.get('size'), files=files, extra=data, single=True)

    # Use the quality that we snatched but check if it matches our guess
    if release_download and release_download.get('quality'):
        data['quality'] = fireEvent('quality.single', release_download.get('quality'), single=True)
        data['quality']['is_3d'] = release_download.get('is_3d', 0)
        if data['quality']['identifier'] != quality['identifier']:
            log.info('Different quality snatched than detected for %s: %s vs. %s. Assuming snatched quality is correct.', (files[0], data['quality']['identifier'], quality['identifier']))
        if data['quality']['is_3d'] != quality['is_3d']:
            log.info('Different 3d snatched than detected for %s: %s vs. %s. Assuming snatched 3d is correct.', (files[0], data['quality']['is_3d'], quality['is_3d']))

    # No snatched quality: use the guess, or a dvd-based last resort
    if not data['quality']:
        data['quality'] = quality

    if not data['quality']:
        data['quality'] = fireEvent('quality.single', 'dvdr' if group['is_dvd'] else 'dvdrip', single=True)

    data['quality_type'] = 'HD' if data.get('resolution_width', 0) >= 1280 or data['quality'].get('hd') else 'SD'

    # Strip the CP(tt...) tag before guessing release group / source
    filename = re.sub(self.cp_imdb, '', files[0])
    data['group'] = self.getGroup(filename[len(folder):])
    data['source'] = self.getSourceMedia(filename)
    if data['quality'].get('is_3d', 0):
        data['3d_type'] = self.get3dType(filename)
    return data
class Release(Plugin):
    """Tracks downloadable releases per movie and exposes the release.*
    API endpoints (download/delete/ignore)."""

    def __init__(self):
        addEvent('release.add', self.add)

        addApiView('release.download', self.download, docs={
            'desc': 'Send a release manually to the downloaders',
            'params': {
                'id': {'type': 'id', 'desc': 'ID of the release object in release-table'}
            }
        })
        addApiView('release.delete', self.deleteView, docs={
            'desc': 'Delete releases',
            'params': {
                'id': {'type': 'id', 'desc': 'ID of the release object in release-table'}
            }
        })
        addApiView('release.ignore', self.ignore, docs={
            'desc': 'Toggle ignore, for bad or wrong releases',
            'params': {
                'id': {'type': 'id', 'desc': 'ID of the release object in release-table'}
            }
        })

        addEvent('release.delete', self.delete)
        addEvent('release.clean', self.clean)

    def add(self, group):
        """Persist a scanned file group as a 'done' release in the db.

        Creates the movie row when the library entry has none yet, reuses
        a matching (or still-snatched) release row when possible, and
        attaches all grouped files to it. Returns True when stored.
        """
        db = get_session()

        identifier = '%s.%s.%s' % (group['library']['identifier'], group['meta_data'].get('audio', 'unknown'), group['meta_data']['quality']['identifier'])

        # Add movie
        done_status = fireEvent('status.get', 'done', single=True)
        movie = db.query(Movie).filter_by(library_id=group['library'].get('id')).first()
        if not movie:
            movie = Movie(
                library_id=group['library'].get('id'),
                profile_id=0,
                status_id=done_status.get('id')
            )
            db.add(movie)
            db.commit()

        # Add Release: reuse a snatched release of the same movie if present
        snatched_status = fireEvent('status.get', 'snatched', single=True)
        rel = db.query(Relea).filter(
            or_(
                Relea.identifier == identifier,
                and_(
                    Relea.identifier.startswith(group['library']['identifier']),
                    Relea.status_id == snatched_status.get('id')
                )
            )
        ).first()
        if not rel:
            rel = Relea(
                identifier=identifier,
                movie=movie,
                quality_id=group['meta_data']['quality'].get('id'),
                status_id=done_status.get('id')
            )
            db.add(rel)
            db.commit()

        # Add each file type
        for type in group['files']:
            for cur_file in group['files'][type]:
                # BUGFIX: was "type is 'movie'" — string identity instead of
                # equality; only worked through CPython string interning
                added_file = self.saveFile(cur_file, type=type, include_media_info=(type == 'movie'))
                try:
                    added_file = db.query(File).filter_by(id=added_file.get('id')).one()
                    rel.files.append(added_file)
                    db.commit()
                except Exception as e:
                    log.debug('Failed to attach "%s" to release: %s', (cur_file, e))

        fireEvent('movie.restatus', movie.id)

        #db.close()
        return True
def add(self, params=None, force_readd=True, search_after=True, update_library=False, status_id=None):
    """Add (or re-add) a movie to the wanted list.

    params         -- must contain an imdb 'identifier'; optional keys:
                      'title', 'profile_id', 'category_id',
                      'ignore_previous'.
    force_readd    -- re-activate an existing movie and clean/ignore its
                      snatched history.
    search_after   -- trigger a search once the movie is stored (also
                      gated by the 'search_on_add' setting).
    update_library -- refresh library info after adding.
    status_id      -- explicit status to set instead of 'active'.

    Returns the movie dict on success, False when the identifier is
    missing or points to a TV show.
    """
    if not params:
        params = {}

    if not params.get('identifier'):
        msg = 'Can\'t add movie without imdb identifier.'
        log.error(msg)
        fireEvent('notify.frontend', type='movie.is_tvshow', message=msg)
        return False
    else:
        try:
            is_movie = fireEvent('movie.is_movie', identifier=params.get('identifier'), single=True)
            if not is_movie:
                msg = 'Can\'t add movie, seems to be a TV show.'
                log.error(msg)
                fireEvent('notify.frontend', type='movie.is_tvshow', message=msg)
                return False
        except:
            pass

    library = fireEvent('library.add.movie', single=True, attrs=params, update_after=update_library)

    # Status
    status_active, snatched_status, ignored_status, done_status, downloaded_status = \
        fireEvent('status.get', ['active', 'snatched', 'ignored', 'done', 'downloaded'], single = True)

    default_profile = fireEvent('profile.default', single=True)
    cat_id = params.get('category_id')

    # BUGFIX: create the session before the try; with it inside, a failing
    # get_session() made "finally: db.close()" raise NameError
    db = get_session()
    try:
        m = db.query(Media).filter_by(library_id=library.get('id')).first()
        added = True
        do_search = False
        search_after = search_after and self.conf('search_on_add', section='moviesearcher')

        if not m:
            m = Media(
                library_id=library.get('id'),
                profile_id=params.get('profile_id', default_profile.get('id')),
                status_id=status_id if status_id else status_active.get('id'),
                category_id=tryInt(cat_id) if cat_id is not None and tryInt(cat_id) > 0 else None,
            )
            db.add(m)
            db.commit()

            onComplete = None
            if search_after:
                onComplete = self.createOnComplete(m.id)

            fireEventAsync('library.update.movie', params.get('identifier'), default_title=params.get('title', ''), on_complete=onComplete)
            search_after = False
        elif force_readd:
            # Clean snatched history
            for release in m.releases:
                if release.status_id in [downloaded_status.get('id'), snatched_status.get('id'), done_status.get('id')]:
                    if params.get('ignore_previous', False):
                        release.status_id = ignored_status.get('id')
                    else:
                        fireEvent('release.delete', release.id, single=True)

            m.profile_id = params.get('profile_id', default_profile.get('id'))
            m.category_id = tryInt(cat_id) if cat_id is not None and tryInt(cat_id) > 0 else (m.category_id or None)
        else:
            log.debug('Movie already exists, not updating: %s', params)
            added = False

        if force_readd:
            m.status_id = status_id if status_id else status_active.get('id')
            m.last_edit = int(time.time())
            do_search = True

        db.commit()

        # Remove releases
        available_status = fireEvent('status.get', 'available', single=True)
        for rel in m.releases:
            # BUGFIX: was "is" — identity comparison of db ids only holds
            # for CPython's cached small ints (ids >= 257 would never match)
            if rel.status_id == available_status.get('id'):
                db.delete(rel)
                db.commit()

        movie_dict = m.to_dict(self.default_dict)

        if do_search and search_after:
            onComplete = self.createOnComplete(m.id)
            onComplete()

        if added:
            if params.get('title'):
                message = 'Successfully added "%s" to your wanted list.' % params.get('title', '')
            else:
                title = getTitle(m.library)
                if title:
                    message = 'Successfully added "%s" to your wanted list.' % title
                else:
                    message = 'Successfully added to your wanted list.'
            fireEvent('notify.frontend', type='movie.added', data=movie_dict, message=message)

        return movie_dict
    except:
        # BUGFIX: message said "Failed deleting media" in the add handler
        log.error('Failed adding media: %s', traceback.format_exc())
        db.rollback()
    finally:
        db.close()
def search(self, movie, quality): results = [] if self.isDisabled() or not self.isAvailable(self.urls['api'] + '?test' + self.getApiExt()): return results cat_id = self.getCatId(quality.get('identifier')) arguments = urlencode({ 'action': 'search', 'q': simplifyString(movie['library']['titles'][0]['title']), 'catid': cat_id[0], 'i': self.conf('id'), 'h': self.conf('api_key'), }) url = "%s?%s" % (self.urls['api'], arguments) cache_key = 'nzbs.%s.%s' % (movie['library'].get('identifier'), str(cat_id)) data = self.getCache(cache_key) if not data: data = self.urlopen(url) self.setCache(cache_key, data) if not data: log.error('Failed to get data from %s.' % url) return results if data: try: try: data = XMLTree.fromstring(data) nzbs = self.getElements(data, 'channel/item') except Exception, e: log.debug('%s, %s' % (self.getName(), e)) return results for nzb in nzbs: id = int( self.getTextElement(nzb, "link").partition('nzbid=')[2]) new = { 'id': id, 'type': 'nzb', 'provider': self.getName(), 'name': self.getTextElement(nzb, "title"), 'age': self.calculateAge( int( time.mktime( parse(self.getTextElement( nzb, "pubDate")).timetuple()))), 'size': self.parseSize( self.getTextElement(nzb, "description").split( '</a><br />')[1].split('">')[1]), 'url': self.urls['download'] % (id, self.getApiExt()), 'download': self.download, 'detail_url': self.urls['detail'] % id, 'description': self.getTextElement(nzb, "description"), 'check_nzb': True, } new['score'] = fireEvent('score.calculate', new, movie, single=True) is_correct_movie = fireEvent('searcher.correct_movie', nzb=new, movie=movie, quality=quality, imdb_results=False, single_category=False, single=True) if is_correct_movie: results.append(new) self.found(new) return results except SyntaxError: log.error('Failed to parse XML response from NZBMatrix.com')
def determineMovie(self, group, download_info=None):
    """Resolve which movie a scanned file group belongs to.

    Strategies, in order, until an imdb id is found: downloader-supplied
    info, cp(tt...) tag in file paths, nfo files, imdb id inside file
    names, database path lookup, OpenSubtitles hash, and finally a
    name/year search on the group identifiers.

    Returns the library dict from 'library.add', or {} when nothing matched.
    """

    # Get imdb id from downloader
    imdb_id = download_info and download_info.get('imdb_id')
    if imdb_id:
        log.debug('Found movie via imdb id from it\'s download id: %s', download_info.get('imdb_id'))

    files = group['files']

    # Check for CP(imdb_id) string in the file paths
    if not imdb_id:
        for cur_file in files['movie']:
            imdb_id = self.getCPImdb(cur_file)
            if imdb_id:
                log.debug('Found movie via CP tag: %s', cur_file)
                break

    # Check and see if nfo contains the imdb-id
    if not imdb_id:
        try:
            for nfo_file in files['nfo']:
                imdb_id = getImdb(nfo_file)
                if imdb_id:
                    log.debug('Found movie via nfo file: %s', nfo_file)
                    break
        except:
            pass

    # Check and see if filenames contains the imdb-id
    if not imdb_id:
        try:
            for filetype in files:
                for filetype_file in files[filetype]:
                    imdb_id = getImdb(filetype_file, check_inside=False)
                    if imdb_id:
                        # FIX: used to log the unrelated (possibly unbound) nfo_file
                        log.debug('Found movie via imdb in filename: %s', filetype_file)
                        break
                # FIX: also leave the outer loop — continuing it would
                # overwrite the id just found with the next file's result
                if imdb_id:
                    break
        except:
            pass

    # Check if path is already in db
    if not imdb_id:
        db = get_session()
        for cur_file in files['movie']:
            f = db.query(File).filter_by(path=toUnicode(cur_file)).first()
            try:
                imdb_id = f.library[0].identifier
                log.debug('Found movie via database: %s', cur_file)
                break
            except:
                # No File row / no linked library for this path
                pass

    # Search based on OpenSubtitleHash
    if not imdb_id and not group['is_dvd']:
        for cur_file in files['movie']:
            movie = fireEvent('movie.by_hash', file=cur_file, merge=True)
            if len(movie) > 0:
                imdb_id = movie[0]['imdb']
                if imdb_id:
                    log.debug('Found movie via OpenSubtitleHash: %s', cur_file)
                    break

    # Search based on identifiers
    if not imdb_id:
        for identifier in group['identifiers']:
            if len(identifier) > 2:
                try:
                    filename = list(group['files'].get('movie'))[0]
                except:
                    filename = None

                name_year = self.getReleaseNameYear(identifier, file_name=filename if not group['is_dvd'] else None)
                if name_year.get('name') and name_year.get('year'):
                    movie = fireEvent('movie.search', q='%(name)s %(year)s' % name_year, merge=True, limit=1)

                    if len(movie) > 0:
                        imdb_id = movie[0]['imdb']
                        # FIX: log the identifier that matched, not a stale loop var
                        log.debug('Found movie via search: %s', identifier)

                if imdb_id:
                    break
            else:
                log.debug('Identifier too short to use for search: %s', identifier)

    if imdb_id:
        return fireEvent('library.add', attrs={'identifier': imdb_id}, update_after=False, single=True)

    log.error('No imdb_id found for %s. Add a NFO file with IMDB id or add the year to the filename.', group['identifiers'])
    return {}
def add(self, params = {}, force_readd = True, search_after = True): if not params.get('identifier'): log.error('Can\'t add movie without imdb identifier.') return False library = fireEvent('library.add', single = True, attrs = params, update_after = False) # Status status_active = fireEvent('status.add', 'active', single = True) status_snatched = fireEvent('status.add', 'snatched', single = True) default_profile = fireEvent('profile.default', single = True) db = get_session() m = db.query(Movie).filter_by(library_id = library.get('id')).first() added = True do_search = False if not m: m = Movie( library_id = library.get('id'), profile_id = params.get('profile_id', default_profile.get('id')), status_id = status_active.get('id'), ) db.add(m) db.commit() onComplete = None if search_after: onComplete = self.createOnComplete(m.id) fireEventAsync('library.update', params.get('identifier'), default_title = params.get('title', ''), on_complete = onComplete) search_after = False elif force_readd: # Clean snatched history for release in m.releases: if release.status_id == status_snatched.get('id'): release.delete() m.profile_id = params.get('profile_id', default_profile.get('id')) else: log.debug('Movie already exists, not updating: %s', params) added = False if force_readd: m.status_id = status_active.get('id') do_search = True db.commit() # Remove releases available_status = fireEvent('status.get', 'available', single = True) for rel in m.releases: if rel.status_id is available_status.get('id'): db.delete(rel) db.commit() movie_dict = m.to_dict(self.default_dict) if do_search and search_after: onComplete = self.createOnComplete(m.id) onComplete() if added: fireEvent('notify.frontend', type = 'movie.added', data = movie_dict, message = 'Successfully added "%s" to your wanted list.' % params.get('title', '')) #db.close() return movie_dict
def scan(self, folder = None, files = None, simple = False, newer_than = 0, on_found = None):
    """Scan `folder` (or an explicit `files` list) and group files into movies.

    Pipeline: collect files -> group media files by string identifier ->
    attach leftover files (same basename, then same identifier, then same
    folder) -> drop still-unpacking / too-old groups -> classify file types
    per group -> resolve each group to a movie via determineMovie().

    folder: root directory to walk (always normalized).
    files: optional pre-collected file list; skips the walk and date checks.
    simple: skip expensive extras (subtitle language detection).
    newer_than: only keep groups containing files modified after this epoch.
    on_found: progress callback(group, total, remaining) per resolved group.

    Returns {identifier: group_dict} of processed movies.
    """
    folder = ss(os.path.normpath(folder))

    if not folder or not os.path.isdir(folder):
        log.error('Folder doesn\'t exists: %s', folder)
        return {}

    # Get movie "master" files
    movie_files = {}
    leftovers = []

    # Scan all files of the folder if no files are set
    if not files:
        check_file_date = True
        try:
            files = []
            for root, dirs, walk_files in os.walk(folder):
                for filename in walk_files:
                    files.append(os.path.join(root, filename))
        except:
            log.error('Failed getting files from %s: %s', (folder, traceback.format_exc()))
    else:
        check_file_date = False
        files = [ss(x) for x in files]

    db = get_session()

    for file_path in files:

        if not os.path.exists(file_path):
            continue

        # Remove ignored files
        if self.isSampleFile(file_path):
            leftovers.append(file_path)
            continue
        elif not self.keepFile(file_path):
            continue

        is_dvd_file = self.isDVDFile(file_path)
        if os.path.getsize(file_path) > self.minimal_filesize['media'] or is_dvd_file: # Minimal 300MB files or is DVD file

            # Normal identifier
            identifier = self.createStringIdentifier(file_path, folder, exclude_filename = is_dvd_file)
            identifiers = [identifier]

            # Identifier with quality
            quality = fireEvent('quality.guess', [file_path], single = True) if not is_dvd_file else {'identifier':'dvdr'}
            if quality:
                identifier_with_quality = '%s %s' % (identifier, quality.get('identifier', ''))
                identifiers = [identifier_with_quality, identifier]

            if not movie_files.get(identifier):
                movie_files[identifier] = {
                    'unsorted_files': [],
                    'identifiers': identifiers,
                    'is_dvd': is_dvd_file,
                }

            movie_files[identifier]['unsorted_files'].append(file_path)
        else:
            leftovers.append(file_path)

        # Break if CP wants to shut down
        if self.shuttingDown():
            break

    # Cleanup
    del files

    # Sort reverse, this prevents "Iron man 2" from getting grouped with "Iron man" as the "Iron Man 2"
    # files will be grouped first.
    leftovers = set(sorted(leftovers, reverse = True))

    # Group files minus extension
    for identifier, group in movie_files.iteritems():
        if identifier not in group['identifiers'] and len(identifier) > 0:
            group['identifiers'].append(identifier)

        log.debug('Grouping files: %s', identifier)

        for file_path in group['unsorted_files']:
            # Strip the extension, then pull in any leftover sharing the stem
            wo_ext = file_path[:-(len(getExt(file_path)) + 1)]
            found_files = set([i for i in leftovers if wo_ext in i])
            group['unsorted_files'].extend(found_files)
            leftovers = leftovers - found_files

        # Break if CP wants to shut down
        if self.shuttingDown():
            break

    # Create identifiers for all leftover files
    path_identifiers = {}
    for file_path in leftovers:
        identifier = self.createStringIdentifier(file_path, folder)

        if not path_identifiers.get(identifier):
            path_identifiers[identifier] = []

        path_identifiers[identifier].append(file_path)

    # Group the files based on the identifier
    delete_identifiers = []
    for identifier, found_files in path_identifiers.iteritems():
        log.debug('Grouping files on identifier: %s', identifier)

        group = movie_files.get(identifier)
        if group:
            group['unsorted_files'].extend(found_files)
            delete_identifiers.append(identifier)

            # Remove the found files from the leftover stack
            leftovers = leftovers - set(found_files)

        # Break if CP wants to shut down
        if self.shuttingDown():
            break

    # Cleaning up used
    for identifier in delete_identifiers:
        if path_identifiers.get(identifier):
            del path_identifiers[identifier]
    del delete_identifiers

    # Group based on folder
    delete_identifiers = []
    for identifier, found_files in path_identifiers.iteritems():
        log.debug('Grouping files on foldername: %s', identifier)

        for ff in found_files:
            new_identifier = self.createStringIdentifier(os.path.dirname(ff), folder)

            group = movie_files.get(new_identifier)
            if group:
                group['unsorted_files'].extend([ff])
                delete_identifiers.append(identifier)

                # Remove the found files from the leftover stack
                leftovers = leftovers - set([ff])

        # Break if CP wants to shut down
        if self.shuttingDown():
            break

    # Cleaning up used
    for identifier in delete_identifiers:
        if path_identifiers.get(identifier):
            del path_identifiers[identifier]
    del delete_identifiers

    # Make sure we remove older / still extracting files
    valid_files = {}
    while True and not self.shuttingDown():
        try:
            identifier, group = movie_files.popitem()
        except:
            # Dict exhausted
            break

        # Check if movie is fresh and maybe still unpacking, ignore files new then 1 minute
        file_too_new = False
        for cur_file in group['unsorted_files']:
            if not os.path.isfile(cur_file):
                # File vanished mid-scan; treat as "too new" (truthy timestamp)
                file_too_new = time.time()
                break
            file_time = [os.path.getmtime(cur_file), os.path.getctime(cur_file)]
            for t in file_time:
                if t > time.time() - 60:
                    file_too_new = tryInt(time.time() - t)
                    break

            if file_too_new:
                break

        if check_file_date and file_too_new:
            try:
                time_string = time.ctime(file_time[0])
            except:
                try:
                    time_string = time.ctime(file_time[1])
                except:
                    time_string = 'unknown'

            log.info('Files seem to be still unpacking or just unpacked (created on %s), ignoring for now: %s', (time_string, identifier))

            # Delete the unsorted list
            del group['unsorted_files']

            continue

        # Only process movies newer than x
        if newer_than and newer_than > 0:
            has_new_files = False
            for cur_file in group['unsorted_files']:
                file_time = [os.path.getmtime(cur_file), os.path.getctime(cur_file)]
                if file_time[0] > newer_than or file_time[1] > newer_than:
                    has_new_files = True
                    break

            if not has_new_files:
                log.debug('None of the files have changed since %s for %s, skipping.', (time.ctime(newer_than), identifier))

                # Delete the unsorted list
                del group['unsorted_files']

                continue

        valid_files[identifier] = group
    del movie_files

    # Determine file types
    processed_movies = {}
    total_found = len(valid_files)
    while True and not self.shuttingDown():
        try:
            identifier, group = valid_files.popitem()
        except:
            # Dict exhausted
            break

        # Group extra (and easy) files first
        # images = self.getImages(group['unsorted_files'])
        group['files'] = {
            'movie_extra': self.getMovieExtras(group['unsorted_files']),
            'subtitle': self.getSubtitles(group['unsorted_files']),
            'subtitle_extra': self.getSubtitlesExtras(group['unsorted_files']),
            'nfo': self.getNfo(group['unsorted_files']),
            'trailer': self.getTrailers(group['unsorted_files']),
            #'backdrop': images['backdrop'],
            'leftover': set(group['unsorted_files']),
        }

        # Media files
        if group['is_dvd']:
            group['files']['movie'] = self.getDVDFiles(group['unsorted_files'])
        else:
            group['files']['movie'] = self.getMediaFiles(group['unsorted_files'])

        if len(group['files']['movie']) == 0:
            log.error('Couldn\'t find any movie files for %s', identifier)
            continue

        log.debug('Getting metadata for %s', identifier)
        group['meta_data'] = self.getMetaData(group, folder = folder)

        # Subtitle meta
        group['subtitle_language'] = self.getSubtitleLanguage(group) if not simple else {}

        # Get parent dir from movie files
        for movie_file in group['files']['movie']:
            group['parentdir'] = os.path.dirname(movie_file)
            group['dirname'] = None

            folder_names = group['parentdir'].replace(folder, '').split(os.path.sep)
            folder_names.reverse()

            # Try and get a proper dirname, so no "A", "Movie", "Download" etc
            for folder_name in folder_names:
                if folder_name.lower() not in self.ignore_names and len(folder_name) > 2:
                    group['dirname'] = folder_name
                    break

            # Only the first movie file determines parentdir/dirname
            break

        # Leftover "sorted" files
        for file_type in group['files']:
            # NOTE(review): `is` compares string identity here; works only
            # because 'leftover' literals are interned in CPython — fragile.
            if not file_type is 'leftover':
                group['files']['leftover'] -= set(group['files'][file_type])

        # Delete the unsorted list
        del group['unsorted_files']

        # Determine movie
        group['library'] = self.determineMovie(group)
        if not group['library']:
            log.error('Unable to determine movie: %s', group['identifiers'])
        else:
            movie = db.query(Movie).filter_by(library_id = group['library']['id']).first()
            group['movie_id'] = None if not movie else movie.id

        processed_movies[identifier] = group

        # Notify parent & progress on something found
        if on_found:
            on_found(group, total_found, total_found - len(processed_movies))

    if len(processed_movies) > 0:
        log.info('Found %s movies in the folder %s', (len(processed_movies), folder))
    else:
        log.debug('Found no movies in the folder %s', (folder))

    return processed_movies
def addSingleRefreshView(self): for media_type in fireEvent('media.types', merge=True): addApiView('%s.refresh' % media_type, self.refresh)
def notifyFront(): db = get_session() movie = db.query(Movie).filter_by(id = movie_id).first() fireEvent('notify.frontend', type = 'movie.update.%s' % movie.id, data = movie.to_dict(self.default_dict))
def signal_handler(*args, **kwargs): fireEvent('app.shutdown', single=True)
def list(self, types=None, status=None, release_status=None, status_or=False, limit_offset=None, starts_with=None, search=None):
    """List media, filtered and paged, ordered by title.

    types: media type(s) to include (string or list); all types if None.
    status: media status filter (string or list).
    release_status: release status filter (string or list).
    status_or: combine media and release status filters with OR instead of AND.
    limit_offset: "limit" or "limit,offset" string (or pre-split sequence).
    starts_with: first-letter filter; non a-z letters map to '#'.
    search: title search query.

    Returns (total_count, medias) — count before paging, page of media dicts.
    """
    db = get_db()

    # Make a list from string
    if status and not isinstance(status, (list, tuple)):
        status = [status]
    if release_status and not isinstance(release_status, (list, tuple)):
        release_status = [release_status]
    if types and not isinstance(types, (list, tuple)):
        types = [types]

    # Query media ids, restricted to the requested types
    if types:
        all_media_ids = set()
        for media_type in types:
            all_media_ids = all_media_ids.union(set([x['_id'] for x in db.get_many('media_by_type', media_type)]))
    else:
        all_media_ids = set([x['_id'] for x in db.all('media')])

    media_ids = list(all_media_ids)
    filter_by = {}

    # Filter on movie status
    if status and len(status) > 0:
        filter_by['media_status'] = set()
        for media_status in fireEvent('media.with_status', status, with_doc=False, single=True):
            filter_by['media_status'].add(media_status.get('_id'))

    # Filter on release status
    if release_status and len(release_status) > 0:
        filter_by['release_status'] = set()
        # FIX: loop var used to shadow the `release_status` parameter
        for rel_status in fireEvent('release.with_status', release_status, with_doc=False, single=True):
            filter_by['release_status'].add(rel_status.get('media_id'))

    # Add search filters
    if starts_with:
        starts_with = toUnicode(starts_with.lower())[0]
        starts_with = starts_with if starts_with in ascii_lowercase else '#'
        # (dead `= set()` pre-assignment removed; value is built here)
        filter_by['starts_with'] = [x['_id'] for x in db.get_many('media_startswith', starts_with)]

    # Filter with search query
    if search:
        filter_by['search'] = [x['_id'] for x in db.get_many('media_search_title', search)]

    # OR-combine media and release status filters when requested
    if status_or and 'media_status' in filter_by and 'release_status' in filter_by:
        filter_by['status'] = list(filter_by['media_status']) + list(filter_by['release_status'])
        del filter_by['media_status']
        del filter_by['release_status']

    # Filter by combining ids (intersection across all remaining filters)
    for x in filter_by:
        media_ids = [n for n in media_ids if n in filter_by[x]]

    total_count = len(media_ids)
    if total_count == 0:
        return 0, []

    offset = 0
    limit = -1
    if limit_offset:
        splt = splitString(limit_offset) if isinstance(limit_offset, (str, unicode)) else limit_offset
        limit = tryInt(splt[0])
        # FIX: == instead of `is` — small-int identity is a CPython accident
        offset = tryInt(0 if len(splt) == 1 else splt[1])

    # List movies based on title order
    medias = []
    for m in db.all('media_title'):
        media_id = m['_id']
        if media_id not in media_ids:
            continue
        if offset > 0:
            offset -= 1
            continue

        media = fireEvent('media.get', media_id, single=True)

        # Merge releases with movie dict
        medias.append(media)

        # remove from media ids
        media_ids.remove(media_id)

        if len(media_ids) == 0 or len(medias) == limit:
            break

    return total_count, medias
def correctRelease(self, nzb=None, media=None, quality=None, **kwargs):
    """Decide whether an NZB result actually matches the wanted movie/quality.

    Applies, in order: retention window, required/ignored word check,
    other-quality detection, 3D check, size bounds, provider extra checks,
    imdb-link match, and finally title+year name matching.

    Returns True when the release is acceptable, False when rejected, and
    None (implicit) when media is not a movie.
    """
    if media.get('type') != 'movie':
        return

    media_title = fireEvent('searcher.get_search_title', media, single=True)

    imdb_results = kwargs.get('imdb_results', False)
    retention = Env.setting('retention', section='nzb')

    # Usenet-only check: torrents (with seeders) are exempt from retention
    if nzb.get('seeders') is None and 0 < retention < nzb.get('age', 0):
        log.info2('Wrong: Outside retention, age is %s, needs %s or lower: %s', (nzb['age'], retention, nzb['name']))
        return False

    # Check for required and ignored words
    if not fireEvent('searcher.correct_words', nzb['name'], media, single=True):
        return False

    # NOTE(review): when `quality` is falsy the else-branch dereferences
    # quality['identifier'] and would raise TypeError — callers presumably
    # always pass a quality dict; confirm before relying on the fallback.
    preferred_quality = quality if quality else fireEvent('quality.single', identifier=quality['identifier'], single=True)

    # Contains lower quality string
    contains_other = fireEvent('searcher.contains_other_quality', nzb, movie_year=media['info']['year'], preferred_quality=preferred_quality, single=True)
    if contains_other != False:
        log.info2('Wrong: %s, looking for %s, found %s', (nzb['name'], quality['label'], [x for x in contains_other] if contains_other else 'no quality'))
        return False

    # Wrong 3D-ness for the wanted profile entry
    if not fireEvent('searcher.correct_3d', nzb, preferred_quality=preferred_quality, single=True):
        log.info2('Wrong: %s, %slooking for %s in 3D', (nzb['name'], ('' if preferred_quality['custom'].get('3d') else 'NOT '), quality['label']))
        return False

    # File to small
    if nzb['size'] and tryInt(preferred_quality['size_min']) > tryInt(nzb['size']):
        log.info2('Wrong: "%s" is too small to be %s. %sMB instead of the minimal of %sMB.', (nzb['name'], preferred_quality['label'], nzb['size'], preferred_quality['size_min']))
        return False

    # File to large
    if nzb['size'] and tryInt(preferred_quality['size_max']) < tryInt(nzb['size']):
        log.info2('Wrong: "%s" is too large to be %s. %sMB instead of the maximum of %sMB.', (nzb['name'], preferred_quality['label'], nzb['size'], preferred_quality['size_max']))
        return False

    # Provider specific functions
    get_more = nzb.get('get_more_info')
    if get_more:
        get_more(nzb)

    extra_check = nzb.get('extra_check')
    if extra_check and not extra_check(nzb):
        return False

    # Provider already matched by imdb id — name checking not needed
    if imdb_results:
        return True

    # Check if nzb contains imdb link
    if getImdb(nzb.get('description', '')) == getIdentifier(media):
        return True

    for raw_title in media['info']['titles']:
        for movie_title in possibleTitles(raw_title):
            movie_words = re.split('\W+', simplifyString(movie_title))

            if fireEvent('searcher.correct_name', nzb['name'], movie_title, single=True):
                # if no IMDB link, at least check year range 1
                if len(movie_words) > 2 and fireEvent('searcher.correct_year', nzb['name'], media['info']['year'], 1, single=True):
                    return True

                # if no IMDB link, at least check year
                if len(movie_words) <= 2 and fireEvent('searcher.correct_year', nzb['name'], media['info']['year'], 0, single=True):
                    return True

    log.info("Wrong: %s, undetermined naming. Looking for '%s (%s)'", (nzb['name'], media_title, media['info']['year']))
    return False
def handler(): fireEvent(event, media_id=media_id, on_complete=self.createOnComplete(media_id))
def buildUrl(self, media, page, cats): return (tryUrlencode( '"%s"' % fireEvent('library.query', media, single=True)), page, ','.join(str(x) for x in cats))
def __init__(self): super(CoreNotifier, self).__init__() addEvent('notify', self.notify) addEvent('notify.frontend', self.frontend) addApiView( 'notification.markread', self.markAsRead, docs={ 'desc': 'Mark notifications as read', 'params': { 'ids': { 'desc': 'Notification id you want to mark as read. All if ids is empty.', 'type': 'int (comma separated)' }, }, }) addApiView( 'notification.list', self.listView, docs={ 'desc': 'Get list of notifications', 'params': { 'limit_offset': { 'desc': 'Limit and offset the notification list. Examples: "50" or "50,30"' }, }, 'return': { 'type': 'object', 'example': """{ 'success': True, 'empty': bool, any notification returned or not, 'notifications': array, notifications found, }""" } }) addNonBlockApiView('notification.listener', (self.addListener, self.removeListener)) addApiView('notification.listener', self.listener) fireEvent('schedule.interval', 'core.check_messages', self.checkMessages, hours=12, single=True) fireEvent('schedule.interval', 'core.clean_messages', self.cleanMessages, seconds=15, single=True) addEvent('app.load', self.clean) addEvent('app.load', self.checkMessages) self.messages = [] self.listeners = [] self.m_lock = threading.Lock()
def update(self, identifier, default_title = '', force = False):
    """Refresh a show library entry from the 'show.info' providers.

    identifier: show identifier to look up and update.
    default_title: preferred title to flag as default among fetched titles.
    force: accepted for interface parity; not referenced in this body.

    Returns the updated library dict, or False when no info was found.
    """
    if self.shuttingDown():
        return

    db = get_session()
    library = db.query(ShowLibrary).filter_by(identifier = identifier).first()
    done_status = fireEvent('status.get', 'done', single = True)

    # NOTE(review): the rest of this method dereferences `library`
    # unconditionally — it assumes the row exists; confirm callers guarantee it.
    if library:
        library_dict = library.to_dict(self.default_dict)

    do_update = True

    info = fireEvent('show.info', merge = True, identifier = identifier)

    # Don't need those here
    try: del info['in_wanted']
    except: pass
    try: del info['in_library']
    except: pass

    if not info or len(info) == 0:
        log.error('Could not update, no show info to work with: %s', identifier)
        return False

    # Main info
    if do_update:
        library.plot = toUnicode(info.get('plot', ''))
        library.tagline = toUnicode(info.get('tagline', ''))
        library.year = info.get('year', 0)
        library.status_id = done_status.get('id')
        library.show_status = toUnicode(info.get('status', '').lower())
        library.airs_time = info.get('airs_time', None)

        # Bits — air days stored as a bitmask (Daily = all seven bits)
        days_of_week_map = {
            u'Monday': 1,
            u'Tuesday': 2,
            u'Wednesday': 4,
            u'Thursday': 8,
            u'Friday': 16,
            u'Saturday': 32,
            u'Sunday': 64,
            u'Daily': 127,
        }
        try:
            library.airs_dayofweek = days_of_week_map.get(info.get('airs_dayofweek'))
        except:
            library.airs_dayofweek = 0

        try:
            library.last_updated = int(info.get('lastupdated'))
        except:
            library.last_updated = int(time.time())

        library.info.update(info)

        db.commit()

        # Titles: wipe and re-add from the fetched info
        [db.delete(title) for title in library.titles]
        db.commit()

        titles = info.get('titles', [])
        log.debug('Adding titles: %s', titles)

        counter = 0
        for title in titles:
            if not title:
                continue
            title = toUnicode(title)
            t = LibraryTitle(
                title = title,
                simple_title = self.simplifyTitle(title),
                # Default when: first title with no preference given, only
                # one title, it matches default_title, or it's the first
                # fetched title and no default was supplied
                default = (len(default_title) == 0 and counter == 0) or len(titles) == 1 or title.lower() == toUnicode(default_title.lower()) or (toUnicode(default_title) == u'' and toUnicode(titles[0]) == title)
            )
            library.titles.append(t)
            counter += 1

        db.commit()

        # Files — download the first usable poster and attach it
        images = info.get('images', [])
        for image_type in ['poster']:
            for image in images.get(image_type, []):
                if not isinstance(image, (str, unicode)):
                    continue

                file_path = fireEvent('file.download', url = image, single = True)
                if file_path:
                    file_obj = fireEvent('file.add', path = file_path, type_tuple = ('image', image_type), single = True)
                    try:
                        file_obj = db.query(File).filter_by(id = file_obj.get('id')).one()
                        library.files.append(file_obj)
                        db.commit()
                        # One successful image per type is enough
                        break
                    except:
                        log.debug('Failed to attach to library: %s', traceback.format_exc())

    library_dict = library.to_dict(self.default_dict)

    db.expire_all()
    return library_dict
def single(self, movie, search_protocols=None, manual=False, force_download=False):
    """Search all providers for one movie, walking its profile qualities.

    movie: media dict (CodernityDB document) to search for.
    search_protocols: protocols to use; fetched via event when not given.
    manual: user-triggered search — ignores ETA and the done-status skip.
    force_download: download the best result even when before ETA.

    Returns True when a release was successfully snatched, False/None otherwise.
    """
    # Find out search type
    try:
        if not search_protocols:
            search_protocols = fireEvent('searcher.protocols', single=True)
    except SearchSetupError:
        return

    if not movie['profile_id'] or (movie['status'] == 'done' and not manual):
        log.debug('Movie doesn\'t have a profile or already done, assuming in manage tab.')
        return

    pre_releases = fireEvent('quality.pre_releases', single=True)
    release_dates = fireEvent('movie.update_release_dates', movie['_id'], merge=True)

    found_releases = []
    previous_releases = movie.get('releases', [])
    too_early_to_search = []
    outside_eta_results = 0
    alway_search = self.conf('always_search')
    ignore_eta = manual
    default_title = getTitle(movie)

    if not default_title:
        log.error('No proper info found for movie, removing it from library to cause it from having more issues.')
        fireEvent('media.delete', movie['_id'], single=True)
        return

    fireEvent('notify.frontend', type='movie.searcher.started', data={'_id': movie['_id']}, message='Searching for "%s"' % default_title)

    # Ignore eta once every 7 days
    if not alway_search:
        prop_name = 'last_ignored_eta.%s' % movie['_id']
        last_ignored_eta = float(Env.prop(prop_name, default=0))
        if last_ignored_eta > time.time() - 604800:
            ignore_eta = True
            Env.prop(prop_name, value=time.time())

    db = get_db()

    profile = db.get('id', movie['profile_id'])
    ret = False

    # Walk the profile's qualities from most to least wanted
    index = 0
    for q_identifier in profile.get('qualities'):
        quality_custom = {
            'index': index,
            'quality': q_identifier,
            'finish': profile['finish'][index],
            'wait_for': tryInt(profile['wait_for'][index]),
            '3d': profile['3d'][index] if profile.get('3d') else False
        }
        index += 1

        could_not_be_released = not self.couldBeReleased(q_identifier in pre_releases, release_dates, movie['info']['year'])
        if not alway_search and could_not_be_released:
            too_early_to_search.append(q_identifier)

            # Skip release, if ETA isn't ignored
            if not ignore_eta:
                continue

        has_better_quality = 0

        # See if better quality is available
        for release in movie.get('releases', []):
            if release['status'] not in ['available', 'ignored', 'failed']:
                is_higher = fireEvent('quality.ishigher', \
                    {'identifier': q_identifier, 'is_3d': quality_custom.get('3d', 0)}, \
                    {'identifier': release['quality'], 'is_3d': release.get('is_3d', 0)}, \
                    profile, single = True)
                if is_higher != 'higher':
                    has_better_quality += 1

        # Don't search for quality lower then already available.
        if has_better_quality > 0:
            log.info('Better quality (%s) already available or snatched for %s', (q_identifier, default_title))
            fireEvent('media.restatus', movie['_id'])
            break

        quality = fireEvent('quality.single', identifier=q_identifier, single=True)
        log.info('Search for %s in %s%s', (default_title, quality['label'], ' ignoring ETA' if alway_search or ignore_eta else ''))

        # Extend quality with profile customs
        quality['custom'] = quality_custom

        results = fireEvent('searcher.search', search_protocols, movie, quality, single=True) or []

        results_count = len(results)
        if results_count == 0:
            log.debug('Nothing found for %s in %s', (default_title, quality['label']))

        # Keep track of releases found outside ETA window
        outside_eta_results += results_count if could_not_be_released else 0

        # Check if movie isn't deleted while searching
        if not fireEvent('media.get', movie.get('_id'), single=True):
            break

        # Add them to this movie releases list
        found_releases += fireEvent('release.create_from_search', results, movie, quality, single=True)

        # Don't trigger download, but notify user of available releases
        if could_not_be_released:
            if results_count > 0:
                log.debug('Found %s releases for "%s", but ETA isn\'t correct yet.', (results_count, default_title))

        # Try find a valid result and download it
        if (force_download or not could_not_be_released) and fireEvent('release.try_download_result', results, movie, quality_custom, single=True):
            ret = True

        # Remove releases that aren't found anymore
        temp_previous_releases = []
        for release in previous_releases:
            if release.get('status') == 'available' and release.get('identifier') not in found_releases:
                fireEvent('release.delete', release.get('_id'), single=True)
            else:
                temp_previous_releases.append(release)
        previous_releases = temp_previous_releases
        del temp_previous_releases

        # Break if CP wants to shut down
        if self.shuttingDown() or ret:
            break

    if len(too_early_to_search) > 0:
        log.info2('Too early to search for %s, %s', (too_early_to_search, default_title))

        if outside_eta_results > 0:
            message = 'Found %s releases for "%s" before ETA. Select and download via the dashboard.' % (outside_eta_results, default_title)
            log.info(message)

            if not manual:
                fireEvent('media.available', message=message, data={})

    fireEvent('notify.frontend', type='movie.searcher.ended', data={'_id': movie['_id']})
    return ret
def setCrons(self): fireEvent('schedule.remove', 'updater.check', single = True) if self.isEnabled(): fireEvent('schedule.interval', 'updater.check', self.autoUpdate, hours = 6) self.autoUpdate() # Check after enabling
def setCrons(self): fireEvent('schedule.interval', 'charts.update_cache', self.updateViewCache, hours = self.conf('update_interval', default = 12))