def isSubFolder(sub_folder, base_folder):
    if base_folder and sub_folder:
        base = sp(os.path.realpath(base_folder)) + os.path.sep
        subfolder = sp(os.path.realpath(sub_folder)) + os.path.sep
        return os.path.commonprefix([subfolder, base]) == base

    return False
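# Minimal usage sketch for isSubFolder(), added for illustration and not part of
# the original module; it assumes sp() is the project's path-normalization helper
# imported above, and the example paths are hypothetical.
def _isSubFolder_examples():
    # True: the sub path lives inside the base path
    inside = isSubFolder('/downloads/movies/Some.Movie.2014', '/downloads/movies')
    # False: the appended os.path.sep stops '/downloads/movies-extra' from matching '/downloads/movies'
    sibling = isSubFolder('/downloads/movies-extra', '/downloads/movies')
    # False: a missing argument short-circuits
    missing = isSubFolder(None, '/downloads/movies')
    return inside, sibling, missing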
def _createType(self, meta_name, root, movie_info, group, file_type, i):
    # Get file path
    camelcase_method = underscoreToCamel(file_type.capitalize())
    name = getattr(self, "get" + camelcase_method + "Name")(meta_name, root, i)

    if name and (self.conf("meta_" + file_type) or self.conf("meta_" + file_type) is None):

        # Get file content
        content = getattr(self, "get" + camelcase_method)(movie_info = movie_info, data = group, i = i)
        if content:
            log.debug("Creating %s file: %s", (file_type, name))
            if os.path.isfile(content):
                content = sp(content)
                name = sp(name)

                if not os.path.exists(os.path.dirname(name)):
                    os.makedirs(os.path.dirname(name))

                shutil.copyfile(content, name)

                # Try and copy stats separately
                try:
                    shutil.copystat(content, name)
                except:
                    pass
            else:
                self.createFile(name, content)
                group["renamed_files"].append(name)

            try:
                os.chmod(sp(name), Env.getPermission("file"))
            except:
                log.debug("Failed setting permissions for %s: %s", (name, traceback.format_exc()))
def searchSingle(self, group):
    if self.isDisabled():
        return

    try:
        available_languages = sum(group['subtitle_language'].values(), [])
        downloaded = []
        files = [toUnicode(x) for x in group['files']['movie']]
        log.debug('Searching for subtitles for: %s', files)

        for lang in self.getLanguages():
            if lang not in available_languages:
                download = subliminal.download_subtitles(files, multi = True, force = self.conf('force'), languages = [lang], services = self.services, cache_dir = Env.get('cache_dir'))
                for subtitle in download:
                    downloaded.extend(download[subtitle])

        for d_sub in downloaded:
            log.info('Found subtitle (%s): %s', (d_sub.language.alpha2, files))
            group['files']['subtitle'].append(sp(d_sub.path))
            group['before_rename'].append(sp(d_sub.path))
            group['subtitle_language'][sp(d_sub.path)] = [d_sub.language.alpha2]

        return True

    except:
        log.error('Failed searching for subtitle: %s', (traceback.format_exc()))

    return False
def getAllDownloadStatus(self, ids):
    """ Get status of all active downloads

    :param ids: list of (mixed) downloader ids
        Used to match the releases for this downloader as there could be
        other downloaders active that it should ignore
    :return: list of releases
    """

    log.debug('Checking qBittorrent download status.')

    if not self.connect():
        return []

    try:
        torrents = self.qb.get_torrents()

        release_downloads = ReleaseDownloadList(self)

        for torrent in torrents:
            if torrent.hash in ids:
                torrent.update_general()  # get extra info
                torrent_filelist = torrent.get_files()

                torrent_files = []
                torrent_dir = os.path.join(torrent.save_path, torrent.name)

                if os.path.isdir(torrent_dir):
                    torrent.save_path = torrent_dir

                if len(torrent_filelist) > 1 and os.path.isdir(torrent_dir):  # multi file torrent, path.isdir check makes sure we're not in the root download folder
                    for root, _, files in os.walk(torrent.save_path):
                        for f in files:
                            torrent_files.append(sp(os.path.join(root, f)))
                else:  # multi or single file placed directly in torrent.save_path
                    for f in torrent_filelist:
                        file_path = os.path.join(torrent.save_path, f.name)
                        if os.path.isfile(file_path):
                            torrent_files.append(sp(file_path))

                release_downloads.append({
                    'id': torrent.hash,
                    'name': torrent.name,
                    'status': self.getTorrentStatus(torrent),
                    'seed_ratio': torrent.ratio,
                    'original_status': torrent.state,
                    'timeleft': torrent.progress * 100 if torrent.progress else -1,  # percentage
                    'folder': sp(torrent.save_path),
                    'files': torrent_files
                })

        return release_downloads

    except Exception as e:
        log.error('Failed to get status from qBittorrent: %s', e)
        return []
def getUserDir():
    try:
        import pwd
        if not os.environ['HOME']:
            os.environ['HOME'] = sp(pwd.getpwuid(os.geteuid()).pw_dir)
    except:
        pass

    return sp(os.path.expanduser('~'))
def getAllDownloadStatus(self, ids):
    """ Get status of all active downloads

    :param ids: list of (mixed) downloader ids
        Used to match the releases for this downloader as there could be
        other downloaders active that it should ignore
    :return: list of releases
    """

    log.debug('Checking rTorrent download status.')

    if not self.connect():
        return []

    try:
        torrents = self.rt.get_torrents()

        release_downloads = ReleaseDownloadList(self)

        for torrent in torrents:
            if torrent.info_hash in ids:
                torrent_directory = os.path.normpath(torrent.directory)
                torrent_files = []

                for file in torrent.get_files():
                    if not os.path.normpath(file.path).startswith(torrent_directory):
                        file_path = os.path.join(torrent_directory, file.path.lstrip('/'))
                    else:
                        file_path = file.path

                    torrent_files.append(sp(file_path))

                release_downloads.append({
                    'id': torrent.info_hash,
                    'name': torrent.name,
                    'status': self.getTorrentStatus(torrent),
                    'seed_ratio': torrent.ratio,
                    'original_status': torrent.state,
                    'timeleft': str(timedelta(seconds = float(torrent.left_bytes) / torrent.down_rate)) if torrent.down_rate > 0 else -1,
                    'folder': sp(torrent.directory),
                    'files': torrent_files
                })

        return release_downloads

    except Exception as err:
        log.error('Failed to get status from rTorrent: %s', err)
        return []
def processComplete(self, release_download, delete_files):
    log.debug('Requesting rTorrent to remove the torrent %s%s.',
              (release_download['name'], ' and cleanup the downloaded files' if delete_files else ''))

    if not self.connect():
        return False

    torrent = self.rt.find_torrent(release_download['id'])

    if torrent is None:
        return False

    if delete_files:
        for file_item in torrent.get_files():  # will only delete files, not dir/sub-dir
            os.unlink(os.path.join(torrent.directory, file_item.path))

        if torrent.is_multi_file() and torrent.directory.endswith(torrent.name):
            # Remove empty directories bottom up
            try:
                for path, _, _ in os.walk(sp(torrent.directory), topdown = False):
                    os.rmdir(path)
            except OSError:
                log.info('Directory "%s" contains extra files, unable to remove', torrent.directory)

    torrent.erase()  # just removes the torrent, doesn't delete data

    return True
def clean(self, release_id):
    try:
        db = get_db()
        rel = db.get('id', release_id)
        raw_files = rel.get('files')

        if len(raw_files) == 0:
            self.delete(rel['_id'])
        else:
            files = {}
            for file_type in raw_files:
                for release_file in raw_files.get(file_type, []):
                    if os.path.isfile(sp(release_file)):
                        if file_type not in files:
                            files[file_type] = []
                        files[file_type].append(release_file)

            rel['files'] = files
            db.update(rel)

        return True
    except:
        log.error('Failed: %s', traceback.format_exc())

    return False
def removePyc(folder, only_excess = True, show_logs = True):

    folder = sp(folder)

    for root, dirs, files in os.walk(folder):

        pyc_files = filter(lambda filename: filename.endswith('.pyc'), files)
        py_files = set(filter(lambda filename: filename.endswith('.py'), files))
        excess_pyc_files = filter(lambda pyc_filename: pyc_filename[:-1] not in py_files, pyc_files) if only_excess else pyc_files

        for excess_pyc_file in excess_pyc_files:
            full_path = os.path.join(root, excess_pyc_file)
            if show_logs:
                log.debug('Removing old PYC file: %s', full_path)
            try:
                os.remove(full_path)
            except:
                log.error('Couldn\'t remove %s: %s', (full_path, traceback.format_exc()))

        for dir_name in dirs:
            full_path = os.path.join(root, dir_name)
            if len(os.listdir(full_path)) == 0:
                try:
                    os.rmdir(full_path)
                except:
                    log.error('Couldn\'t remove empty directory %s: %s', (full_path, traceback.format_exc()))
def getDirectories(self, path = '/', show_hidden = True):

    # Return driveletters or root if path is empty
    if path == '/' or not path or path == '\\':
        if os.name == 'nt':
            return self.getDriveLetters()
        path = '/'

    dirs = []
    path = sp(path)
    for f in os.listdir(path):
        p = sp(os.path.join(path, f))
        if os.path.isdir(p) and ((self.is_hidden(p) and bool(int(show_hidden))) or not self.is_hidden(p)):
            dirs.append(toUnicode('%s%s' % (p, os.path.sep)))

    return sorted(dirs)
def getAllDownloadStatus(self, ids):
    """ Get status of all active downloads

    :param ids: list of (mixed) downloader ids
        Used to match the releases for this downloader as there could be
        other downloaders active that it should ignore
    :return: list of releases
    """

    raw_statuses = self.call('nzb')

    release_downloads = ReleaseDownloadList(self)
    for nzb in raw_statuses.get('nzbs', []):
        nzb_id = os.path.basename(nzb['nzbFileName'])
        if nzb_id in ids:

            # Check status
            status = 'busy'
            if nzb['state'] == 20:
                status = 'completed'
            elif nzb['state'] in [21, 22, 24]:
                status = 'failed'

            release_downloads.append({
                'temp_id': nzb['id'],
                'id': nzb_id,
                'name': nzb['uiTitle'],
                'status': status,
                'original_status': nzb['state'],
                'timeleft': -1,
                'folder': sp(nzb['destinationPath']),
            })

    return release_downloads
def makeDir(self, path):
    path = sp(path)
    try:
        if not os.path.isdir(path):
            os.makedirs(path, Env.getPermission('folder'))
        return True
    except Exception as e:
        log.error('Unable to create folder "%s": %s', (path, e))

    return False
def getAllDownloadStatus(self, ids):
    """ Get status of all active downloads

    :param ids: list of (mixed) downloader ids
        Used to match the releases for this downloader as there could be
        other downloaders active that it should ignore
    :return: list of releases
    """

    log.debug('Checking Hadouken download status.')

    if not self.connect():
        return []

    release_downloads = ReleaseDownloadList(self)
    queue = self.hadouken_api.get_by_hash_list(ids)

    if not queue:
        return []

    for torrent in queue:
        if torrent is None:
            continue

        torrent_filelist = self.hadouken_api.get_files_by_hash(torrent.info_hash)

        torrent_files = []
        for file_item in torrent_filelist:
            torrent_files.append(sp(os.path.join(torrent.save_path, file_item)))

        release_downloads.append({
            'id': torrent.info_hash.upper(),
            'name': torrent.name,
            'status': torrent.get_status(),
            'seed_ratio': torrent.get_seed_ratio(),
            'original_status': torrent.state,
            'timeleft': -1,
            'folder': sp(torrent.save_path if len(torrent_files) == 1 else os.path.join(torrent.save_path, torrent.name)),
            'files': torrent_files
        })

    return release_downloads
def getSize(paths):

    single = not isinstance(paths, (tuple, list))
    if single:
        paths = [paths]

    total_size = 0
    for path in paths:
        path = sp(path)

        if os.path.isdir(path):
            # Walk the folder and accumulate; don't reset total_size here or
            # earlier paths in the list would be discarded
            for dirpath, _, filenames in os.walk(path):
                for f in filenames:
                    total_size += os.path.getsize(sp(os.path.join(dirpath, f)))
        elif os.path.isfile(path):
            total_size += os.path.getsize(path)

    return total_size / 1048576  # MB
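# Minimal usage sketch for getSize(), added for illustration and not part of the
# original module; the paths are hypothetical. The helper accepts a single path
# or a list of paths and returns the combined size in megabytes.
def _getSize_examples():
    one = getSize('/downloads/movies/Some.Movie.2014.mkv')        # size of a single file
    many = getSize(['/downloads/movies', '/downloads/unsorted'])  # folders are walked and summed
    return one, many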
def deleteEmptyFolder(self, folder, show_error = True, only_clean = None):
    folder = sp(folder)

    for item in os.listdir(folder):
        full_folder = sp(os.path.join(folder, item))

        if not only_clean or (item in only_clean and os.path.isdir(full_folder)):

            for subfolder, dirs, files in os.walk(full_folder, topdown = False):
                try:
                    os.rmdir(subfolder)
                except:
                    if show_error:
                        log.info2('Couldn\'t remove directory %s: %s', (subfolder, traceback.format_exc()))

    try:
        os.rmdir(folder)
    except:
        if show_error:
            log.error('Couldn\'t remove empty directory %s: %s', (folder, traceback.format_exc()))
def has_hidden_attribute(self, filepath):

    result = False
    try:
        attrs = ctypes.windll.kernel32.GetFileAttributesW(sp(filepath))  #@UndefinedVariable
        assert attrs != -1
        result = bool(attrs & 2)  # 2 = FILE_ATTRIBUTE_HIDDEN
    except (AttributeError, AssertionError):
        pass
    except:
        log.error('Failed getting hidden attribute: %s', traceback.format_exc())

    return result
def test(self):
    """ Test and see if the directory is writable
    :return: boolean
    """

    directory = self.conf("directory")
    if directory and os.path.isdir(directory):

        test_file = sp(os.path.join(directory, "couchpotato_test.txt"))

        # Check if folder is writable
        self.createFile(test_file, "This is a test file")
        if os.path.isfile(test_file):
            os.remove(test_file)
            return True

    return False
def test(self):
    """ Check if connection works
    :return: bool
    """

    directory = self.conf('directory')
    if directory and os.path.isdir(directory):

        test_file = sp(os.path.join(directory, 'couchpotato_test.txt'))

        # Check if folder is writable
        self.createFile(test_file, 'This is a test file')
        if os.path.isfile(test_file):
            os.remove(test_file)
            return True

    return False
def replaceWith(self, path):
    path = sp(path)
    app_dir = Env.get('app_dir')
    data_dir = Env.get('data_dir')

    # Get list of files we want to overwrite
    removePyc(app_dir)
    existing_files = []
    for root, subfiles, filenames in os.walk(app_dir):
        for filename in filenames:
            existing_files.append(os.path.join(root, filename))

    for root, subfiles, filenames in os.walk(path):
        for filename in filenames:
            fromfile = os.path.join(root, filename)
            tofile = os.path.join(app_dir, fromfile.replace(path + os.path.sep, ''))

            if not Env.get('dev'):
                try:
                    if os.path.isfile(tofile):
                        os.remove(tofile)

                    dirname = os.path.dirname(tofile)
                    if not os.path.isdir(dirname):
                        self.makeDir(dirname)

                    shutil.move(fromfile, tofile)
                    try:
                        existing_files.remove(tofile)
                    except ValueError:
                        pass
                except:
                    log.error('Failed overwriting file "%s": %s', (tofile, traceback.format_exc()))
                    return False

    for still_exists in existing_files:

        if data_dir in still_exists:
            continue

        try:
            os.remove(still_exists)
        except:
            log.error('Failed removing non-used file: %s', traceback.format_exc())

    return True
def manifest(handler):

    web_base = Env.get('web_base')
    static_base = Env.get('static_path')

    lines = [
        'CACHE MANIFEST',
        '# %s theme' % ('dark' if Env.setting('dark_theme') else 'light'),
        '',
        'CACHE:',
        ''
    ]

    if not Env.get('dev'):

        # CSS
        for url in fireEvent('clientscript.get_styles', single = True):
            lines.append(web_base + url)

        # Scripts
        for url in fireEvent('clientscript.get_scripts', single = True):
            lines.append(web_base + url)

        # Favicon
        lines.append(static_base + 'images/favicon.ico')

        # Fonts
        font_folder = sp(os.path.join(Env.get('app_dir'), 'whatpotato', 'static', 'fonts'))
        for subfolder, dirs, files in os.walk(font_folder, topdown = False):
            for file in files:
                if '.woff' in file:
                    lines.append(static_base + 'fonts/' + file + ('?%s' % os.path.getmtime(os.path.join(font_folder, file))))
    else:
        lines.append('# Not caching anything in dev mode')

    # End lines
    lines.extend(['', 'NETWORK: ', '*'])

    handler.set_header('Content-Type', 'text/cache-manifest')

    return '\n'.join(lines)
def download(self, url = '', dest = None, overwrite = False, urlopen_kwargs = None):
    if not urlopen_kwargs:
        urlopen_kwargs = {}

    # Return response object to stream download
    urlopen_kwargs['stream'] = True

    if not dest:  # to Cache
        dest = os.path.join(Env.get('cache_dir'), ss('%s.%s' % (md5(url), getExt(url))))

    dest = sp(dest)

    if not overwrite and os.path.isfile(dest):
        return dest

    try:
        filedata = self.urlopen(url, **urlopen_kwargs)
    except:
        log.error('Failed downloading file %s: %s', (url, traceback.format_exc()))
        return False

    self.createFile(dest, filedata, binary = True)
    return dest
def getSubtitleLanguage(self, group):
    detected_languages = {}

    # Subliminal scanner
    paths = None
    try:
        paths = group['files']['movie']
        scan_result = []
        for p in paths:
            if not group['is_dvd']:
                video = Video.from_path(sp(p))
                video_result = [(video, video.scan())]
                scan_result.extend(video_result)

        for video, detected_subtitles in scan_result:
            for s in detected_subtitles:
                if s.language and s.path not in paths:
                    detected_languages[s.path] = [s.language]
    except:
        log.debug('Failed parsing subtitle languages for %s: %s', (paths, traceback.format_exc()))

    # IDX
    for extra in group['files']['subtitle_extra']:
        try:
            if os.path.isfile(extra):
                output = open(extra, 'r')
                txt = output.read()
                output.close()

                idx_langs = re.findall('\nid: (\w+)', txt)

                sub_file = '%s.sub' % os.path.splitext(extra)[0]
                if len(idx_langs) > 0 and os.path.isfile(sub_file):
                    detected_languages[sub_file] = idx_langs
        except:
            log.error('Failed parsing subtitle idx for %s: %s', (extra, traceback.format_exc()))

    return detected_languages
def createFile(self, path, content, binary = False):
    path = sp(path)

    self.makeDir(os.path.dirname(path))

    if os.path.exists(path):
        log.debug('%s already exists, overwriting file with new version', path)

    write_type = 'w+' if not binary else 'w+b'

    # Stream file using response object
    if isinstance(content, requests.models.Response):

        # Write file to temp
        with open('%s.tmp' % path, write_type) as f:
            for chunk in content.iter_content(chunk_size = 1048576):
                if chunk:  # filter out keep-alive new chunks
                    f.write(chunk)
                    f.flush()

        # Rename to destination
        os.rename('%s.tmp' % path, path)

    else:
        try:
            f = open(path, write_type)
            f.write(content)
            f.close()

            try:
                os.chmod(path, Env.getPermission('file'))
            except:
                log.error('Failed writing permission to file "%s": %s', (path, traceback.format_exc()))

        except:
            log.error('Unable to write file "%s": %s', (path, traceback.format_exc()))
            if os.path.isfile(path):
                os.remove(path)
def updateLibrary(self, full = True):
    last_update_key = 'manage.last_update%s' % ('_full' if full else '')
    last_update = float(Env.prop(last_update_key, default = 0))

    if self.in_progress:
        log.info('Already updating library: %s', self.in_progress)
        return
    elif self.isDisabled() or (last_update > time.time() - 20):
        return

    self.in_progress = {}
    fireEvent('notify.frontend', type = 'manage.updating', data = True)

    try:

        directories = self.directories()
        directories.sort()
        added_identifiers = []

        # Add some progress
        for directory in directories:
            self.in_progress[os.path.normpath(directory)] = {
                'started': False,
                'eta': -1,
                'total': None,
                'to_go': None,
            }

        for directory in directories:
            folder = os.path.normpath(directory)
            self.in_progress[os.path.normpath(directory)]['started'] = tryInt(time.time())

            if not os.path.isdir(folder):
                if len(directory) > 0:
                    log.error('Directory doesn\'t exist: %s', folder)
                continue

            log.info('Updating manage library: %s', folder)
            fireEvent('notify.frontend', type = 'manage.update', data = True, message = 'Scanning for movies in "%s"' % folder)

            onFound = self.createAddToLibrary(folder, added_identifiers)
            fireEvent('scanner.scan', folder = folder, simple = True, newer_than = last_update if not full else 0, check_file_date = False, on_found = onFound, single = True)

            # Break if CP wants to shut down
            if self.shuttingDown():
                break

        # If cleanup option is enabled, remove offline files from database
        if self.conf('cleanup') and full and not self.shuttingDown():

            # Get movies with done status
            total_movies, done_movies = fireEvent('media.list', types = 'movie', status = 'done', release_status = 'done', status_or = True, single = True)

            deleted_releases = []
            for done_movie in done_movies:

                if getIdentifier(done_movie) not in added_identifiers:
                    fireEvent('media.delete', media_id = done_movie['_id'], delete_from = 'all')
                else:

                    releases = done_movie.get('releases', [])

                    for release in releases:
                        if release.get('files'):
                            brk = False
                            for file_type in release.get('files', {}):
                                for release_file in release['files'][file_type]:
                                    # Remove release not available anymore
                                    if not os.path.isfile(sp(release_file)):
                                        fireEvent('release.clean', release['_id'])
                                        brk = True
                                        break
                                if brk:
                                    break

                    # Check if there are duplicate releases (different quality) use the last one, delete the rest
                    if len(releases) > 1:
                        used_files = {}
                        for release in releases:
                            for file_type in release.get('files', {}):
                                for release_file in release['files'][file_type]:
                                    already_used = used_files.get(release_file)

                                    if already_used:
                                        release_id = release['_id'] if already_used.get('last_edit', 0) > release.get('last_edit', 0) else already_used['_id']
                                        if release_id not in deleted_releases:
                                            fireEvent('release.delete', release_id, single = True)
                                            deleted_releases.append(release_id)
                                        break
                                    else:
                                        used_files[release_file] = release
                        del used_files

                # Break if CP wants to shut down
                if self.shuttingDown():
                    break

        if not self.shuttingDown():
            db = get_db()
            db.reindex()

        Env.prop(last_update_key, time.time())
    except:
        log.error('Failed updating library: %s', (traceback.format_exc()))

    while self.in_progress and len(self.in_progress) > 0 and not self.shuttingDown():

        delete_me = {}

        # noinspection PyTypeChecker
        for folder in self.in_progress:
            if self.in_progress[folder]['to_go'] <= 0:
                delete_me[folder] = True

        for delete in delete_me:
            del self.in_progress[delete]

        time.sleep(1)

    fireEvent('notify.frontend', type = 'manage.updating', data = False)
    self.in_progress = False
def getAllDownloadStatus(self, ids):
    """ Get status of all active downloads

    :param ids: list of (mixed) downloader ids
        Used to match the releases for this downloader as there could be
        other downloaders active that it should ignore
    :return: list of releases
    """

    log.debug('Checking NZBGet download status.')

    rpc = self.getRPC()

    try:
        if rpc.writelog('INFO', 'CouchPotato connected to check status'):
            log.debug('Successfully connected to NZBGet')
        else:
            log.info('Successfully connected to NZBGet, but unable to send a message')
    except socket.error:
        log.error('NZBGet is not responding. Please ensure that NZBGet is running and host setting is correct.')
        return []
    except xmlrpclib.ProtocolError as e:
        if e.errcode == 401:
            log.error('Password is incorrect.')
        else:
            log.error('Protocol Error: %s', e)
        return []

    # Get NZBGet data
    try:
        status = rpc.status()
        groups = rpc.listgroups()
        queue = rpc.postqueue(0)
        history = rpc.history()
    except:
        log.error('Failed getting data: %s', traceback.format_exc(1))
        return []

    release_downloads = ReleaseDownloadList(self)

    for nzb in groups:
        try:
            nzb_id = [param['Value'] for param in nzb['Parameters'] if param['Name'] == 'whatpotato'][0]
        except:
            nzb_id = nzb['NZBID']

        if nzb_id in ids:
            log.debug('Found %s in NZBGet download queue', nzb['NZBFilename'])
            timeleft = -1
            try:
                if nzb['ActiveDownloads'] > 0 and nzb['DownloadRate'] > 0 and not (status['DownloadPaused'] or status['Download2Paused']):
                    # RemainingSizeMB is in MB, DownloadRate in bytes/s, so convert with 2 ** 20
                    timeleft = str(timedelta(seconds = nzb['RemainingSizeMB'] / status['DownloadRate'] * 2 ** 20))
            except:
                pass

            release_downloads.append({
                'id': nzb_id,
                'name': nzb['NZBFilename'],
                'original_status': 'DOWNLOADING' if nzb['ActiveDownloads'] > 0 else 'QUEUED',
                # Seems to have no native API function for time left. This will return the time left after NZBGet started downloading this item
                'timeleft': timeleft,
            })

    for nzb in queue:  # 'Parameters' is not passed in rpc.postqueue
        if nzb['NZBID'] in ids:
            log.debug('Found %s in NZBGet postprocessing queue', nzb['NZBFilename'])
            release_downloads.append({
                'id': nzb['NZBID'],
                'name': nzb['NZBFilename'],
                'original_status': nzb['Stage'],
                'timeleft': str(timedelta(seconds = 0)) if not status['PostPaused'] else -1,
            })

    for nzb in history:
        try:
            nzb_id = [param['Value'] for param in nzb['Parameters'] if param['Name'] == 'whatpotato'][0]
        except:
            nzb_id = nzb['NZBID']

        if nzb_id in ids:
            log.debug('Found %s in NZBGet history. TotalStatus: %s, ParStatus: %s, ScriptStatus: %s, Log: %s',
                      (nzb['NZBFilename'], nzb['Status'], nzb['ParStatus'], nzb['ScriptStatus'], nzb['Log']))
            release_downloads.append({
                'id': nzb_id,
                'name': nzb['NZBFilename'],
                'status': 'completed' if 'SUCCESS' in nzb['Status'] else 'failed',
                'original_status': nzb['Status'],
                'timeleft': str(timedelta(seconds = 0)),
                'folder': sp(nzb['DestDir'])
            })

    return release_downloads
def getAllDownloadStatus(self, ids):
    """ Get status of all active downloads

    :param ids: list of (mixed) downloader ids
        Used to match the releases for this downloader as there could be
        other downloaders active that it should ignore
    :return: list of releases
    """

    log.debug('Checking uTorrent download status.')

    if not self.connect():
        return []

    release_downloads = ReleaseDownloadList(self)

    data = self.utorrent_api.get_status()
    if not data:
        log.error('Error getting data from uTorrent')
        return []

    queue = json.loads(data)
    if queue.get('error'):
        log.error('Error getting data from uTorrent: %s', queue.get('error'))
        return []

    if not queue.get('torrents'):
        log.debug('Nothing in queue')
        return []

    # Get torrents
    for torrent in queue['torrents']:
        if torrent[0] in ids:

            # Get files of the torrent
            torrent_files = []
            try:
                torrent_files = json.loads(self.utorrent_api.get_files(torrent[0]))
                torrent_files = [sp(os.path.join(torrent[26], torrent_file[0])) for torrent_file in torrent_files['files'][1]]
            except:
                log.debug('Failed getting files from torrent: %s', torrent[2])

            status = 'busy'
            if (torrent[1] & self.status_flags['STARTED'] or torrent[1] & self.status_flags['QUEUED']) and torrent[4] == 1000:
                status = 'seeding'
            elif torrent[1] & self.status_flags['ERROR']:
                status = 'failed'
            elif torrent[4] == 1000:
                status = 'completed'

            if not status == 'busy':
                self.removeReadOnly(torrent_files)

            release_downloads.append({
                'id': torrent[0],
                'name': torrent[2],
                'status': status,
                'seed_ratio': float(torrent[7]) / 1000,
                'original_status': torrent[1],
                'timeleft': str(timedelta(seconds = torrent[10])),
                'folder': sp(torrent[26]),
                'files': torrent_files
            })

    return release_downloads
def getAllDownloadStatus(self, ids):
    """ Get status of all active downloads

    :param ids: list of (mixed) downloader ids
        Used to match the releases for this downloader as there could be
        other downloaders active that it should ignore
    :return: list of releases
    """

    log.debug('Checking Transmission download status.')

    if not self.connect():
        return []

    release_downloads = ReleaseDownloadList(self)

    return_params = {
        'fields': ['id', 'name', 'hashString', 'percentDone', 'status', 'eta', 'isStalled', 'isFinished', 'downloadDir', 'uploadRatio', 'secondsSeeding', 'seedIdleLimit', 'files']
    }

    session = self.trpc.get_session()
    queue = self.trpc.get_alltorrents(return_params)

    if not (queue and queue.get('torrents')):
        log.debug('Nothing in queue or error')
        return []

    for torrent in queue['torrents']:
        if torrent['hashString'] in ids:
            log.debug('name=%s / id=%s / downloadDir=%s / hashString=%s / percentDone=%s / status=%s / isStalled=%s / eta=%s / uploadRatio=%s / isFinished=%s / incomplete-dir-enabled=%s / incomplete-dir=%s',
                      (torrent['name'], torrent['id'], torrent['downloadDir'], torrent['hashString'], torrent['percentDone'], torrent['status'], torrent.get('isStalled', 'N/A'), torrent['eta'], torrent['uploadRatio'], torrent['isFinished'], session['incomplete-dir-enabled'], session['incomplete-dir']))

            status = 'busy'
            if torrent.get('isStalled') and not torrent['percentDone'] == 1 and self.conf('stalled_as_failed'):
                status = 'failed'
            elif torrent['status'] == 0 and torrent['percentDone'] == 1:
                status = 'completed'
            elif torrent['status'] == 16 and torrent['percentDone'] == 1:
                status = 'completed'
            elif torrent['status'] in [5, 6]:
                status = 'seeding'

            if session['incomplete-dir-enabled'] and status == 'busy':
                torrent_folder = session['incomplete-dir']
            else:
                torrent_folder = torrent['downloadDir']

            torrent_files = []
            for file_item in torrent['files']:
                torrent_files.append(sp(os.path.join(torrent_folder, file_item['name'])))

            release_downloads.append({
                'id': torrent['hashString'],
                'name': torrent['name'],
                'status': status,
                'original_status': torrent['status'],
                'seed_ratio': torrent['uploadRatio'],
                'timeleft': str(timedelta(seconds = torrent['eta'])),
                'folder': sp(torrent_folder if len(torrent_files) == 1 else os.path.join(torrent_folder, torrent['name'])),
                'files': torrent_files
            })

    return release_downloads
def scan(self, folder = None, files = None, release_download = None, simple = False, newer_than = 0, return_ignored = True, check_file_date = True, on_found = None):

    folder = sp(folder)

    if not folder or not os.path.isdir(folder):
        log.error('Folder doesn\'t exist: %s', folder)
        return {}

    # Get movie "master" files
    movie_files = {}
    leftovers = []

    # Scan all files of the folder if no files are set
    if not files:
        try:
            files = []
            for root, dirs, walk_files in os.walk(folder, followlinks=True):
                files.extend([sp(os.path.join(sp(root), ss(filename))) for filename in walk_files])

                # Break if CP wants to shut down
                if self.shuttingDown():
                    break

        except:
            log.error('Failed getting files from %s: %s', (folder, traceback.format_exc()))

        log.debug('Found %s files to scan and group in %s', (len(files), folder))
    else:
        check_file_date = False
        files = [sp(x) for x in files]

    for file_path in files:

        if not os.path.exists(file_path):
            continue

        # Remove ignored files
        if self.isSampleFile(file_path):
            leftovers.append(file_path)
            continue
        elif not self.keepFile(file_path):
            continue

        is_dvd_file = self.isDVDFile(file_path)
        if self.filesizeBetween(file_path, self.file_sizes['movie']) or is_dvd_file:  # Minimal 300MB files or is DVD file

            # Normal identifier
            identifier = self.createStringIdentifier(file_path, folder, exclude_filename = is_dvd_file)
            identifiers = [identifier]

            # Identifier with quality
            quality = fireEvent('quality.guess', files = [file_path], size = self.getFileSize(file_path), single = True) if not is_dvd_file else {'identifier': 'dvdr'}
            if quality:
                identifier_with_quality = '%s %s' % (identifier, quality.get('identifier', ''))
                identifiers = [identifier_with_quality, identifier]

            if not movie_files.get(identifier):
                movie_files[identifier] = {
                    'unsorted_files': [],
                    'identifiers': identifiers,
                    'is_dvd': is_dvd_file,
                }

            movie_files[identifier]['unsorted_files'].append(file_path)
        else:
            leftovers.append(file_path)

        # Break if CP wants to shut down
        if self.shuttingDown():
            break

    # Cleanup
    del files

    # Sort reverse, this prevents "Iron man 2" from getting grouped with "Iron man" as the "Iron Man 2"
    # files will be grouped first.
    leftovers = set(sorted(leftovers, reverse = True))

    # Group files minus extension
    ignored_identifiers = []
    for identifier, group in movie_files.items():
        if identifier not in group['identifiers'] and len(identifier) > 0:
            group['identifiers'].append(identifier)

        log.debug('Grouping files: %s', identifier)

        has_ignored = 0
        for file_path in list(group['unsorted_files']):
            ext = getExt(file_path)
            wo_ext = file_path[:-(len(ext) + 1)]
            found_files = set([i for i in leftovers if wo_ext in i])
            group['unsorted_files'].extend(found_files)
            leftovers = leftovers - found_files

            has_ignored += 1 if ext == 'ignore' else 0

        if has_ignored == 0:
            for file_path in list(group['unsorted_files']):
                ext = getExt(file_path)
                has_ignored += 1 if ext == 'ignore' else 0

        if has_ignored > 0:
            ignored_identifiers.append(identifier)

        # Break if CP wants to shut down
        if self.shuttingDown():
            break

    # Create identifiers for all leftover files
    path_identifiers = {}
    for file_path in leftovers:
        identifier = self.createStringIdentifier(file_path, folder)

        if not path_identifiers.get(identifier):
            path_identifiers[identifier] = []

        path_identifiers[identifier].append(file_path)

    # Group the files based on the identifier
    delete_identifiers = []
    for identifier, found_files in path_identifiers.items():
        log.debug('Grouping files on identifier: %s', identifier)

        group = movie_files.get(identifier)
        if group:
            group['unsorted_files'].extend(found_files)
            delete_identifiers.append(identifier)

            # Remove the found files from the leftover stack
            leftovers = leftovers - set(found_files)

        # Break if CP wants to shut down
        if self.shuttingDown():
            break

    # Cleaning up used
    for identifier in delete_identifiers:
        if path_identifiers.get(identifier):
            del path_identifiers[identifier]
    del delete_identifiers

    # Group based on folder
    delete_identifiers = []
    for identifier, found_files in path_identifiers.items():
        log.debug('Grouping files on foldername: %s', identifier)

        for ff in found_files:
            new_identifier = self.createStringIdentifier(os.path.dirname(ff), folder)

            group = movie_files.get(new_identifier)
            if group:
                group['unsorted_files'].extend([ff])
                delete_identifiers.append(identifier)

                # Remove the found file from the leftover stack
                leftovers -= set([ff])

        # Break if CP wants to shut down
        if self.shuttingDown():
            break

    # leftovers should be empty
    if leftovers:
        log.debug('Some files are still left over: %s', leftovers)

    # Cleaning up used
    for identifier in delete_identifiers:
        if path_identifiers.get(identifier):
            del path_identifiers[identifier]
    del delete_identifiers

    # Make sure we remove older / still extracting files
    valid_files = {}
    while True and not self.shuttingDown():
        try:
            identifier, group = movie_files.popitem()
        except:
            break

        # Check if movie is fresh and maybe still unpacking, ignore files newer than 1 minute
        if check_file_date:
            files_too_new, time_string = self.checkFilesChanged(group['unsorted_files'])
            if files_too_new:
                log.info('Files seem to be still unpacking or just unpacked (created on %s), ignoring for now: %s', (time_string, identifier))

                # Delete the unsorted list
                del group['unsorted_files']

                continue

        # Only process movies newer than x
        if newer_than and newer_than > 0:
            has_new_files = False
            for cur_file in group['unsorted_files']:
                file_time = self.getFileTimes(cur_file)
                if file_time[0] > newer_than or file_time[1] > newer_than:
                    has_new_files = True
                    break

            if not has_new_files:
                log.debug('None of the files have changed since %s for %s, skipping.', (time.ctime(newer_than), identifier))

                # Delete the unsorted list
                del group['unsorted_files']

                continue
        valid_files[identifier] = group

    del movie_files

    total_found = len(valid_files)

    # Make sure only one movie was found if a download ID is provided
    if release_download and total_found == 0:
        log.info('Download ID provided (%s), but no groups found! Make sure the download contains valid media files (fully extracted).', release_download.get('imdb_id'))
    elif release_download and total_found > 1:
        log.info('Download ID provided (%s), but more than one group found (%s). Ignoring Download ID...', (release_download.get('imdb_id'), len(valid_files)))
        release_download = None

    # Determine file types
    processed_movies = {}
    while True and not self.shuttingDown():
        try:
            identifier, group = valid_files.popitem()
        except:
            break

        if return_ignored is False and identifier in ignored_identifiers:
            log.debug('Ignore file found, ignoring release: %s', identifier)
            total_found -= 1
            continue

        # Group extra (and easy) files first
        group['files'] = {
            'movie_extra': self.getMovieExtras(group['unsorted_files']),
            'subtitle': self.getSubtitles(group['unsorted_files']),
            'subtitle_extra': self.getSubtitlesExtras(group['unsorted_files']),
            'nfo': self.getNfo(group['unsorted_files']),
            'trailer': self.getTrailers(group['unsorted_files']),
            'leftover': set(group['unsorted_files']),
        }

        # Media files
        if group['is_dvd']:
            group['files']['movie'] = self.getDVDFiles(group['unsorted_files'])
        else:
            group['files']['movie'] = self.getMediaFiles(group['unsorted_files'])

        if len(group['files']['movie']) == 0:
            log.error('Couldn\'t find any movie files for %s', identifier)
            total_found -= 1
            continue

        log.debug('Getting metadata for %s', identifier)
        group['meta_data'] = self.getMetaData(group, folder = folder, release_download = release_download)

        # Subtitle meta
        group['subtitle_language'] = self.getSubtitleLanguage(group) if not simple else {}

        # Get parent dir from movie files
        for movie_file in group['files']['movie']:
            group['parentdir'] = os.path.dirname(movie_file)
            group['dirname'] = None

            folder_names = group['parentdir'].replace(folder, '').split(os.path.sep)
            folder_names.reverse()

            # Try and get a proper dirname, so no "A", "Movie", "Download" etc
            for folder_name in folder_names:
                if folder_name.lower() not in self.ignore_names and len(folder_name) > 2:
                    group['dirname'] = folder_name
                    break

            break

        # Leftover "sorted" files
        for file_type in group['files']:
            if file_type != 'leftover':
                group['files']['leftover'] -= set(group['files'][file_type])
                group['files'][file_type] = list(group['files'][file_type])
        group['files']['leftover'] = list(group['files']['leftover'])

        # Delete the unsorted list
        del group['unsorted_files']

        # Determine movie
        group['media'] = self.determineMedia(group, release_download = release_download)
        if not group['media']:
            log.error('Unable to determine media: %s', group['identifiers'])
        else:
            group['identifier'] = getIdentifier(group['media']) or group['media']['info'].get('imdb')
            processed_movies[identifier] = group

            # Notify parent & progress on something found
            if on_found:
                on_found(group, total_found, len(valid_files))

            # Wait for all the async events calm down a bit
            while threading.activeCount() > 100 and not self.shuttingDown():
                log.debug('Too many threads active, waiting a few seconds')
                time.sleep(10)

    if len(processed_movies) > 0:
        log.info('Found %s movies in the folder %s', (len(processed_movies), folder))
    else:
        log.debug('Found no movies in the folder %s', folder)

    return processed_movies
def runCouchPotato(options, base_path, args, data_dir = None, log_dir = None, Env = None, desktop = None):

    try:
        locale.setlocale(locale.LC_ALL, "")
        encoding = locale.getpreferredencoding()
    except (locale.Error, IOError):
        encoding = None

    # for OSes that are poorly configured I'll just force UTF-8
    if not encoding or encoding in ('ANSI_X3.4-1968', 'US-ASCII', 'ASCII'):
        encoding = 'UTF-8'

    Env.set('encoding', encoding)

    # Do db stuff
    db_path = sp(os.path.join(data_dir, 'database'))
    old_db_path = os.path.join(data_dir, 'whatpotato.db')

    # Remove database folder if both exists
    if os.path.isdir(db_path) and os.path.isfile(old_db_path):
        db = SuperThreadSafeDatabase(db_path)
        db.open()
        db.destroy()

    # Check if database exists
    db = SuperThreadSafeDatabase(db_path)
    db_exists = db.exists()
    if db_exists:

        # Backup before start and cleanup old backups
        backup_path = sp(os.path.join(data_dir, 'db_backup'))
        backup_count = 5
        existing_backups = []
        if not os.path.isdir(backup_path):
            os.makedirs(backup_path)

        for root, dirs, files in os.walk(backup_path):
            # Only consider files being a direct child of the backup_path
            if root == backup_path:
                for backup_file in sorted(files):
                    ints = re.findall('\d+', backup_file)

                    # Delete non zip files
                    if len(ints) != 1:
                        try:
                            os.remove(os.path.join(root, backup_file))
                        except:
                            pass
                    else:
                        existing_backups.append((int(ints[0]), backup_file))
            else:
                # Delete stray directories.
                shutil.rmtree(root)

        # Remove all but the last 5
        for eb in existing_backups[:-backup_count]:
            os.remove(os.path.join(backup_path, eb[1]))

        # Create new backup
        new_backup = sp(os.path.join(backup_path, '%s.tar.gz' % int(time.time())))
        zipf = tarfile.open(new_backup, 'w:gz')
        for root, dirs, files in os.walk(db_path):
            for zfilename in files:
                zipf.add(os.path.join(root, zfilename), arcname = 'database/%s' % os.path.join(root[len(db_path) + 1:], zfilename))
        zipf.close()

        # Open last
        db.open()

    else:
        db.create()

    # Force creation of cachedir
    log_dir = sp(log_dir)
    cache_dir = sp(os.path.join(data_dir, 'cache'))
    python_cache = sp(os.path.join(cache_dir, 'python'))

    if not os.path.exists(cache_dir):
        os.mkdir(cache_dir)
    if not os.path.exists(python_cache):
        os.mkdir(python_cache)

    session = requests.Session()
    session.max_redirects = 5

    # Register environment settings
    Env.set('app_dir', sp(base_path))
    Env.set('data_dir', sp(data_dir))
    Env.set('log_path', sp(os.path.join(log_dir, 'CouchPotato.log')))
    Env.set('db', db)
    Env.set('http_opener', session)
    Env.set('cache_dir', cache_dir)
    Env.set('cache', FileSystemCache(python_cache))
    Env.set('console_log', options.console_log)
    Env.set('quiet', options.quiet)
    Env.set('desktop', desktop)
    Env.set('daemonized', options.daemon)
    Env.set('args', args)
    Env.set('options', options)

    # Determine debug
    debug = options.debug or Env.setting('debug', default = False, type = 'bool')
    Env.set('debug', debug)

    # Development
    development = Env.setting('development', default = False, type = 'bool')
    Env.set('dev', development)

    # Disable logging for some modules
    for logger_name in ['enzyme', 'guessit', 'subliminal', 'apscheduler', 'tornado', 'requests']:
        logging.getLogger(logger_name).setLevel(logging.ERROR)

    for logger_name in ['gntp']:
        logging.getLogger(logger_name).setLevel(logging.WARNING)

    # Disable SSL warning
    disable_warnings()

    # Use reloader
    reloader = debug is True and development and not Env.get('desktop') and not options.daemon

    # Logger
    logger = logging.getLogger()
    formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s', '%m-%d %H:%M:%S')
    level = logging.DEBUG if debug else logging.INFO
    logger.setLevel(level)
    logging.addLevelName(19, 'INFO')

    # To screen
    if (debug or options.console_log) and not options.quiet and not options.daemon:
        hdlr = logging.StreamHandler(sys.stderr)
        hdlr.setFormatter(formatter)
        logger.addHandler(hdlr)

    # To file
    hdlr2 = handlers.RotatingFileHandler(Env.get('log_path'), 'a', 500000, 10, encoding = Env.get('encoding'))
    hdlr2.setFormatter(formatter)
    logger.addHandler(hdlr2)

    # Start logging & enable colors
    # noinspection PyUnresolvedReferences
    import color_logs
    from whatpotato.core.logger import CPLog
    log = CPLog(__name__)
    log.debug('Started with options %s', options)

    # Check available space
    try:
        total_space, available_space = getFreeSpace(data_dir)
        if available_space < 100:
            log.error('Shutting down as CP needs some space to work. You\'ll get corrupted data otherwise. Only %sMB left', available_space)
            return
    except:
        log.error('Failed getting diskspace: %s', traceback.format_exc())

    def customwarn(message, category, filename, lineno, file = None, line = None):
        log.warning('%s %s %s line:%s', (category, message, filename, lineno))
    warnings.showwarning = customwarn

    # Create app
    from whatpotato import WebHandler
    web_base = ('/' + Env.setting('url_base').lstrip('/') + '/') if Env.setting('url_base') else '/'
    Env.set('web_base', web_base)

    api_key = Env.setting('api_key')
    if not api_key:
        api_key = uuid4().hex
        Env.setting('api_key', value = api_key)

    api_base = r'%sapi/%s/' % (web_base, api_key)
    Env.set('api_base', api_base)

    # Basic config
    host = Env.setting('host', default = '0.0.0.0')
    host6 = Env.setting('host6', default = '::')

    config = {
        'use_reloader': reloader,
        'port': tryInt(Env.setting('port', default = 5050)),
        'host': host if host and len(host) > 0 else '0.0.0.0',
        'host6': host6 if host6 and len(host6) > 0 else '::',
        'ssl_cert': Env.setting('ssl_cert', default = None),
        'ssl_key': Env.setting('ssl_key', default = None),
    }

    # Load the app
    application = Application(
        [],
        log_function = lambda x: None,
        debug = config['use_reloader'],
        gzip = True,
        cookie_secret = api_key,
        login_url = '%slogin/' % web_base,
    )
    Env.set('app', application)

    # Request handlers
    application.add_handlers(".*$", [
        (r'%snonblock/(.*)(/?)' % api_base, NonBlockHandler),

        # API handlers
        (r'%s(.*)(/?)' % api_base, ApiHandler),  # Main API handler
        (r'%sgetkey(/?)' % web_base, KeyHandler),  # Get API key
        (r'%s' % api_base, RedirectHandler, {"url": web_base + 'docs/'}),  # API docs

        # Login handlers
        (r'%slogin(/?)' % web_base, LoginHandler),
        (r'%slogout(/?)' % web_base, LogoutHandler),

        # Catch all webhandlers
        (r'%s(.*)(/?)' % web_base, WebHandler),
        (r'(.*)', WebHandler),
    ])

    # Static paths
    static_path = '%sstatic/' % web_base
    for dir_name in ['fonts', 'images', 'scripts', 'style']:
        application.add_handlers(".*$", [
            ('%s%s/(.*)' % (static_path, dir_name), StaticFileHandler, {'path': sp(os.path.join(base_path, 'whatpotato', 'static', dir_name))})
        ])
    Env.set('static_path', static_path)

    # Load configs & plugins
    loader = Env.get('loader')
    loader.preload(root = sp(base_path))
    loader.run()

    # Fill database with needed stuff
    fireEvent('database.setup')
    if not db_exists:
        fireEvent('app.initialize', in_order = True)
    fireEvent('app.migrate')

    # Go go go!
    from tornado.ioloop import IOLoop
    from tornado.autoreload import add_reload_hook
    loop = IOLoop.current()

    # Reload hook
    def reload_hook():
        fireEvent('app.shutdown')
    add_reload_hook(reload_hook)

    # Some logging and fire load event
    try:
        log.info('Starting server on port %(port)s', config)
    except:
        pass
    fireEventAsync('app.load')

    ssl_options = None
    if config['ssl_cert'] and config['ssl_key']:
        ssl_options = {
            'certfile': config['ssl_cert'],
            'keyfile': config['ssl_key'],
        }

    server = HTTPServer(application, no_keep_alive = True, ssl_options = ssl_options)

    try_restart = True
    restart_tries = 5

    while try_restart:
        try:
            server.listen(config['port'], config['host'])

            if Env.setting('ipv6', default = False):
                try:
                    server.listen(config['port'], config['host6'])
                except:
                    log.info2('Tried to bind to IPV6 but failed')

            loop.start()
            server.close_all_connections()
            server.stop()
            loop.close(all_fds = True)
        except Exception as e:
            log.error('Failed starting: %s', traceback.format_exc())
            try:
                nr, msg = e
                if nr == 48:
                    log.info('Port (%s) needed for CouchPotato is already in use, try %s more time after few seconds', (config.get('port'), restart_tries))
                    time.sleep(1)
                    restart_tries -= 1

                    if restart_tries > 0:
                        continue
                    else:
                        return
            except ValueError:
                return
            except:
                pass

            raise

        try_restart = False
def getAllDownloadStatus(self, ids):
    """ Get status of all active downloads

    :param ids: list of (mixed) downloader ids
        Used to match the releases for this downloader as there could be
        other downloaders active that it should ignore
    :return: list of releases
    """

    log.debug('Checking SABnzbd download status.')

    # Go through Queue
    try:
        queue = self.call({
            'mode': 'queue',
        })
    except:
        log.error('Failed getting queue: %s', traceback.format_exc(1))
        return []

    # Go through history items
    try:
        history = self.call({
            'mode': 'history',
            'limit': 15,
        })
    except:
        log.error('Failed getting history json: %s', traceback.format_exc(1))
        return []

    release_downloads = ReleaseDownloadList(self)

    # Get busy releases
    for nzb in queue.get('slots', []):
        if nzb['nzo_id'] in ids:
            status = 'busy'
            if 'ENCRYPTED / ' in nzb['filename']:
                status = 'failed'

            release_downloads.append({
                'id': nzb['nzo_id'],
                'name': nzb['filename'],
                'status': status,
                'original_status': nzb['status'],
                'timeleft': nzb['timeleft'] if not queue['paused'] else -1,
            })

    # Get old releases
    for nzb in history.get('slots', []):
        if nzb['nzo_id'] in ids:
            status = 'busy'
            if nzb['status'] == 'Failed' or (nzb['status'] == 'Completed' and nzb['fail_message'].strip()):
                status = 'failed'
            elif nzb['status'] == 'Completed':
                status = 'completed'

            release_downloads.append({
                'id': nzb['nzo_id'],
                'name': nzb['name'],
                'status': status,
                'original_status': nzb['status'],
                'timeleft': str(timedelta(seconds = 0)),
                'folder': sp(os.path.dirname(nzb['storage']) if os.path.isfile(nzb['storage']) else nzb['storage']),
            })

    return release_downloads