def update_filelist(nzbid):
    """Scan the NZB download directory for new files and process each one.

    A lock file guarantees only one instance updates the cache at a time.
    Newly seen filenames are appended to the per-NZB cache file so they are
    not processed again on the next invocation.
    """
    # If a lock already exists in updating the cache file, bail out.
    if nzb.lock_exists(LOCK_FILELIST):
        return

    # Get the list of files from cache and from disk.
    nzb.lock_create(LOCK_FILELIST)

    try:
        cache_filepath = get_cache_filepath(nzbid)
        directory = nzb.get_nzb_directory()

        if not os.path.isdir(directory):
            # Bail out early; os.listdir() on a missing directory would raise.
            nzb.log_warning('Directory %s does not appear valid.' % directory)
            return

        filelist = nzb.get_new_files(os.listdir(directory), cache_filepath)

        # Cache the files that we've found that we just processed.
        # The with-statement closes the file; no explicit close() is needed.
        with open(cache_filepath, 'a') as cachefile:
            for filename in filelist:
                name, extension = os.path.splitext(filename)

                # Skip in-progress (.tmp) files; they will be picked up once
                # renamed to their final name.
                if extension != '.tmp':
                    cachefile.write(filename + '\n')
                    process_download(directory, filename)
    except Exception as e:
        traceback.print_exc()
        nzb.log_error(e)
        raise
    finally:
        nzb.lock_release(LOCK_FILELIST)
def reorder_queued_items(nzbid):
    """
    Finds the last part of the RAR archive and moves to the top of the queue.
    """
    # Another script may have already done the sorting; nothing to do then.
    if bool(nzb.get_script_variable('RAR_SORTED')):
        nzb.log_info('Last RAR file was already sorted.')
        return

    # Ask the server for the files belonging to this NZB and pull out the
    # RAR volumes with their parsed part numbers.
    client = nzb.proxy()
    rar_parts = nzb.get_rar_xmlfiles(client.listfiles(0, 0, nzbid))

    if not rar_parts:
        nzb.log_warning('Failed to get list of files to sort.')
        return

    # The volume with the highest part number is the last RAR file; it must
    # become the first item in the queue.
    last_part = max(rar_parts, key=operator.itemgetter('number'))
    last_name = last_part['filename']
    last_id = int(last_part['fileid'])

    if client.editqueue('FileMoveTop', 0, '', [last_id]):
        nzb.log_detail('Moved last RAR file %s to the top.' % last_name)
        nzb.set_script_variable('RAR_SORTED', True)
    else:
        nzb.log_warning('Failed to move the last RAR file %s.' % last_name)
def process_download(directory, filename):
    """Inspect the contents of a downloaded RAR file and cache the results.

    Archive members not already present in the per-NZB contents cache are
    inspected and then appended to the cache so subsequent calls skip them.
    """
    if not os.path.isdir(directory):
        # Bail out early; a missing directory cannot contain the file.
        nzb.log_warning('Directory %s does not appear valid.' % directory)
        return

    filepath = os.path.join(directory, filename)
    cache_filepath = get_cache_filepath('%s-contents' % nzb.get_nzb_id())
    contentlist = nzb.get_rar_filelist(filepath)
    filelist = nzb.get_new_files(contentlist, cache_filepath)

    # The with-statement closes the cache file; no explicit close() is needed.
    with open(cache_filepath, 'a') as cachefile:
        # 'entry' rather than 'file' to avoid shadowing the builtin.
        for entry in filelist:
            inspect_rar_content(directory, entry)
            cachefile.write(entry + '\n')
def on_scheduled():
    """Hide finished history entries whose category we manage."""
    categories = get_categories()
    proxy = nzb.proxy()
    histories = proxy.history()

    nzb.log_info('Processing histories...')

    for history in histories:
        nzbid = int(history['NZBID'])

        # Only touch fully successful downloads in one of our categories
        # that already have a final directory assigned.
        eligible = (history['FinalDir']
                    and history['Category'] in categories
                    and history['Status'] == 'SUCCESS/ALL')

        if eligible and not proxy.editqueue('HistoryDelete', 0, '', [nzbid]):
            nzb.log_warning('Failed to mark %s as hidden.' % nzbid)

    nzb.log_info('Completed processing histories.')
def on_scheduled():
    """Remove completed, category-matched downloads from the visible history."""
    categories = get_categories()
    proxy = nzb.proxy()
    histories = proxy.history()

    nzb.log_info("Processing histories...")

    for entry in histories:
        nzbid = int(entry["NZBID"])

        # Skip anything without a final directory, outside our categories,
        # or that did not finish completely successfully.
        if not entry["FinalDir"]:
            continue
        if entry["Category"] not in categories:
            continue
        if entry["Status"] != "SUCCESS/ALL":
            continue

        if not proxy.editqueue("HistoryDelete", 0, "", [nzbid]):
            nzb.log_warning("Failed to mark %s as hidden." % nzbid)

    nzb.log_info("Completed processing histories.")
def on_post_processing():
    """Move the largest downloaded file into its category folder and clean up.

    Copies the largest file from the finished download into the category's
    target directory, points NZBGet at the new location, and removes the
    original download directory.
    """
    directory = nzb.get_nzb_directory()
    category = nzb.get_nzb_category()
    target = get_category_path(category)

    if not (os.path.isdir(directory) and target):
        nzb.log_warning('Directory %s does not exist.' % directory)
        return

    # We need to move the files, delete the directory, and hide the NZB
    # from history.
    # 'largest' rather than 'file' to avoid shadowing the builtin.
    largest = get_largest_file(category, directory, target)

    if not largest:
        nzb.log_warning('Failed to find largest video file.')
        return

    nzb.log_detail('Found largest file %s.' % largest)

    source_path = largest
    target_path = os.path.join(target, os.path.basename(largest))

    if os.path.isfile(target_path):
        # Never overwrite an existing target, and never delete the source
        # directory when nothing was copied.
        nzb.log_warning('File %s already exists.' % target_path)
        return

    nzb.log_detail('Copying %s to %s.' % (largest, target_path))
    shutil.copyfile(source_path, target_path)
    nzb.set_nzb_directory_final(target)
    shutil.rmtree(directory)
    nzb.log_detail('Deleted directory %s.' % directory)
def on_post_processing():
    """Relocate the largest downloaded file into its category directory."""
    directory = nzb.get_nzb_directory()
    category = nzb.get_nzb_category()
    target = get_category_path(category)

    if os.path.isdir(directory) and target:
        # We need to move the files, delete the directory, and hide the NZB
        # from history.
        candidate = get_largest_file(category, directory, target)
        if candidate:
            nzb.log_detail("Found largest file %s." % candidate)
            destination = os.path.join(target, os.path.basename(candidate))
            if os.path.isfile(destination):
                nzb.log_warning("File %s already exists." % destination)
            else:
                nzb.log_detail("Copying %s to %s." % (candidate, destination))
                shutil.copyfile(candidate, destination)
                nzb.set_nzb_directory_final(target)
                shutil.rmtree(directory)
                nzb.log_detail("Deleted directory %s." % directory)
        else:
            nzb.log_warning("Failed to find largest video file.")
    else:
        nzb.log_warning("Directory %s does not exist." % directory)
def on_post_processing():
    """Requeue a download that failed its health check.

    Pauses the file group and returns it to the queue for a retry, subject
    to the configured age and retry limits.
    """
    # Create a lock so that the scheduler also doesn't try to run.
    nzb.lock_reset(SCRIPT_NAME)

    status = nzb.get_nzb_status()
    if status != 'FAILURE/HEALTH':
        nzb.log_detail('Nothing to do, status was %s.' % status)
        nzb.exit(nzb.PROCESS_SUCCESS)

    try:
        nzbid = nzb.get_nzb_id()
        nzbname = nzb.get_nzb_name()

        nzb.log_detail('Performing health check on %s (%s).' % (nzbname, status))
        check_limit_age(nzbid, nzbname)
        check_limit_retries(nzbid, nzbname)

        # Stop all other post-processing because we need to requeue the file.
        nzb.log_warning('Pausing %s due to status of %s.' % (nzbname, status))
        client = nzb.proxy()

        # Pause the file group, then send it back to the queue; either RPC
        # failing aborts with a proxy error.
        for action, verb in (('GroupPause', 'pause'), ('HistoryReturn', 'requeue')):
            if not client.editqueue(action, 0, '', [nzbid]):
                reason = 'Failed to %s %s (%s).' % (verb, nzbname, nzbid)
                nzb.exit(nzb.PROCESS_FAIL_PROXY, reason)
    except Exception as e:
        traceback.print_exc()
        nzb.exit(nzb.PROCESS_ERROR, e)
    finally:
        nzb.lock_release(SCRIPT_NAME)
        clean_up()
def on_post_processing():
    """Health-check handler: pause and requeue downloads that failed on health."""
    # Create a lock so that the scheduler also doesn't try to run.
    nzb.lock_reset(SCRIPT_NAME)

    current_status = nzb.get_nzb_status()
    if current_status != "FAILURE/HEALTH":
        nzb.log_detail("Nothing to do, status was %s." % current_status)
        nzb.exit(nzb.PROCESS_SUCCESS)

    try:
        download_id = nzb.get_nzb_id()
        download_name = nzb.get_nzb_name()

        nzb.log_detail(
            "Performing health check on %s (%s)." % (download_name, current_status))

        check_limit_age(download_id, download_name)
        check_limit_retries(download_id, download_name)

        # Stop all other post-processing because we need to requeue the file.
        nzb.log_warning(
            "Pausing %s due to status of %s." % (download_name, current_status))
        rpc = nzb.proxy()

        # Pause the file group.
        if not rpc.editqueue("GroupPause", 0, "", [download_id]):
            nzb.exit(nzb.PROCESS_FAIL_PROXY,
                     "Failed to pause %s (%s)." % (download_name, download_id))

        # Send the file back to the queue.
        if not rpc.editqueue("HistoryReturn", 0, "", [download_id]):
            nzb.exit(nzb.PROCESS_FAIL_PROXY,
                     "Failed to requeue %s (%s)." % (download_name, download_id))
    except Exception as err:
        traceback.print_exc()
        nzb.exit(nzb.PROCESS_ERROR, err)
    finally:
        nzb.lock_release(SCRIPT_NAME)
        clean_up()