def update_filelist(nzbid):
    """
    Scan the NZB download directory for new files, record them in the
    per-download cache file, and run process_download() on each new
    non-temporary file.

    The whole scan runs under the LOCK_FILELIST lock so concurrent
    invocations (e.g. scheduler vs. event callback) do not double-process.
    Exceptions are logged and re-raised; the lock is always released.
    """
    # If a lock already exists in updating the cache file, bail out.
    if nzb.lock_exists(LOCK_FILELIST):
        return

    # Get the list of files from cache and from disk.
    nzb.lock_create(LOCK_FILELIST)
    try:
        cache_filepath = get_cache_filepath(nzbid)
        directory = nzb.get_nzb_directory()

        if not os.path.isdir(directory):
            nzb.log_warning('Directory %s does not appear valid.' % directory)
            # BUG FIX: the original fell through and called os.listdir() on
            # the invalid directory, raising OSError. Bail out instead; the
            # finally clause still releases the lock.
            return

        filelist = nzb.get_new_files(os.listdir(directory), cache_filepath)

        # Cache the files that we've found that we just processed.
        with open(cache_filepath, 'a') as cachefile:
            for filename in filelist:
                # Skip in-flight temporary files; process everything else.
                name, extension = os.path.splitext(filename)
                if extension != '.tmp':
                    cachefile.write(filename + '\n')
                    process_download(directory, filename)
        # NOTE: the redundant explicit close() was removed; the with
        # statement already closes the cache file.
    except Exception as e:
        traceback.print_exc()
        nzb.log_error(e)
        raise
    finally:
        nzb.lock_release(LOCK_FILELIST)
def clean_up():
    """
    Perform any script cleanup that is required here.

    Releases the script and file-list locks if they are held, and removes
    the temp directory when it exists and is empty.
    """
    if nzb.lock_exists(SCRIPT_NAME):
        nzb.lock_release(SCRIPT_NAME)

    if nzb.lock_exists(LOCK_FILELIST):
        nzb.lock_release(LOCK_FILELIST)

    tempdir = get_temp_path()
    # BUG FIX: the original called os.listdir(tempdir) BEFORE checking that
    # the directory exists, raising FileNotFoundError when it was already
    # gone. Check existence first, then emptiness (empty list is falsy).
    if os.path.exists(tempdir) and not os.listdir(tempdir):
        shutil.rmtree(tempdir)
def on_post_processing():
    """
    Handle the post-processing event for a download.

    Does nothing unless the download status is 'FAILURE/HEALTH'. Otherwise
    runs the age/retry limit checks, pauses the file group, and returns it
    to the queue via the NZBGet RPC proxy. Always releases the script lock
    and runs clean_up() on the way out.
    """
    # Create a lock so that the scheduler also doesn't try to run.
    nzb.lock_reset(SCRIPT_NAME)

    status = nzb.get_nzb_status()
    if status != 'FAILURE/HEALTH':
        nzb.log_detail('Nothing to do, status was %s.' % status)
        nzb.exit(nzb.PROCESS_SUCCESS)

    try:
        nzbid = nzb.get_nzb_id()
        nzbname = nzb.get_nzb_name()

        nzb.log_detail('Performing health check on %s (%s).' % (nzbname, status))
        check_limit_age(nzbid, nzbname)
        check_limit_retries(nzbid, nzbname)

        # Stop all other post-processing because we need to requeue the file.
        nzb.log_warning('Pausing %s due to status of %s.' % (nzbname, status))
        client = nzb.proxy()

        # Pause the file group, then send it back to the queue. Exit with a
        # proxy failure if either RPC command is rejected.
        for command, verb in (('GroupPause', 'pause'), ('HistoryReturn', 'requeue')):
            if not client.editqueue(command, 0, '', [nzbid]):
                reason = 'Failed to %s %s (%s).' % (verb, nzbname, nzbid)
                nzb.exit(nzb.PROCESS_FAIL_PROXY, reason)
    except Exception as e:
        traceback.print_exc()
        nzb.exit(nzb.PROCESS_ERROR, e)
    finally:
        nzb.lock_release(SCRIPT_NAME)
        clean_up()
def on_post_processing():
    """
    React to the post-processing event for a download.

    Only downloads whose status is "FAILURE/HEALTH" are acted on: the age
    and retry limits are checked, the group is paused, and the item is
    returned to the queue through the RPC proxy. The script lock is
    released and clean_up() runs regardless of outcome.
    """
    # Create a lock so that the scheduler also doesn't try to run.
    nzb.lock_reset(SCRIPT_NAME)

    status = nzb.get_nzb_status()
    if status != "FAILURE/HEALTH":
        nzb.log_detail("Nothing to do, status was %s." % status)
        nzb.exit(nzb.PROCESS_SUCCESS)

    try:
        nzbid = nzb.get_nzb_id()
        nzbname = nzb.get_nzb_name()

        nzb.log_detail("Performing health check on %s (%s)." % (nzbname, status))
        check_limit_age(nzbid, nzbname)
        check_limit_retries(nzbid, nzbname)

        # Stop all other post-processing because we need to requeue the file.
        nzb.log_warning("Pausing %s due to status of %s." % (nzbname, status))
        rpc = nzb.proxy()

        # Pause the file group.
        paused = rpc.editqueue("GroupPause", 0, "", [nzbid])
        if not paused:
            nzb.exit(nzb.PROCESS_FAIL_PROXY,
                     "Failed to pause %s (%s)." % (nzbname, nzbid))

        # Send the file back to the queue.
        requeued = rpc.editqueue("HistoryReturn", 0, "", [nzbid])
        if not requeued:
            nzb.exit(nzb.PROCESS_FAIL_PROXY,
                     "Failed to requeue %s (%s)." % (nzbname, nzbid))
    except Exception as e:
        traceback.print_exc()
        nzb.exit(nzb.PROCESS_ERROR, e)
    finally:
        nzb.lock_release(SCRIPT_NAME)
        clean_up()
def clean_up():
    """
    Perform any script cleanup that is required here.

    Currently this only releases the script's lock.
    """
    lock_name = 'FileMover'
    nzb.lock_release(lock_name)
def clean_up():
    """
    Perform any script cleanup that is required here.

    The only resource held by this script is its lock, so release it.
    """
    script_lock = "FileMover"
    nzb.lock_release(script_lock)