def clean_up():
    """
    Perform any script cleanup that is required here.

    Releases any locks this script created and removes the temp
    directory if it is empty.
    """
    # Release the locks we may have created during this run.
    if nzb.lock_exists(SCRIPT_NAME):
        nzb.lock_release(SCRIPT_NAME)

    if nzb.lock_exists(LOCK_FILELIST):
        nzb.lock_release(LOCK_FILELIST)

    # BUG FIX: the original called os.listdir() BEFORE checking
    # os.path.exists(), which raises OSError when the directory is
    # already gone. Check existence first, then emptiness.
    tempdir = get_temp_path()
    if os.path.exists(tempdir) and not os.listdir(tempdir):
        shutil.rmtree(tempdir)
def on_scheduled():
    """
    Scheduled-event handler.

    Walks the current download groups, and for each group that has a
    state file on disk, resumes the paused download once its retry
    wait time has elapsed.
    """
    # Bail out if a lock exists, because post-processing is running.
    if nzb.lock_exists(SCRIPT_NAME):
        nzb.exit(nzb.PROCESS_SUCCESS)

    groups = nzb.proxy().listgroups(0)

    for group in groups:
        nzbid = int(group["NZBID"])
        update_filepath = get_update_filepath(nzbid)

        # Look at the next group if we couldn't find it here.
        if not os.path.isfile(update_filepath):
            continue

        nzb.log_detail("Found state file at %s." % update_filepath)

        # Current UTC time as a Unix timestamp (seconds).
        timestamp = int(time.mktime(datetime.datetime.utcnow().timetuple()))

        # Use a context manager so the state file handle is closed
        # promptly instead of being leaked.
        with open(update_filepath, "r") as state_file:
            state = json.load(state_file)

        state_nzbname = state["nzbname"]
        state_lastcheck = int(state["lastcheck"])
        state_retries = int(state["retries"])
        wait_minutes = state_retries * RETRY_MINUTES
        # BUG FIX: timestamps are in seconds, so one division by 60
        # yields minutes. The original divided by 60 twice (hours),
        # delaying every resume by a factor of 60.
        elapsed_minutes = (timestamp - state_lastcheck) / 60

        # If the wait time has elapsed, we need to unpause the file.
        if elapsed_minutes >= wait_minutes:
            nzb.log_detail("Resuming download for %s (%s)." % (state_nzbname, nzbid))
            if not nzb.proxy().editqueue("GroupResume", 0, "", [nzbid]):
                reason = "Failed to resume %s (%s)." % (state_nzbname, nzbid)
                nzb.exit(nzb.PROCESS_FAIL_PROXY, reason)
        else:
            nzb.log_detail("Waiting for %s minutes, %s minutes elapsed." % (wait_minutes, elapsed_minutes))
def update_filelist(nzbid):
    """
    Scan the NZB download directory for new files and process them.

    Files not yet seen (per the cache file for this NZB) are appended
    to the cache and handed to process_download(). Temporary .tmp
    files are skipped. Guarded by the LOCK_FILELIST lock so only one
    scan runs at a time.

    :param nzbid: integer ID of the NZB whose directory is scanned.
    """
    # If a lock already exists in updating the cache file, bail out.
    if nzb.lock_exists(LOCK_FILELIST):
        return

    # Get the list of files from cache and from disk.
    nzb.lock_create(LOCK_FILELIST)

    try:
        cache_filepath = get_cache_filepath(nzbid)
        directory = nzb.get_nzb_directory()

        if not os.path.isdir(directory):
            nzb.log_warning('Directory %s does not appear valid.' % directory)
            # BUG FIX: the original fell through and crashed on
            # os.listdir() below when the directory was invalid;
            # bail out instead (the finally block releases the lock).
            return

        filelist = nzb.get_new_files(os.listdir(directory), cache_filepath)

        # Cache the files that we've found that we just processed.
        # (The redundant explicit close() was removed; the with
        # statement already closes the handle.)
        with open(cache_filepath, 'a') as cachefile:
            for filename in filelist:
                name, extension = os.path.splitext(filename)

                # Skip in-progress temporary files.
                if extension != '.tmp':
                    cachefile.write(filename + '\n')
                    process_download(directory, filename)
    except Exception as e:
        traceback.print_exc()
        nzb.log_error(e)
        raise
    finally:
        nzb.lock_release(LOCK_FILELIST)
def on_scheduled():
    """
    Scheduled-event handler.

    Walks the current download groups, and for each group that has a
    state file on disk, resumes the paused download once its retry
    wait time has elapsed.
    """
    # Bail out if a lock exists, because post-processing is running.
    if nzb.lock_exists(SCRIPT_NAME):
        nzb.exit(nzb.PROCESS_SUCCESS)

    groups = nzb.proxy().listgroups(0)

    for group in groups:
        nzbid = int(group['NZBID'])
        update_filepath = get_update_filepath(nzbid)

        # Look at the next group if we couldn't find it here.
        if not os.path.isfile(update_filepath):
            continue

        nzb.log_detail('Found state file at %s.' % update_filepath)

        # Current UTC time as a Unix timestamp (seconds).
        timestamp = int(time.mktime(datetime.datetime.utcnow().timetuple()))

        # Use a context manager so the state file handle is closed
        # promptly instead of being leaked.
        with open(update_filepath, 'r') as state_file:
            state = json.load(state_file)

        state_nzbname = state['nzbname']
        state_lastcheck = int(state['lastcheck'])
        state_retries = int(state['retries'])
        wait_minutes = state_retries * RETRY_MINUTES
        # BUG FIX: timestamps are in seconds, so one division by 60
        # yields minutes. The original divided by 60 twice (hours),
        # delaying every resume by a factor of 60.
        elapsed_minutes = (timestamp - state_lastcheck) / 60

        # If the wait time has elapsed, we need to unpause the file.
        if elapsed_minutes >= wait_minutes:
            nzb.log_detail('Resuming download for %s (%s).' % (state_nzbname, nzbid))
            if not nzb.proxy().editqueue('GroupResume', 0, '', [nzbid]):
                reason = 'Failed to resume %s (%s).' % (state_nzbname, nzbid)
                nzb.exit(nzb.PROCESS_FAIL_PROXY, reason)
        else:
            nzb.log_detail('Waiting for %s minutes, %s minutes elapsed.' % (wait_minutes, elapsed_minutes))
def main():
    """
    We need to check to make sure the script can run in the provided
    environment and that certain status checks have occurred. All of
    the calls here will exit with an exit code if the check fails.
    """
    try:
        # If the script state was set to Disabled, we don't need to run.
        if SCRIPT_STATE == 'Disabled':
            nzb.exit(nzb.PROCESS_SUCCESS)

        # Check the status before we decide if we can continue.
        nzb.check_nzb_status()

        # Check if lock exists.
        # CONSISTENCY FIX: use SCRIPT_NAME rather than the hard-coded
        # 'FileMover' string; clean_up() only releases the SCRIPT_NAME
        # lock, so a mismatched literal would leak the lock forever.
        if nzb.lock_exists(SCRIPT_NAME):
            nzb.log_info('Lock exists, skipping execution.')
            nzb.exit(nzb.PROCESS_SUCCESS)
        else:
            nzb.lock_create(SCRIPT_NAME)

        # Check version of NZBGet to make sure we can run.
        nzb.check_nzb_version(13.0)

        # Wire up your event handlers before the call.
        # Use the form nzb.set_handler(<event>, <function>)
        nzb.set_handler('POST_PROCESSING', on_post_processing)
        nzb.set_handler('SCHEDULED', on_scheduled)

        # Do not change this line, it checks the current event
        # and executes any event handlers.
        nzb.execute()
    except Exception as e:
        traceback.print_exc()
        nzb.exit(nzb.PROCESS_ERROR, e)
    finally:
        clean_up()
def main():
    """
    We need to check to make sure the script can run in the provided
    environment and that certain status checks have occurred. All of
    the calls here will exit with an exit code if the check fails.
    """
    try:
        # If the script state was set to Disabled, we don't need to run.
        if SCRIPT_STATE == "Disabled":
            nzb.exit(nzb.PROCESS_SUCCESS)

        # Check the status before we decide if we can continue.
        nzb.check_nzb_status()

        # Check if lock exists.
        # CONSISTENCY FIX: use SCRIPT_NAME rather than the hard-coded
        # "FileMover" string; clean_up() only releases the SCRIPT_NAME
        # lock, so a mismatched literal would leak the lock forever.
        if nzb.lock_exists(SCRIPT_NAME):
            nzb.log_info("Lock exists, skipping execution.")
            nzb.exit(nzb.PROCESS_SUCCESS)
        else:
            nzb.lock_create(SCRIPT_NAME)

        # Check version of NZBGet to make sure we can run.
        nzb.check_nzb_version(13.0)

        # Wire up your event handlers before the call.
        # Use the form nzb.set_handler(<event>, <function>)
        nzb.set_handler("POST_PROCESSING", on_post_processing)
        nzb.set_handler("SCHEDULED", on_scheduled)

        # Do not change this line, it checks the current event
        # and executes any event handlers.
        nzb.execute()
    except Exception as e:
        traceback.print_exc()
        nzb.exit(nzb.PROCESS_ERROR, e)
    finally:
        clean_up()
def on_nzb_downloaded():
    """
    Download-finished handler: run cleanup if our lock is held.
    """
    # Only clean up once we're done downloading; nothing to do when
    # the script never took its lock.
    if not nzb.lock_exists(SCRIPT_NAME):
        return
    clean_up()