def scan(config, lock, path, scan_for, section, scan_type, resleep_paths):
    scan_path = ""

    # sleep for delay
    while True:
        if config['SERVER_SCAN_DELAY']:
            logger.info("Scan request from %s for '%s', sleeping for %d seconds...", scan_for, path,
                        config['SERVER_SCAN_DELAY'])
            time.sleep(config['SERVER_SCAN_DELAY'])
        else:
            logger.info("Scan request from %s for '%s'", scan_for, path)

        # check if root scan folder for
        if path in resleep_paths:
            logger.info("Another scan request occurred for folder of '%s', sleeping again!", path)
            utils.remove_item_from_list(path, resleep_paths)
        else:
            break

    # check file exists
    checks = 0
    check_path = utils.map_pushed_path_file_exists(config, path)
    scan_path_is_directory = os.path.isdir(check_path)

    while True:
        checks += 1
        if os.path.exists(check_path):
            logger.info("File '%s' exists on check %d of %d.", check_path, checks,
                        config['SERVER_MAX_FILE_CHECKS'])
            if not scan_path or not len(scan_path):
                scan_path = os.path.dirname(path).strip() if not scan_path_is_directory else path.strip()
            break

        elif not scan_path_is_directory and config['SERVER_SCAN_FOLDER_ON_FILE_EXISTS_EXHAUSTION'] and \
                config['SERVER_MAX_FILE_CHECKS'] - checks == 1:
            # penultimate check but SERVER_SCAN_FOLDER_ON_FILE_EXISTS_EXHAUSTION was turned on,
            # so let's make the scan path the folder instead for the final check
            logger.warning(
                "File '%s' reached the penultimate file check, changing scan path to '%s', final check commences "
                "in 60 seconds", check_path, os.path.dirname(path))
            check_path = os.path.dirname(check_path).strip()
            scan_path = os.path.dirname(path).strip()
            scan_path_is_directory = os.path.isdir(check_path)
            time.sleep(60)
            # send rclone cache clear if enabled
            if config['RCLONE_RC_CACHE_EXPIRE']['ENABLED']:
                utils.rclone_rc_clear_cache(config, check_path)

        elif checks >= config['SERVER_MAX_FILE_CHECKS']:
            logger.warning("File '%s' exhausted all available checks, aborting scan request.", check_path)
            # remove item from database if sqlite is enabled
            if config['SERVER_USE_SQLITE']:
                if db.remove_item(path):
                    logger.info("Removed '%s' from database", path)
                    time.sleep(1)
                else:
                    logger.error("Failed removing '%s' from database", path)
            return

        else:
            logger.info("File '%s' did not exist on check %d of %d, checking again in 60 seconds.", check_path,
                        checks, config['SERVER_MAX_FILE_CHECKS'])
            time.sleep(60)
            # send rclone cache clear if enabled
            if config['RCLONE_RC_CACHE_EXPIRE']['ENABLED']:
                utils.rclone_rc_clear_cache(config, check_path)

    # build plex scanner command
    if os.name == 'nt':
        final_cmd = '"%s" --scan --refresh --section %s --directory "%s"' \
                    % (config['PLEX_SCANNER'], str(section), scan_path)
    else:
        cmd = 'export LD_LIBRARY_PATH=' + config['PLEX_LD_LIBRARY_PATH'] + ';'
        if not config['USE_DOCKER']:
            cmd += 'export PLEX_MEDIA_SERVER_APPLICATION_SUPPORT_DIR=' + config['PLEX_SUPPORT_DIR'] + ';'
        cmd += config['PLEX_SCANNER'] + ' --scan --refresh --section ' + str(section) + ' --directory ' + \
               cmd_quote(scan_path)

        if config['USE_DOCKER']:
            final_cmd = 'docker exec -u %s -i %s bash -c %s' % \
                        (cmd_quote(config['PLEX_USER']), cmd_quote(config['DOCKER_NAME']), cmd_quote(cmd))
        elif config['USE_SUDO']:
            final_cmd = 'sudo -u %s bash -c %s' % (config['PLEX_USER'], cmd_quote(cmd))
        else:
            final_cmd = cmd

    # invoke plex scanner
    priority = utils.get_priority(config, scan_path)
    logger.debug("Waiting for turn in the scan request backlog with priority: %d", priority)

    lock.acquire(priority)
    try:
        logger.info("Scan request is now being processed")

        # wait for existing scanners being run by plex
        if config['PLEX_WAIT_FOR_EXTERNAL_SCANNERS']:
            scanner_name = os.path.basename(config['PLEX_SCANNER']).replace('\\', '')
            if not utils.wait_running_process(scanner_name):
                logger.warning(
                    "There was a problem waiting for existing '%s' process(s) to finish, aborting scan.",
                    scanner_name)
                # remove item from database if sqlite is enabled
                if config['SERVER_USE_SQLITE']:
                    if db.remove_item(path):
                        logger.info("Removed '%s' from database", path)
                        time.sleep(1)
                    else:
                        logger.error("Failed removing '%s' from database", path)
                return
            else:
                logger.info("No '%s' processes were found.", scanner_name)

        # run external command if supplied
        if len(config['RUN_COMMAND_BEFORE_SCAN']) > 2:
            logger.info("Running external command: %r", config['RUN_COMMAND_BEFORE_SCAN'])
            utils.run_command(config['RUN_COMMAND_BEFORE_SCAN'])
            logger.info("Finished running external command")

        # begin scan
        logger.info("Starting Plex Scanner")
        logger.debug(final_cmd)
        utils.run_command(final_cmd.encode("utf-8"))
        logger.info("Finished scan!")

        # remove item from database if sqlite is enabled
        if config['SERVER_USE_SQLITE']:
            if db.remove_item(path):
                logger.info("Removed '%s' from database", path)
                time.sleep(1)
                logger.info("There are %d queued item(s) remaining...", db.queued_count())
            else:
                logger.error("Failed removing '%s' from database", path)

        # empty trash if configured
        if config['PLEX_EMPTY_TRASH'] and config['PLEX_TOKEN'] and config['PLEX_EMPTY_TRASH_MAX_FILES']:
            logger.info("Checking deleted item count in 10 seconds...")
            time.sleep(10)

            # check deleted item count, don't proceed if more than this value
            deleted_items = get_deleted_count(config)
            if deleted_items > config['PLEX_EMPTY_TRASH_MAX_FILES']:
                logger.warning("There were %d deleted files, skipping emptying trash for section %s",
                               deleted_items, section)
            elif deleted_items == -1:
                logger.error("Could not determine deleted item count, aborting emptying trash")
            elif not config['PLEX_EMPTY_TRASH_ZERO_DELETED'] and not deleted_items and scan_type != 'Upgrade':
                logger.info("Skipping emptying trash as there were no deleted items")
            else:
                logger.info("Emptying trash to clear %d deleted items", deleted_items)
                empty_trash(config, str(section))

        # analyze movie/episode
        if config['PLEX_ANALYZE_TYPE'].lower() != 'off' and not scan_path_is_directory:
            logger.debug("Sleeping 10 seconds before sending analyze request")
            time.sleep(10)
            analyze_item(config, path)

    except Exception:
        logger.exception("Unexpected exception occurred while processing: '%s'", scan_path)
    finally:
        lock.release()
    return
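# Note: the variant above calls lock.acquire(priority), which the standard
# threading.Lock does not accept, so the lock passed in must support
# priority-ordered acquisition. Below is a minimal sketch of such a lock,
# assuming lower numbers mean higher priority; the class and attribute names
# are illustrative only, not this project's actual implementation.
import heapq
import threading


class PriorityLock(object):
    def __init__(self):
        self._mutex = threading.Lock()
        self._condition = threading.Condition(self._mutex)
        self._waiting = []      # heap of (priority, sequence) entries
        self._sequence = 0      # tie-breaker so equal priorities stay FIFO
        self._locked = False

    def acquire(self, priority=0):
        with self._condition:
            self._sequence += 1
            entry = (priority, self._sequence)
            heapq.heappush(self._waiting, entry)
            # wait until the lock is free and this entry is at the head of the heap
            while self._locked or self._waiting[0] != entry:
                self._condition.wait()
            heapq.heappop(self._waiting)
            self._locked = True

    def release(self):
        with self._condition:
            self._locked = False
            self._condition.notify_all()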
def scan(config, lock, path, scan_for, section, scan_type, resleep_paths):
    scan_path = ""

    # sleep for delay
    while True:
        if config['SERVER_SCAN_DELAY']:
            logger.info("Scan request for '%s', sleeping for %d seconds...", path, config['SERVER_SCAN_DELAY'])
            time.sleep(config['SERVER_SCAN_DELAY'])
        else:
            logger.info("Scan request for '%s'", path)

        # check if root scan folder for
        if path in resleep_paths:
            logger.info("Another scan request occurred for folder of '%s', sleeping again!", path)
            utils.remove_item_from_list(path, resleep_paths)
        else:
            break

    # check file exists
    if scan_for == 'radarr' or scan_for == 'sonarr_dev' or scan_for == 'manual':
        checks = 0
        check_path = utils.map_pushed_path_file_exists(config, path)

        while True:
            checks += 1
            if os.path.exists(check_path):
                logger.info("File '%s' exists on check %d of %d.", check_path, checks,
                            config['SERVER_MAX_FILE_CHECKS'])
                scan_path = os.path.dirname(path).strip()
                break
            elif checks >= config['SERVER_MAX_FILE_CHECKS']:
                logger.warning("File '%s' exhausted all available checks, aborting scan request.", check_path)
                # remove item from database if sqlite is enabled
                if config['SERVER_USE_SQLITE']:
                    if db.remove_item(path):
                        logger.info("Removed '%s' from database", path)
                        time.sleep(1)
                    else:
                        logger.error("Failed removing '%s' from database", path)
                return
            else:
                logger.info("File '%s' did not exist on check %d of %d, checking again in 60 seconds.",
                            check_path, checks, config['SERVER_MAX_FILE_CHECKS'])
                time.sleep(60)
    else:
        # old sonarr doesn't pass the sonarr_episodefile_path in the webhook, so we cannot check until this is corrected.
        scan_path = path.strip()

    # build plex scanner command
    if os.name == 'nt':
        final_cmd = '""%s" --scan --refresh --section %s --directory "%s""' \
                    % (config['PLEX_SCANNER'], str(section), scan_path)
    else:
        cmd = 'export LD_LIBRARY_PATH=' + config['PLEX_LD_LIBRARY_PATH'] + ';'
        if not config['USE_DOCKER']:
            cmd += 'export PLEX_MEDIA_SERVER_APPLICATION_SUPPORT_DIR=' + config['PLEX_SUPPORT_DIR'] + ';'
        cmd += config['PLEX_SCANNER'] + ' --scan --refresh --section ' + str(section) + ' --directory ' + \
               cmd_quote(scan_path)

        if config['USE_DOCKER']:
            final_cmd = 'docker exec -u %s -i %s bash -c %s' % \
                        (cmd_quote(config['PLEX_USER']), cmd_quote(config['DOCKER_NAME']), cmd_quote(cmd))
        elif config['USE_SUDO']:
            final_cmd = 'sudo -u %s bash -c %s' % (config['PLEX_USER'], cmd_quote(cmd))
        else:
            final_cmd = cmd

    # invoke plex scanner
    logger.debug("Waiting for turn in the scan request backlog...")
    with lock:
        logger.info("Scan request is now being processed")

        # wait for existing scanners being run by plex
        if config['PLEX_WAIT_FOR_EXTERNAL_SCANNERS']:
            scanner_name = os.path.basename(config['PLEX_SCANNER']).replace('\\', '')
            if not utils.wait_running_process(scanner_name):
                logger.warning(
                    "There was a problem waiting for existing '%s' process(s) to finish, aborting scan.",
                    scanner_name)
                # remove item from database if sqlite is enabled
                if config['SERVER_USE_SQLITE']:
                    if db.remove_item(path):
                        logger.info("Removed '%s' from database", path)
                        time.sleep(1)
                    else:
                        logger.error("Failed removing '%s' from database", path)
                return
            else:
                logger.info("No '%s' processes were found.", scanner_name)

        # begin scan
        logger.info("Starting Plex Scanner")
        logger.debug(final_cmd)
        utils.run_command(final_cmd.encode("utf-8"))
        logger.info("Finished scan!")

        # remove item from database if sqlite is enabled
        if config['SERVER_USE_SQLITE']:
            if db.remove_item(path):
                logger.info("Removed '%s' from database", path)
                time.sleep(1)
                logger.info("There are %d queued item(s) remaining...", db.queued_count())
            else:
                logger.error("Failed removing '%s' from database", path)

        # empty trash if configured
        if config['PLEX_EMPTY_TRASH'] and config['PLEX_TOKEN'] and config['PLEX_EMPTY_TRASH_MAX_FILES']:
            logger.info("Checking deleted item count in 10 seconds...")
            time.sleep(10)

            # check deleted item count, don't proceed if more than this value
            deleted_items = get_deleted_count(config)
            if deleted_items > config['PLEX_EMPTY_TRASH_MAX_FILES']:
                logger.warning("There were %d deleted files, skipping emptying trash for section %s",
                               deleted_items, section)
            elif deleted_items == -1:
                logger.error("Could not determine deleted item count, aborting emptying trash")
            elif not config['PLEX_EMPTY_TRASH_ZERO_DELETED'] and not deleted_items and scan_type != 'Upgrade':
                logger.info("Skipping emptying trash as there were no deleted items")
            else:
                logger.info("Emptying trash to clear %d deleted items", deleted_items)
                empty_trash(config, str(section))

        # analyze movie/season
        if config['PLEX_ANALYZE_FILE'] and config['PLEX_TOKEN'] and config['PLEX_LOCAL_URL']:
            logger.debug("Sleeping 10 seconds before sending analyze request")
            time.sleep(10)
            analyze_item(config, path)

    return
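# cmd_quote is used throughout these variants but is not defined in this section.
# Presumably it is a shell-quoting helper imported at module level; a typical
# Python 2/3 compatible import (an assumption, not confirmed by this section) would be:
try:
    from shlex import quote as cmd_quote  # Python 3.3+
except ImportError:
    from pipes import quote as cmd_quote  # Python 2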
def scan(config, lock, path, scan_for, section, scan_type, resleep_paths, scan_title=None, scan_lookup_type=None,
         scan_lookup_id=None):
    scan_path = ""

    # sleep for delay
    while True:
        logger.info("Scan request from %s for '%s'.", scan_for, path)

        if config['SERVER_SCAN_DELAY']:
            logger.info("Sleeping for %d seconds...", config['SERVER_SCAN_DELAY'])
            time.sleep(config['SERVER_SCAN_DELAY'])

        # check if root scan folder for
        if path in resleep_paths:
            logger.info("Another scan request occurred for folder of '%s'.", path)
            logger.info("Sleeping again for %d seconds...", config['SERVER_SCAN_DELAY'])
            utils.remove_item_from_list(path, resleep_paths)
        else:
            break

    # check file exists
    checks = 0
    check_path = utils.map_pushed_path_file_exists(config, path)
    scan_path_is_directory = os.path.isdir(check_path)

    while True:
        checks += 1
        if os.path.exists(check_path):
            logger.info("File '%s' exists on check %d of %d.", check_path, checks,
                        config['SERVER_MAX_FILE_CHECKS'])
            if not scan_path or not len(scan_path):
                scan_path = os.path.dirname(path).strip() if not scan_path_is_directory else path.strip()
            break

        elif not scan_path_is_directory and config['SERVER_SCAN_FOLDER_ON_FILE_EXISTS_EXHAUSTION'] and \
                config['SERVER_MAX_FILE_CHECKS'] - checks == 1:
            # penultimate check but SERVER_SCAN_FOLDER_ON_FILE_EXISTS_EXHAUSTION was turned on
            # lets make scan path the folder instead for the final check
            logger.warning(
                "File '%s' reached the penultimate file check. Changing scan path to '%s'. Final check commences "
                "in %s seconds...", check_path, os.path.dirname(path), config['SERVER_FILE_CHECK_DELAY'])
            check_path = os.path.dirname(check_path).strip()
            scan_path = os.path.dirname(path).strip()
            scan_path_is_directory = os.path.isdir(check_path)
            time.sleep(config['SERVER_FILE_CHECK_DELAY'])
            # send Rclone cache clear if enabled
            if config['RCLONE']['RC_CACHE_REFRESH']['ENABLED']:
                utils.rclone_rc_clear_cache(config, check_path)

        elif checks >= config['SERVER_MAX_FILE_CHECKS']:
            logger.warning("File '%s' exhausted all available checks. Aborting scan request.", check_path)
            # remove item from database if sqlite is enabled
            if config['SERVER_USE_SQLITE']:
                if db.remove_item(path):
                    logger.info("Removed '%s' from Plex Autoscan database.", path)
                    time.sleep(1)
                else:
                    logger.error("Failed removing '%s' from Plex Autoscan database.", path)
            return

        else:
            logger.info("File '%s' did not exist on check %d of %d. Checking again in %s seconds...", check_path,
                        checks, config['SERVER_MAX_FILE_CHECKS'], config['SERVER_FILE_CHECK_DELAY'])
            time.sleep(config['SERVER_FILE_CHECK_DELAY'])
            # send Rclone cache clear if enabled
            if config['RCLONE']['RC_CACHE_REFRESH']['ENABLED']:
                utils.rclone_rc_clear_cache(config, check_path)

    # build plex scanner command
    if os.name == 'nt':
        final_cmd = '"%s" --scan --refresh --section %s --directory "%s"' \
                    % (config['PLEX_SCANNER'], str(section), scan_path)
    else:
        cmd = 'export LD_LIBRARY_PATH=' + config['PLEX_LD_LIBRARY_PATH'] + ';'
        if not config['USE_DOCKER']:
            cmd += 'export PLEX_MEDIA_SERVER_APPLICATION_SUPPORT_DIR=' + config['PLEX_SUPPORT_DIR'] + ';'
        cmd += config['PLEX_SCANNER'] + ' --scan --refresh --section ' + str(section) + ' --directory ' + \
               cmd_quote(scan_path)

        if config['USE_DOCKER']:
            final_cmd = 'docker exec -u %s -i %s bash -c %s' % \
                        (cmd_quote(config['PLEX_USER']), cmd_quote(config['DOCKER_NAME']), cmd_quote(cmd))
        elif config['USE_SUDO']:
            final_cmd = 'sudo -u %s bash -c %s' % (config['PLEX_USER'], cmd_quote(cmd))
        else:
            final_cmd = cmd

    # invoke plex scanner
    priority = utils.get_priority(config, scan_path)
    logger.debug("Waiting for turn in the scan request backlog with priority '%d'...", priority)

    lock.acquire(priority)
    try:
        logger.info("Scan request is now being processed...")

        # wait for existing scanners being run by Plex
        if config['PLEX_WAIT_FOR_EXTERNAL_SCANNERS']:
            scanner_name = os.path.basename(config['PLEX_SCANNER']).replace('\\', '')
            if not utils.wait_running_process(scanner_name, config['USE_DOCKER'],
                                              cmd_quote(config['DOCKER_NAME'])):
                logger.warning(
                    "There was a problem waiting for existing '%s' process(s) to finish. Aborting scan.",
                    scanner_name)
                # remove item from database if sqlite is enabled
                if config['SERVER_USE_SQLITE']:
                    if db.remove_item(path):
                        logger.info("Removed '%s' from Plex Autoscan database.", path)
                        time.sleep(1)
                    else:
                        logger.error("Failed removing '%s' from Plex Autoscan database.", path)
                return
            else:
                logger.info("No '%s' processes were found.", scanner_name)

        # run external command before scan if supplied
        if len(config['RUN_COMMAND_BEFORE_SCAN']) > 2:
            extCmd = config['RUN_COMMAND_BEFORE_SCAN']
            for ch in ['%config', '%lock', '%path', '%scan_for', '%section', '%scan_type', '%resleep_paths',
                       '%scan_title', '%scan_lookup_type', '%scan_lookup_id']:
                if ch in extCmd:
                    chf = "%(" + ch[1:] + ")s"
                    rplc = "\"%s\"" % chf
                    extCmd = extCmd.replace(ch, rplc)
            extCmd = extCmd % {
                'config': config,
                'lock': lock,
                'path': path,
                'scan_for': scan_for,
                'section': section,
                'scan_type': scan_type,
                'resleep_paths': resleep_paths,
                'scan_title': scan_title,
                'scan_lookup_type': scan_lookup_type,
                'scan_lookup_id': scan_lookup_id
            }
            logger.info("Running external command: %r", extCmd)
            utils.run_command(extCmd)
            logger.info("Finished running external command.")

        # wait for Plex to become responsive (if PLEX_CHECK_BEFORE_SCAN is enabled)
        if 'PLEX_CHECK_BEFORE_SCAN' in config and config['PLEX_CHECK_BEFORE_SCAN']:
            plex_account_user = wait_plex_alive(config)
            if plex_account_user is not None:
                logger.info("Plex is available for media scanning - (Server Account: '%s')", plex_account_user)

        # begin scan
        logger.info("Running Plex Media Scanner for: %s", scan_path)
        logger.debug(final_cmd)
        utils.run_command(final_cmd.encode("utf-8"))
        logger.info("Finished scan!")

        # remove item from Plex Autoscan database if sqlite is enabled
        if config['SERVER_USE_SQLITE']:
            if db.remove_item(path):
                logger.debug("Removed '%s' from Plex Autoscan database.", path)
                time.sleep(1)
                logger.info("There are %d queued item(s) remaining.", db.queued_count())
            else:
                logger.error("Failed removing '%s' from Plex Autoscan database.", path)

        # empty trash if configured
        if config['PLEX_EMPTY_TRASH'] and config['PLEX_TOKEN'] and config['PLEX_EMPTY_TRASH_MAX_FILES']:
            logger.debug("Checking deleted items count in 10 seconds...")
            time.sleep(10)

            # check deleted item count, don't proceed if more than this value
            deleted_items = get_deleted_count(config)
            if deleted_items > config['PLEX_EMPTY_TRASH_MAX_FILES']:
                logger.warning("There were %d deleted files. Skip emptying of trash for Section '%s'.",
                               deleted_items, section)
            elif deleted_items == -1:
                logger.error("Could not determine deleted item count. Abort emptying of trash.")
            elif not config['PLEX_EMPTY_TRASH_ZERO_DELETED'] and not deleted_items and scan_type != 'Upgrade':
                logger.debug("Skipping emptying trash as there were no deleted items.")
            else:
                logger.info("Emptying trash to clear %d deleted items...", deleted_items)
                empty_trash(config, str(section))

        # analyze movie/episode
        if config['PLEX_ANALYZE_TYPE'].lower() != 'off' and not scan_path_is_directory:
            logger.debug("Sleeping for 10 seconds...")
            time.sleep(10)
            logger.debug("Sending analysis request...")
            analyze_item(config, path)

        # match item
        if config['PLEX_FIX_MISMATCHED'] and config['PLEX_TOKEN'] and not scan_path_is_directory:
            # were we initiated with the scan_title/scan_lookup_type/scan_lookup_id parameters?
            if scan_title is not None and scan_lookup_type is not None and scan_lookup_id is not None:
                logger.debug("Sleeping for 10 seconds...")
                time.sleep(10)
                logger.debug("Validating match for '%s' (%s ID: %s)...", scan_title, scan_lookup_type,
                             str(scan_lookup_id))
                match_item_parent(config, path, scan_title, scan_lookup_type, scan_lookup_id)

        # run external command after scan if supplied
        if len(config['RUN_COMMAND_AFTER_SCAN']) > 2:
            extCmd = config['RUN_COMMAND_AFTER_SCAN']
            for ch in ['%config', '%lock', '%path', '%scan_for', '%section', '%scan_type', '%resleep_paths',
                       '%scan_title', '%scan_lookup_type', '%scan_lookup_id']:
                if ch in extCmd:
                    chf = "%(" + ch[1:] + ")s"
                    rplc = "\"%s\"" % chf
                    extCmd = extCmd.replace(ch, rplc)
            extCmd = extCmd % {
                'config': config,
                'lock': lock,
                'path': path,
                'scan_for': scan_for,
                'section': section,
                'scan_type': scan_type,
                'resleep_paths': resleep_paths,
                'scan_title': scan_title,
                'scan_lookup_type': scan_lookup_type,
                'scan_lookup_id': scan_lookup_id
            }
            logger.info("Running external command: %r", extCmd)
            utils.run_command(extCmd)
            logger.info("Finished running external command.")

    except Exception:
        logger.exception("Unexpected exception occurred while processing: '%s'", scan_path)
    finally:
        lock.release()
    return
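# A quick, self-contained illustration (with made-up values) of the %token substitution
# performed for RUN_COMMAND_BEFORE_SCAN / RUN_COMMAND_AFTER_SCAN in the variant above:
# tokens such as '%path' are rewritten to quoted '%(path)s' specifiers and then filled
# from the scan state, so values end up wrapped in double quotes in the final command.
def _demo_external_command_substitution():
    ext_cmd = '/scripts/notify.sh %path %section'   # hypothetical configured command
    for ch in ['%path', '%section']:
        if ch in ext_cmd:
            # '%path' -> '"%(path)s"'
            ext_cmd = ext_cmd.replace(ch, '"%(' + ch[1:] + ')s"')
    ext_cmd = ext_cmd % {'path': '/mnt/unionfs/Media/Movies/Example (2017)/Example.mkv', 'section': 1}
    # result: /scripts/notify.sh "/mnt/unionfs/Media/Movies/Example (2017)/Example.mkv" "1"
    return ext_cmd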
def scan(config, lock, path, scan_for, section, scan_type):
    scan_path = ""

    # sleep for delay
    if config['SERVER_SCAN_DELAY']:
        logger.info("Scan request for '%s', scan delay of %d seconds. Sleeping...", path,
                    config['SERVER_SCAN_DELAY'])
        time.sleep(config['SERVER_SCAN_DELAY'])
    else:
        logger.info("Scan request for '%s'", path)

    # check file exists
    if scan_for == 'radarr' or scan_for == 'sonarr_dev' or scan_for == 'manual':
        checks = 0
        check_path = utils.map_pushed_path_file_exists(config, path)

        while True:
            checks += 1
            if os.path.exists(check_path):
                logger.info("File '%s' exists on check %d of %d.", check_path, checks,
                            config['SERVER_MAX_FILE_CHECKS'])
                scan_path = os.path.dirname(path).strip()
                break
            elif checks >= config['SERVER_MAX_FILE_CHECKS']:
                logger.warning("File '%s' exhausted all available checks, aborting scan request.", check_path)
                return
            else:
                logger.info("File '%s' did not exist on check %d of %d, checking again in 60 seconds.",
                            check_path, checks, config['SERVER_MAX_FILE_CHECKS'])
                time.sleep(60)
    else:
        # old sonarr doesn't pass the sonarr_episodefile_path in the webhook, so we cannot check until this is corrected.
        scan_path = path.strip()

    # invoke plex scanner
    logger.debug("Waiting for turn in the scan request backlog...")
    with lock:
        logger.info("Scan request is now being processed")

        # wait for existing scanners being run by plex
        if config['PLEX_WAIT_FOR_EXTERNAL_SCANNERS']:
            scanner_name = os.path.basename(config['PLEX_SCANNER']).replace('\\', '')
            if not utils.wait_running_process(scanner_name):
                logger.warning(
                    "There was a problem waiting for existing '%s' process(s) to finish, aborting scan.",
                    scanner_name)
                return
            else:
                logger.info("No '%s' processes were found.", scanner_name)

        # begin scan
        logger.info("Starting Plex Scanner To Scan")
        final_cmd = build_cmd(config, section, scan_path, 'scan')
        logger.debug(final_cmd)
        utils.run_command(final_cmd.encode("utf-8"))
        logger.info("Finished scan!")

        if config['PLEX_ANALYZE']:
            logger.info("Starting Plex Scanner To Analyze")
            final_cmd = build_cmd(config, section, scan_path, 'analyze')
            logger.debug(final_cmd)
            utils.run_command(final_cmd.encode("utf-8"))
            logger.info("Finished analyze!")

        if config['PLEX_DEEP_ANALYZE']:
            logger.info("Starting Plex Scanner To Deep Analyze")
            final_cmd = build_cmd(config, section, scan_path, 'deep')
            logger.debug(final_cmd)
            utils.run_command(final_cmd.encode("utf-8"))
            logger.info("Finished deep analyze!")

        # empty trash if configured
        if config['PLEX_EMPTY_TRASH'] and config['PLEX_TOKEN'] and config['PLEX_EMPTY_TRASH_MAX_FILES']:
            logger.info("Checking deleted item count in 5 seconds...")
            time.sleep(5)

            # check deleted item count, don't proceed if more than this value
            deleted_items = get_deleted_count(config)
            if deleted_items > config['PLEX_EMPTY_TRASH_MAX_FILES']:
                logger.warning("There were %d deleted files, skipping emptying trash for section %s",
                               deleted_items, section)
                return
            if deleted_items == -1:
                logger.error("Could not determine deleted item count, aborting emptying trash")
                return
            if not config['PLEX_EMPTY_TRASH_ZERO_DELETED'] and not deleted_items and scan_type != 'Upgrade':
                logger.info("Skipping emptying trash as there were no deleted items")
                return

            logger.info("Emptying trash to clear %d deleted items", deleted_items)
            empty_trash(config, str(section))
    return
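# build_cmd() is called by the oldest variant above but is not defined in this section.
# Below is a minimal sketch of what it could look like, pieced together from the inline
# command construction in the other variants. The 'analyze'/'deep' scanner flags
# (--analyze / --analyze-deeply) and the exact argument layout are assumptions, not
# confirmed by this section; cmd_quote is the shell-quoting helper noted earlier, and
# os is assumed imported at module level as used throughout this file.
def build_cmd(config, section, scan_path, mode='scan'):
    # pick the Plex Media Scanner arguments for the requested mode
    if mode == 'analyze':
        flags = ' --analyze'
    elif mode == 'deep':
        flags = ' --analyze-deeply'
    else:
        flags = ' --scan --refresh'
    flags += ' --section ' + str(section) + ' --directory '

    if os.name == 'nt':
        return '"%s"%s"%s"' % (config['PLEX_SCANNER'], flags, scan_path)

    cmd = 'export LD_LIBRARY_PATH=' + config['PLEX_LD_LIBRARY_PATH'] + ';'
    if not config['USE_DOCKER']:
        cmd += 'export PLEX_MEDIA_SERVER_APPLICATION_SUPPORT_DIR=' + config['PLEX_SUPPORT_DIR'] + ';'
    cmd += config['PLEX_SCANNER'] + flags + cmd_quote(scan_path)

    if config['USE_DOCKER']:
        return 'docker exec -u %s -i %s bash -c %s' % \
               (cmd_quote(config['PLEX_USER']), cmd_quote(config['DOCKER_NAME']), cmd_quote(cmd))
    elif config['USE_SUDO']:
        return 'sudo -u %s bash -c %s' % (config['PLEX_USER'], cmd_quote(cmd))
    return cmd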