class Controller: """ Top-level class that controls the behaviour of the app """ class Command: """ Class by which clients of Controller can request Actions to be executed Supports callbacks by which clients can be notified of action success/failure Note: callbacks will be executed in Controller thread, so any heavy computation should be moved out of the callback """ class Action(Enum): QUEUE = 0 STOP = 1 EXTRACT = 2 DELETE_LOCAL = 3 DELETE_REMOTE = 4 class ICallback(ABC): """Command callback interface""" @abstractmethod def on_success(self): """Called on successful completion of action""" pass @abstractmethod def on_failure(self, error: str): """Called on action failure""" pass def __init__(self, action: Action, filename: str): self.action = action self.filename = filename self.callbacks = [] def add_callback(self, callback: ICallback): self.callbacks.append(callback) class CommandProcessWrapper: """ Wraps any one-shot command processes launched by the controller """ def __init__(self, process: AppOneShotProcess, post_callback: Callable): self.process = process self.post_callback = post_callback def __init__(self, context: Context, persist: ControllerPersist): self.__context = context self.__persist = persist self.logger = context.logger.getChild("Controller") # Decide the password here self.__password = context.config.lftp.remote_password if not context.config.lftp.use_ssh_key else None # The command queue self.__command_queue = Queue() # The model self.__model = Model() self.__model.set_base_logger(self.logger) # Lock for the model # Note: While the scanners are in a separate process, the rest of the application # is threaded in a single process. (The webserver is bottle+paste which is # multi-threaded). Therefore it is safe to use a threading Lock for the model # (the scanner processes never try to access the model) self.__model_lock = Lock() # Model builder self.__model_builder = ModelBuilder() self.__model_builder.set_base_logger(self.logger) self.__model_builder.set_downloaded_files( self.__persist.downloaded_file_names) self.__model_builder.set_extracted_files( self.__persist.extracted_file_names) # Lftp self.__lftp = Lftp(address=self.__context.config.lftp.remote_address, port=self.__context.config.lftp.remote_port, user=self.__context.config.lftp.remote_username, password=self.__password) self.__lftp.set_base_logger(self.logger) self.__lftp.set_base_remote_dir_path( self.__context.config.lftp.remote_path) self.__lftp.set_base_local_dir_path( self.__context.config.lftp.local_path) # Configure Lftp self.__lftp.num_parallel_jobs = self.__context.config.lftp.num_max_parallel_downloads self.__lftp.num_parallel_files = self.__context.config.lftp.num_max_parallel_files_per_download self.__lftp.num_connections_per_root_file = self.__context.config.lftp.num_max_connections_per_root_file self.__lftp.num_connections_per_dir_file = self.__context.config.lftp.num_max_connections_per_dir_file self.__lftp.num_max_total_connections = self.__context.config.lftp.num_max_total_connections self.__lftp.use_temp_file = self.__context.config.lftp.use_temp_file self.__lftp.temp_file_name = "*" + Constants.LFTP_TEMP_FILE_SUFFIX self.__lftp.set_verbose_logging(self.__context.config.general.verbose) # Setup the scanners and scanner processes self.__active_scanner = ActiveScanner( self.__context.config.lftp.local_path) self.__local_scanner = LocalScanner( local_path=self.__context.config.lftp.local_path, use_temp_file=self.__context.config.lftp.use_temp_file) self.__remote_scanner = RemoteScanner( 
remote_address=self.__context.config.lftp.remote_address, remote_username=self.__context.config.lftp.remote_username, remote_password=self.__password, remote_port=self.__context.config.lftp.remote_port, remote_path_to_scan=self.__context.config.lftp.remote_path, local_path_to_scan_script=self.__context.args.local_path_to_scanfs, remote_path_to_scan_script=self.__context.config.lftp. remote_path_to_scan_script) self.__active_scan_process = ScannerProcess( scanner=self.__active_scanner, interval_in_ms=self.__context.config.controller. interval_ms_downloading_scan, verbose=False) self.__local_scan_process = ScannerProcess( scanner=self.__local_scanner, interval_in_ms=self.__context.config.controller. interval_ms_local_scan, ) self.__remote_scan_process = ScannerProcess( scanner=self.__remote_scanner, interval_in_ms=self.__context.config.controller. interval_ms_remote_scan, ) # Setup extract process if self.__context.config.controller.use_local_path_as_extract_path: out_dir_path = self.__context.config.lftp.local_path else: out_dir_path = self.__context.config.controller.extract_path self.__extract_process = ExtractProcess( out_dir_path=out_dir_path, local_path=self.__context.config.lftp.local_path) # Setup multiprocess logging self.__mp_logger = MultiprocessingLogger(self.logger) self.__active_scan_process.set_multiprocessing_logger(self.__mp_logger) self.__local_scan_process.set_multiprocessing_logger(self.__mp_logger) self.__remote_scan_process.set_multiprocessing_logger(self.__mp_logger) self.__extract_process.set_multiprocessing_logger(self.__mp_logger) # Keep track of active files self.__active_downloading_file_names = [] self.__active_extracting_file_names = [] # Keep track of active command processes self.__active_command_processes = [] self.__started = False def start(self): """ Start the controller Must be called after ctor and before process() :return: """ self.logger.debug("Starting controller") self.__active_scan_process.start() self.__local_scan_process.start() self.__remote_scan_process.start() self.__extract_process.start() self.__mp_logger.start() self.__started = True def process(self): """ Advance the controller state This method should return relatively quickly as the heavy lifting is done by concurrent tasks :return: """ if not self.__started: raise ControllerError("Cannot process, controller is not started") self.__propagate_exceptions() self.__cleanup_commands() self.__process_commands() self.__update_model() def exit(self): self.logger.debug("Exiting controller") if self.__started: self.__lftp.exit() self.__active_scan_process.terminate() self.__local_scan_process.terminate() self.__remote_scan_process.terminate() self.__extract_process.terminate() self.__active_scan_process.join() self.__local_scan_process.join() self.__remote_scan_process.join() self.__extract_process.join() self.__mp_logger.stop() self.__started = False self.logger.info("Exited controller") def get_model_files(self) -> List[ModelFile]: """ Returns a copy of all the model files :return: """ # Lock the model self.__model_lock.acquire() model_files = self.__get_model_files() # Release the model self.__model_lock.release() return model_files def add_model_listener(self, listener: IModelListener): """ Adds a listener to the controller's model :param listener: :return: """ # Lock the model self.__model_lock.acquire() self.__model.add_listener(listener) # Release the model self.__model_lock.release() def remove_model_listener(self, listener: IModelListener): """ Removes a listener from the controller's 
model :param listener: :return: """ # Lock the model self.__model_lock.acquire() self.__model.remove_listener(listener) # Release the model self.__model_lock.release() def get_model_files_and_add_listener(self, listener: IModelListener): """ Adds a listener and returns the current state of model files in one atomic operation This guarantees that model update events are not missed or duplicated for the clients Without an atomic operation, the following scenarios can happen: 1. get_model() -> model updated -> add_listener() The model update never propagates to client 2. add_listener() -> model updated -> get_model() The model update is duplicated on client side (once through listener, and once through the model). :param listener: :return: """ # Lock the model self.__model_lock.acquire() self.__model.add_listener(listener) model_files = self.__get_model_files() # Release the model self.__model_lock.release() return model_files def queue_command(self, command: Command): self.__command_queue.put(command) def __get_model_files(self) -> List[ModelFile]: model_files = [] for filename in self.__model.get_file_names(): model_files.append(copy.deepcopy(self.__model.get_file(filename))) return model_files def __update_model(self): # Grab the latest scan results latest_remote_scan = self.__remote_scan_process.pop_latest_result() latest_local_scan = self.__local_scan_process.pop_latest_result() latest_active_scan = self.__active_scan_process.pop_latest_result() # Grab the Lftp status lftp_statuses = None try: lftp_statuses = self.__lftp.status() except LftpError as e: self.logger.warning("Caught lftp error: {}".format(str(e))) # Grab the latest extract results latest_extract_statuses = self.__extract_process.pop_latest_statuses() # Grab the latest extracted file names latest_extracted_results = self.__extract_process.pop_completed() # Update list of active file names if lftp_statuses is not None: self.__active_downloading_file_names = [ s.name for s in lftp_statuses if s.state == LftpJobStatus.State.RUNNING ] if latest_extract_statuses is not None: self.__active_extracting_file_names = [ s.name for s in latest_extract_statuses.statuses if s.state == ExtractStatus.State.EXTRACTING ] # Update the active scanner's state self.__active_scanner.set_active_files( self.__active_downloading_file_names + self.__active_extracting_file_names) # Update model builder state if latest_remote_scan is not None: self.__model_builder.set_remote_files(latest_remote_scan.files) if latest_local_scan is not None: self.__model_builder.set_local_files(latest_local_scan.files) if latest_active_scan is not None: self.__model_builder.set_active_files(latest_active_scan.files) if lftp_statuses is not None: self.__model_builder.set_lftp_statuses(lftp_statuses) if latest_extract_statuses is not None: self.__model_builder.set_extract_statuses( latest_extract_statuses.statuses) if latest_extracted_results: for result in latest_extracted_results: self.__persist.extracted_file_names.add(result.name) self.__model_builder.set_extracted_files( self.__persist.extracted_file_names) # Build the new model, if needed if self.__model_builder.has_changes(): new_model = self.__model_builder.build_model() # Lock the model self.__model_lock.acquire() # Diff the new model with old model model_diff = ModelDiffUtil.diff_models(self.__model, new_model) # Apply changes to the new model for diff in model_diff: if diff.change == ModelDiff.Change.ADDED: self.__model.add_file(diff.new_file) elif diff.change == ModelDiff.Change.REMOVED: 
self.__model.remove_file(diff.old_file.name) elif diff.change == ModelDiff.Change.UPDATED: self.__model.update_file(diff.new_file) # Detect if a file was just Downloaded # an Added file in Downloaded state # an Updated file transitioning to Downloaded state # If so, update the persist state # Note: This step is done after the new model is built because # model_builder is the one that discovers when a file is Downloaded downloaded = False if diff.change == ModelDiff.Change.ADDED and \ diff.new_file.state == ModelFile.State.DOWNLOADED: downloaded = True elif diff.change == ModelDiff.Change.UPDATED and \ diff.new_file.state == ModelFile.State.DOWNLOADED and \ diff.old_file.state != ModelFile.State.DOWNLOADED: downloaded = True if downloaded: self.__persist.downloaded_file_names.add( diff.new_file.name) self.__model_builder.set_downloaded_files( self.__persist.downloaded_file_names) # Prune the extracted files list of any files that were deleted locally # This prevents these files from going to EXTRACTED state if they are re-downloaded remove_extracted_file_names = set() existing_file_names = self.__model.get_file_names() for extracted_file_name in self.__persist.extracted_file_names: if extracted_file_name in existing_file_names: file = self.__model.get_file(extracted_file_name) if file.state == ModelFile.State.DELETED: # Deleted locally, remove remove_extracted_file_names.add(extracted_file_name) else: # Not in the model at all # This could be because local and remote scans are not yet available pass if remove_extracted_file_names: self.logger.info("Removing from extracted list: {}".format( remove_extracted_file_names)) self.__persist.extracted_file_names.difference_update( remove_extracted_file_names) self.__model_builder.set_extracted_files( self.__persist.extracted_file_names) # Release the model self.__model_lock.release() # Update the controller status if latest_remote_scan is not None: self.__context.status.controller.latest_remote_scan_time = latest_remote_scan.timestamp if latest_local_scan is not None: self.__context.status.controller.latest_local_scan_time = latest_local_scan.timestamp def __process_commands(self): def _notify_failure(_command: Controller.Command, _msg: str): self.logger.warning("Command failed. 
{}".format(_msg)) for _callback in _command.callbacks: _callback.on_failure(_msg) while not self.__command_queue.empty(): command = self.__command_queue.get() self.logger.info("Received command {} for file {}".format( str(command.action), command.filename)) try: file = self.__model.get_file(command.filename) except ModelError: _notify_failure(command, "File '{}' not found".format(command.filename)) continue if command.action == Controller.Command.Action.QUEUE: if file.remote_size is None: _notify_failure( command, "File '{}' does not exist remotely".format( command.filename)) continue try: self.__lftp.queue(file.name, file.is_dir) except LftpError as e: _notify_failure(command, "Lftp error: ".format(str(e))) continue elif command.action == Controller.Command.Action.STOP: if file.state not in (ModelFile.State.DOWNLOADING, ModelFile.State.QUEUED): _notify_failure( command, "File '{}' is not Queued or Downloading".format( command.filename)) continue try: self.__lftp.kill(file.name) except LftpError as e: _notify_failure(command, "Lftp error: ".format(str(e))) continue elif command.action == Controller.Command.Action.EXTRACT: # Note: We don't check the is_extractable flag because it's just a guess if file.state not in (ModelFile.State.DEFAULT, ModelFile.State.DOWNLOADED, ModelFile.State.EXTRACTED): _notify_failure( command, "File '{}' in state {} cannot be extracted".format( command.filename, str(file.state))) continue elif file.local_size is None: _notify_failure( command, "File '{}' does not exist locally".format( command.filename)) continue else: self.__extract_process.extract(file) elif command.action == Controller.Command.Action.DELETE_LOCAL: if file.state not in (ModelFile.State.DEFAULT, ModelFile.State.DOWNLOADED, ModelFile.State.EXTRACTED): _notify_failure( command, "Local file '{}' cannot be deleted in state {}".format( command.filename, str(file.state))) continue elif file.local_size is None: _notify_failure( command, "File '{}' does not exist locally".format( command.filename)) continue else: process = DeleteLocalProcess( local_path=self.__context.config.lftp.local_path, file_name=file.name) process.set_multiprocessing_logger(self.__mp_logger) post_callback = self.__local_scan_process.force_scan command_wrapper = Controller.CommandProcessWrapper( process=process, post_callback=post_callback) self.__active_command_processes.append(command_wrapper) command_wrapper.process.start() elif command.action == Controller.Command.Action.DELETE_REMOTE: if file.state not in (ModelFile.State.DEFAULT, ModelFile.State.DOWNLOADED, ModelFile.State.EXTRACTED, ModelFile.State.DELETED): _notify_failure( command, "Remote file '{}' cannot be deleted in state {}". format(command.filename, str(file.state))) continue elif file.remote_size is None: _notify_failure( command, "File '{}' does not exist remotely".format( command.filename)) continue else: process = DeleteRemoteProcess( remote_address=self.__context.config.lftp. remote_address, remote_username=self.__context.config.lftp. 
remote_username, remote_password=self.__password, remote_port=self.__context.config.lftp.remote_port, remote_path=self.__context.config.lftp.remote_path, file_name=file.name) process.set_multiprocessing_logger(self.__mp_logger) post_callback = self.__remote_scan_process.force_scan command_wrapper = Controller.CommandProcessWrapper( process=process, post_callback=post_callback) self.__active_command_processes.append(command_wrapper) command_wrapper.process.start() # If we get here, it was a success for callback in command.callbacks: callback.on_success() def __propagate_exceptions(self): """ Propagate any exceptions from child processes/threads to this thread :return: """ self.__lftp.raise_pending_error() self.__active_scan_process.propagate_exception() self.__local_scan_process.propagate_exception() self.__remote_scan_process.propagate_exception() self.__mp_logger.propagate_exception() self.__extract_process.propagate_exception() def __cleanup_commands(self): """ Cleanup the list of active commands and do any callbacks :return: """ still_active_processes = [] for command_process in self.__active_command_processes: if command_process.process.is_alive(): still_active_processes.append(command_process) else: # Do the post callback command_process.post_callback() # Propagate the exception command_process.process.propagate_exception() self.__active_command_processes = still_active_processes
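# --- Illustrative usage sketch (editor's addition, not part of the original source) ---
# A minimal example of how a client of Controller might use the Command/callback
# API defined above: implement Command.ICallback, queue a Command, and let the
# controller's process() loop invoke the callbacks. The "controller" instance and
# its construction (Context, ControllerPersist) are assumed to exist elsewhere;
# this is a hypothetical sketch, not code from the project.
#
# class PrintCallback(Controller.Command.ICallback):
#     def on_success(self):
#         # Keep this light: callbacks run on the Controller thread
#         print("queue succeeded")
#
#     def on_failure(self, error: str):
#         print("queue failed: {}".format(error))
#
# command = Controller.Command(Controller.Command.Action.QUEUE, "some.file.name")
# command.add_callback(PrintCallback())
# controller.queue_command(command)
# controller.process()  # command is handled and callbacks fire on a later process() call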
class TestLftp(unittest.TestCase): temp_dir = None @classmethod def setUpClass(cls): # Create a temp directory TestLftp.temp_dir = tempfile.mkdtemp(prefix="test_lftp_") # Create some test directories # remote [dir] for remote path # a [dir] # aa [file, 24*1024 bytes] # ab [file, 24*1024*1024 bytes] # b [dir] # ba [dir] # baa [file, 128*1024 bytes] # bab [file, 128*1024 bytes] # bb [file, 128*1024 bytes] # c [file, 1234 bytes] # "d d" [file, 128*1024 bytes] # "e e" [dir] # "e e a" [file, 128*1024 bytes] # local [dir] for local path, cleared before every test def my_mkdir(*args): os.mkdir(os.path.join(TestLftp.temp_dir, *args)) def my_touch(size, *args): path = os.path.join(TestLftp.temp_dir, *args) with open(path, 'wb') as f: f.write(bytearray([0xff] * size)) my_mkdir("remote") my_mkdir("remote", "a") my_touch(24 * 1024, "remote", "a", "aa") my_touch(24 * 1024 * 1024, "remote", "a", "ab") my_mkdir("remote", "b") my_mkdir("remote", "b", "ba") my_touch(128 * 1024, "remote", "b", "ba", "baa") my_touch(128 * 1024, "remote", "b", "ba", "bab") my_touch(128 * 1024, "remote", "b", "bb") my_touch(1234, "remote", "c") my_touch(128 * 1024, "remote", "d d") my_mkdir("remote", "e e") my_touch(128 * 1024, "remote", "e e", "e e a") my_mkdir("local") @classmethod def tearDownClass(cls): # Cleanup shutil.rmtree(TestLftp.temp_dir) def setUp(self): # Delete and recreate the local dir shutil.rmtree(os.path.join(TestLftp.temp_dir, "local")) os.mkdir(os.path.join(TestLftp.temp_dir, "local")) # Create default lftp instance # Note: password-less ssh needs to be set up # i.e. user's public key needs to be in authorized_keys # cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys self.lftp = Lftp(address="localhost", port=22, user=getpass.getuser(), password="") self.lftp.set_base_remote_dir_path( os.path.join(TestLftp.temp_dir, "remote")) self.lftp.set_base_local_dir_path( os.path.join(TestLftp.temp_dir, "local")) logger = logging.getLogger("TestLftp") logger.setLevel(logging.DEBUG) handler = logging.StreamHandler(sys.stdout) formatter = logging.Formatter( "%(asctime)s - %(levelname)s - %(name)s - %(message)s") handler.setFormatter(formatter) logger.addHandler(handler) self.lftp.set_base_logger(logger) def tearDown(self): self.lftp.exit() def test_num_connections_per_dir_file(self): self.lftp.num_connections_per_dir_file = 5 self.assertEqual(5, self.lftp.num_connections_per_dir_file) with self.assertRaises(ValueError): self.lftp.num_connections_per_dir_file = -1 def test_num_connections_per_root_file(self): self.lftp.num_connections_per_root_file = 5 self.assertEqual(5, self.lftp.num_connections_per_root_file) with self.assertRaises(ValueError): self.lftp.num_connections_per_root_file = -1 def test_num_parallel_files(self): self.lftp.num_parallel_files = 5 self.assertEqual(5, self.lftp.num_parallel_files) with self.assertRaises(ValueError): self.lftp.num_parallel_files = -1 def test_num_max_total_connections(self): self.lftp.num_max_total_connections = 5 self.assertEqual(5, self.lftp.num_max_total_connections) self.lftp.num_max_total_connections = 0 self.assertEqual(0, self.lftp.num_max_total_connections) with self.assertRaises(ValueError): self.lftp.num_max_total_connections = -1 def test_rate_limit(self): self.lftp.rate_limit = 500 self.assertEqual("500", self.lftp.rate_limit) self.lftp.rate_limit = "2k" self.assertEqual("2k", self.lftp.rate_limit) self.lftp.rate_limit = "1M" self.assertEqual("1M", self.lftp.rate_limit) def test_min_chunk_size(self): self.lftp.min_chunk_size = 500 self.assertEqual("500", 
self.lftp.min_chunk_size) self.lftp.min_chunk_size = "2k" self.assertEqual("2k", self.lftp.min_chunk_size) self.lftp.min_chunk_size = "1M" self.assertEqual("1M", self.lftp.min_chunk_size) def test_num_parallel_jobs(self): self.lftp.num_parallel_jobs = 5 self.assertEqual(5, self.lftp.num_parallel_jobs) with self.assertRaises(ValueError): self.lftp.num_parallel_jobs = -1 def test_move_background_on_exit(self): self.lftp.move_background_on_exit = True self.assertEqual(True, self.lftp.move_background_on_exit) self.lftp.move_background_on_exit = False self.assertEqual(False, self.lftp.move_background_on_exit) def test_status_empty(self): statuses = self.lftp.status() self.assertEqual(0, len(statuses)) def test_queue_file(self): self.lftp.queue("c", False) statuses = self.lftp.status() self.assertEqual(1, len(statuses)) self.assertEqual("c", statuses[0].name) self.assertEqual(LftpJobStatus.Type.PGET, statuses[0].type) self.assertEqual(LftpJobStatus.State.RUNNING, statuses[0].state) def test_queue_dir(self): self.lftp.queue("a", True) statuses = self.lftp.status() self.assertEqual(1, len(statuses)) self.assertEqual("a", statuses[0].name) self.assertEqual(LftpJobStatus.Type.MIRROR, statuses[0].type) self.assertEqual(LftpJobStatus.State.RUNNING, statuses[0].state) def test_queue_file_with_spaces(self): self.lftp.queue("d d", False) statuses = self.lftp.status() self.assertEqual(1, len(statuses)) self.assertEqual("d d", statuses[0].name) self.assertEqual(LftpJobStatus.Type.PGET, statuses[0].type) self.assertEqual(LftpJobStatus.State.RUNNING, statuses[0].state) def test_queue_dir_with_spaces(self): self.lftp.queue("e e", True) statuses = self.lftp.status() self.assertEqual(1, len(statuses)) self.assertEqual("e e", statuses[0].name) self.assertEqual(LftpJobStatus.Type.MIRROR, statuses[0].type) self.assertEqual(LftpJobStatus.State.RUNNING, statuses[0].state) def test_queue_num_parallel_jobs(self): self.lftp.num_parallel_jobs = 2 self.lftp.rate_limit = 10 # so jobs don't finish right away self.lftp.queue("a", True) self.lftp.queue("c", False) self.lftp.queue("b", True) statuses = self.lftp.status() self.assertEqual(3, len(statuses)) # queued jobs self.assertEqual("b", statuses[0].name) self.assertEqual(LftpJobStatus.Type.MIRROR, statuses[0].type) self.assertEqual(LftpJobStatus.State.QUEUED, statuses[0].state) # running jobs self.assertEqual("a", statuses[1].name) self.assertEqual(LftpJobStatus.Type.MIRROR, statuses[1].type) self.assertEqual(LftpJobStatus.State.RUNNING, statuses[1].state) self.assertEqual("c", statuses[2].name) self.assertEqual(LftpJobStatus.Type.PGET, statuses[2].type) self.assertEqual(LftpJobStatus.State.RUNNING, statuses[2].state) def test_kill_all(self): self.lftp.num_parallel_jobs = 2 self.lftp.rate_limit = 10 # so jobs don't finish right away self.lftp.queue("a", True) self.lftp.queue("c", False) self.lftp.queue("b", True) statuses = self.lftp.status() self.assertEqual(3, len(statuses)) self.lftp.kill_all() statuses = self.lftp.status() self.assertEqual(0, len(statuses)) def test_kill_all_and_queue_again(self): self.lftp.num_parallel_jobs = 2 self.lftp.rate_limit = 10 # so jobs don't finish right away self.lftp.queue("a", True) self.lftp.queue("c", False) self.lftp.queue("b", True) statuses = self.lftp.status() self.assertEqual(3, len(statuses)) self.lftp.kill_all() statuses = self.lftp.status() self.assertEqual(0, len(statuses)) self.lftp.queue("b", True) statuses = self.lftp.status() self.assertEqual(1, len(statuses)) self.assertEqual("b", statuses[0].name) 
self.assertEqual(LftpJobStatus.Type.MIRROR, statuses[0].type) self.assertEqual(LftpJobStatus.State.RUNNING, statuses[0].state) def test_kill_queued_job(self): self.lftp.rate_limit = 10 # so jobs don't finish right away self.lftp.num_parallel_jobs = 1 self.lftp.queue("a", True) # this job will run self.lftp.queue("b", True) # this job will queue statuses = self.lftp.status() self.assertEqual(2, len(statuses)) self.assertEqual("b", statuses[0].name) self.assertEqual(LftpJobStatus.State.QUEUED, statuses[0].state) self.assertEqual("a", statuses[1].name) self.assertEqual(LftpJobStatus.State.RUNNING, statuses[1].state) self.assertEqual(True, self.lftp.kill("b")) statuses = self.lftp.status() self.assertEqual(1, len(statuses)) self.assertEqual("a", statuses[0].name) self.assertEqual(LftpJobStatus.State.RUNNING, statuses[0].state) def test_kill_running_job(self): self.lftp.rate_limit = 10 # so jobs don't finish right away self.lftp.queue("a", True) statuses = self.lftp.status() self.assertEqual(1, len(statuses)) self.assertEqual("a", statuses[0].name) self.assertEqual(LftpJobStatus.State.RUNNING, statuses[0].state) self.assertEqual(True, self.lftp.kill("a")) statuses = self.lftp.status() self.assertEqual(0, len(statuses)) def test_kill_missing_job(self): self.lftp.rate_limit = 10 # so jobs don't finish right away self.lftp.queue("a", True) statuses = self.lftp.status() self.assertEqual(1, len(statuses)) self.assertEqual("a", statuses[0].name) self.assertEqual(LftpJobStatus.State.RUNNING, statuses[0].state) self.assertEqual(False, self.lftp.kill("b")) self.assertEqual(True, self.lftp.kill("a")) statuses = self.lftp.status() self.assertEqual(0, len(statuses)) def test_kill_job_1(self): """Queued and running jobs killed one at a time""" self.lftp.rate_limit = 10 # so jobs don't finish right away self.lftp.num_parallel_jobs = 2 # 2 jobs running, 3 jobs queued self.lftp.queue("a", True) # running self.lftp.queue("d d", False) # running self.lftp.queue("b", True) # queued self.lftp.queue("c", False) # queued self.lftp.queue("e e", True) # queued Q = LftpJobStatus.State.QUEUED R = LftpJobStatus.State.RUNNING statuses = self.lftp.status() self.assertEqual(5, len(statuses)) self.assertEqual(["b", "c", "e e", "a", "d d"], [s.name for s in statuses]) self.assertEqual([Q, Q, Q, R, R], [s.state for s in statuses]) # kill the queued jobs one-by-one self.lftp.kill("c") statuses = self.lftp.status() self.assertEqual(4, len(statuses)) self.assertEqual(["b", "e e", "a", "d d"], [s.name for s in statuses]) self.assertEqual([Q, Q, R, R], [s.state for s in statuses]) self.lftp.kill("b") statuses = self.lftp.status() self.assertEqual(3, len(statuses)) self.assertEqual(["e e", "a", "d d"], [s.name for s in statuses]) self.assertEqual([Q, R, R], [s.state for s in statuses]) self.lftp.kill("e e") statuses = self.lftp.status() self.assertEqual(2, len(statuses)) self.assertEqual(["a", "d d"], [s.name for s in statuses]) self.assertEqual([R, R], [s.state for s in statuses]) # kill the running jobs one-by-one self.lftp.kill("d d") statuses = self.lftp.status() self.assertEqual(1, len(statuses)) self.assertEqual("a", statuses[0].name) self.assertEqual(R, statuses[0].state) self.lftp.kill("a") statuses = self.lftp.status() self.assertEqual(0, len(statuses)) def test_queued_and_kill_jobs_1(self): """Queued and running jobs killed one at a time""" self.lftp.rate_limit = 10 # so jobs don't finish right away self.lftp.num_parallel_jobs = 2 Q = LftpJobStatus.State.QUEUED R = LftpJobStatus.State.RUNNING # add 3 jobs - a, dd, b 
self.lftp.queue("a", True) self.lftp.queue("d d", False) self.lftp.queue("b", True) statuses = self.lftp.status() self.assertEqual(3, len(statuses)) self.assertEqual(["b", "a", "d d"], [s.name for s in statuses]) self.assertEqual([Q, R, R], [s.state for s in statuses]) # remove dd (running) self.lftp.kill("d d") statuses = self.lftp.status() self.assertEqual(2, len(statuses)) self.assertEqual(["a", "b"], [s.name for s in statuses]) self.assertEqual([R, R], [s.state for s in statuses]) # remove a (running) self.lftp.kill("a") statuses = self.lftp.status() self.assertEqual(1, len(statuses)) self.assertEqual(["b"], [s.name for s in statuses]) self.assertEqual([R], [s.state for s in statuses]) # add 3 jobs - c, ee, a self.lftp.queue("c", False) self.lftp.queue("e e", True) self.lftp.queue("a", True) statuses = self.lftp.status() self.assertEqual(4, len(statuses)) self.assertEqual(["e e", "a", "b", "c"], [s.name for s in statuses]) self.assertEqual([Q, Q, R, R], [s.state for s in statuses]) # remove ee (queued) and b (running) self.lftp.kill("e e") statuses = self.lftp.status() self.assertEqual(3, len(statuses)) self.assertEqual(["a", "b", "c"], [s.name for s in statuses]) self.assertEqual([Q, R, R], [s.state for s in statuses]) self.lftp.kill("b") statuses = self.lftp.status() self.assertEqual(2, len(statuses)) self.assertEqual(["c", "a"], [s.name for s in statuses]) self.assertEqual([R, R], [s.state for s in statuses]) # remove all self.lftp.kill_all() statuses = self.lftp.status() self.assertEqual(0, len(statuses)) def test_queue_wrong_file_type(self): # check that queueing a file with MIRROR and a dir with PGET fails gracefully self.lftp.num_parallel_jobs = 5 # passing dir as a file print("Queuing dir as a file") self.lftp.queue("a", False) time.sleep(0.5) # wait for jobs to connect print("Error'ed command") self.assertEqual(5, self.lftp.num_parallel_jobs) # next status should be empty print("Getting empty status") statuses = self.lftp.status() self.assertEqual(0, len(statuses)) # passing file as a dir print("Queuing file as a dir") self.lftp.queue("c", True) time.sleep(0.5) # wait for jobs to connect print("Error'ed command") self.assertEqual(5, self.lftp.num_parallel_jobs) # next status should be empty print("Getting empty status") statuses = self.lftp.status() self.assertEqual(0, len(statuses)) def test_queue_missing_file(self): # check that queueing non-existing file fails gracefully self.lftp.num_parallel_jobs = 5 self.lftp.queue("non-existing-file", False) time.sleep(0.5) # wait for jobs to connect print("Error'ed command") self.assertEqual(5, self.lftp.num_parallel_jobs) # next status should be empty print("Getting empty status") statuses = self.lftp.status() self.assertEqual(0, len(statuses)) self.lftp.queue("non-existing-folder", True) time.sleep(0.5) # wait for jobs to connect print("Error'ed command") self.assertEqual(5, self.lftp.num_parallel_jobs) # next status should be empty print("Getting empty status") statuses = self.lftp.status() self.assertEqual(0, len(statuses))
class TestLftp(unittest.TestCase): temp_dir = None @classmethod def setUpClass(cls): # Create a temp directory TestLftp.temp_dir = tempfile.mkdtemp(prefix="test_lftp_") print(f"Temp dir: {TestLftp.temp_dir}") # Allow group access for the seedsynctest account TestUtils.chmod_from_to(TestLftp.temp_dir, tempfile.gettempdir(), 0o775) # Create some test directories # remote [dir] for remote path # a [dir] # aa [file, 24*1024 bytes] # ab [file, 24*1024*1024 bytes] # b [dir] # ba [dir] # baa [file, 128*1024 bytes] # bab [file, 128*1024 bytes] # bb [file, 128*1024 bytes] # c [file, 1234 bytes] # "d d" [file, 128*1024 bytes] # "e e" [dir] # "e e a" [file, 128*1024 bytes] # áßç [dir] # dőÀ [file, 128*1024 bytes] # üæÒ [file, 256*1024 bytes] # local [dir] for local path, cleared before every test def my_mkdir(*args): os.mkdir(os.path.join(TestLftp.temp_dir, *args)) def my_touch(size, *args): path = os.path.join(TestLftp.temp_dir, *args) with open(path, 'wb') as f: f.write(bytearray([0xff] * size)) def my_mkdir_latin(*args): os.mkdir(os.path.join(TestLftp.temp_dir.encode('latin-1'), *args)) def my_touch_latin(size, *args): path = os.path.join(TestLftp.temp_dir.encode('latin-1'), *args) with open(path, 'wb') as f: f.write(bytearray([0xff] * size)) my_mkdir("remote") my_mkdir("remote", "a") my_touch(24 * 1024, "remote", "a", "aa") my_touch(24 * 1024 * 1024, "remote", "a", "ab") my_mkdir("remote", "b") my_mkdir("remote", "b", "ba") my_touch(128 * 1024, "remote", "b", "ba", "baa") my_touch(128 * 1024, "remote", "b", "ba", "bab") my_touch(128 * 1024, "remote", "b", "bb") my_touch(1234, "remote", "c") my_touch(128 * 1024, "remote", "d d") my_mkdir("remote", "e e") my_touch(128 * 1024, "remote", "e e", "e e a") my_mkdir("remote", "áßç") my_touch(128 * 1024, "remote", "áßç", "dőÀ") my_touch(256 * 1024, "remote", "üæÒ") my_mkdir_latin(b"remote", b"f\xe9g") my_touch_latin(128 * 1024, b"remote", b"f\xe9g", b"d\xe9f") my_touch_latin(256 * 1024, b"remote", b"g\xe9h") my_mkdir_latin(b"remote", b"latin") my_touch_latin(128 * 1024, b"remote", b"latin", b"d\xe9f") my_mkdir("local") @classmethod def tearDownClass(cls): # Cleanup shutil.rmtree(TestLftp.temp_dir) def setUp(self): # Delete and recreate the local dir shutil.rmtree(os.path.join(TestLftp.temp_dir, "local")) os.mkdir(os.path.join(TestLftp.temp_dir, "local")) self.local_dir = os.path.join(TestLftp.temp_dir, "local") self.remote_dir = os.path.join(TestLftp.temp_dir, "remote") # Note: seedsynctest account must be set up. 
See DeveloperReadme.md for details self.host = "localhost" self.port = 22 self.user = "******" self.password = "******" # Default lftp instance - use key-based login self.lftp = Lftp(address=self.host, port=self.port, user=self.user, password=None) self.lftp.set_base_remote_dir_path(self.remote_dir) self.lftp.set_base_local_dir_path(self.local_dir) self.lftp.set_verbose_logging(True) logger = logging.getLogger() logger.setLevel(logging.DEBUG) handler = logging.StreamHandler(sys.stdout) formatter = logging.Formatter( "%(asctime)s - %(levelname)s - %(name)s - %(message)s") handler.setFormatter(formatter) logger.addHandler(handler) def tearDown(self): self.lftp.raise_pending_error() self.lftp.exit() def test_num_connections_per_dir_file(self): self.lftp.num_connections_per_dir_file = 5 self.assertEqual(5, self.lftp.num_connections_per_dir_file) with self.assertRaises(ValueError): self.lftp.num_connections_per_dir_file = -1 def test_num_connections_per_root_file(self): self.lftp.num_connections_per_root_file = 5 self.assertEqual(5, self.lftp.num_connections_per_root_file) with self.assertRaises(ValueError): self.lftp.num_connections_per_root_file = -1 def test_num_parallel_files(self): self.lftp.num_parallel_files = 5 self.assertEqual(5, self.lftp.num_parallel_files) with self.assertRaises(ValueError): self.lftp.num_parallel_files = -1 def test_num_max_total_connections(self): self.lftp.num_max_total_connections = 5 self.assertEqual(5, self.lftp.num_max_total_connections) self.lftp.num_max_total_connections = 0 self.assertEqual(0, self.lftp.num_max_total_connections) with self.assertRaises(ValueError): self.lftp.num_max_total_connections = -1 def test_rate_limit(self): self.lftp.rate_limit = 500 self.assertEqual("500", self.lftp.rate_limit) self.lftp.rate_limit = "2k" self.assertEqual("2k", self.lftp.rate_limit) self.lftp.rate_limit = "1M" self.assertEqual("1M", self.lftp.rate_limit) def test_min_chunk_size(self): self.lftp.min_chunk_size = 500 self.assertEqual("500", self.lftp.min_chunk_size) self.lftp.min_chunk_size = "2k" self.assertEqual("2k", self.lftp.min_chunk_size) self.lftp.min_chunk_size = "1M" self.assertEqual("1M", self.lftp.min_chunk_size) def test_num_parallel_jobs(self): self.lftp.num_parallel_jobs = 5 self.assertEqual(5, self.lftp.num_parallel_jobs) with self.assertRaises(ValueError): self.lftp.num_parallel_jobs = -1 def test_move_background_on_exit(self): self.lftp.move_background_on_exit = True self.assertEqual(True, self.lftp.move_background_on_exit) self.lftp.move_background_on_exit = False self.assertEqual(False, self.lftp.move_background_on_exit) def test_use_temp_file(self): self.lftp.use_temp_file = True self.assertEqual(True, self.lftp.use_temp_file) self.lftp.use_temp_file = False self.assertEqual(False, self.lftp.use_temp_file) def test_temp_file_name(self): self.lftp.temp_file_name = "*.lftp" self.assertEqual("*.lftp", self.lftp.temp_file_name) self.lftp.temp_file_name = "*.temp" self.assertEqual("*.temp", self.lftp.temp_file_name) def test_sftp_auto_confirm(self): self.lftp.sftp_auto_confirm = True self.assertEqual(True, self.lftp.sftp_auto_confirm) self.lftp.sftp_auto_confirm = False self.assertEqual(False, self.lftp.sftp_auto_confirm) def test_sftp_connect_program(self): self.lftp.sftp_connect_program = "program -a -f" self.assertEqual("\"program -a -f\"", self.lftp.sftp_connect_program) self.lftp.sftp_connect_program = "\"abc -d\"" self.assertEqual("\"abc -d\"", self.lftp.sftp_connect_program) def test_status_empty(self): statuses = self.lftp.status() 
self.assertEqual(0, len(statuses)) @timeout_decorator.timeout(5) def test_queue_file(self): self.lftp.rate_limit = 10 # so jobs don't finish right away self.lftp.queue("c", False) while True: statuses = self.lftp.status() if len(statuses) > 0: break self.assertEqual(1, len(statuses)) self.assertEqual("c", statuses[0].name) self.assertEqual(LftpJobStatus.Type.PGET, statuses[0].type) self.assertEqual(LftpJobStatus.State.RUNNING, statuses[0].state) @timeout_decorator.timeout(5) def test_queue_dir(self): self.lftp.rate_limit = 10 # so jobs don't finish right away self.lftp.queue("a", True) while True: statuses = self.lftp.status() if len(statuses) > 0: break self.assertEqual(1, len(statuses)) self.assertEqual("a", statuses[0].name) self.assertEqual(LftpJobStatus.Type.MIRROR, statuses[0].type) self.assertEqual(LftpJobStatus.State.RUNNING, statuses[0].state) @timeout_decorator.timeout(5) def test_queue_file_with_spaces(self): self.lftp.rate_limit = 10 # so jobs don't finish right away self.lftp.queue("d d", False) while True: statuses = self.lftp.status() self.lftp.raise_pending_error() if len(statuses) > 0: break self.assertEqual(1, len(statuses)) self.assertEqual("d d", statuses[0].name) self.assertEqual(LftpJobStatus.Type.PGET, statuses[0].type) self.assertEqual(LftpJobStatus.State.RUNNING, statuses[0].state) @timeout_decorator.timeout(5) def test_queue_dir_with_spaces(self): self.lftp.rate_limit = 10 # so jobs don't finish right away self.lftp.queue("e e", True) while True: statuses = self.lftp.status() if len(statuses) > 0: break self.assertEqual(1, len(statuses)) self.assertEqual("e e", statuses[0].name) self.assertEqual(LftpJobStatus.Type.MIRROR, statuses[0].type) self.assertEqual(LftpJobStatus.State.RUNNING, statuses[0].state) @timeout_decorator.timeout(5) def test_queue_file_with_unicode(self): self.lftp.rate_limit = 10 # so jobs don't finish right away self.lftp.queue("üæÒ", False) while True: statuses = self.lftp.status() if len(statuses) > 0: break self.assertEqual(1, len(statuses)) self.assertEqual("üæÒ", statuses[0].name) self.assertEqual(LftpJobStatus.Type.PGET, statuses[0].type) self.assertEqual(LftpJobStatus.State.RUNNING, statuses[0].state) @timeout_decorator.timeout(5) def test_queue_dir_with_latin(self): self.lftp.rate_limit = 100 # so jobs don't finish right away self.lftp.queue("latin", True) while True: statuses = self.lftp.status() self.lftp.raise_pending_error() if len(statuses) > 0: break self.assertEqual(1, len(statuses)) self.assertEqual("latin", statuses[0].name) self.assertEqual(LftpJobStatus.Type.MIRROR, statuses[0].type) self.assertEqual(LftpJobStatus.State.RUNNING, statuses[0].state) # Download over 100 bytes without errors while True: statuses = self.lftp.status() self.lftp.raise_pending_error() size_local = statuses[0].total_transfer_state.size_local if size_local and size_local > 100: break @timeout_decorator.timeout(5) def test_queue_dir_with_unicode(self): self.lftp.rate_limit = 10 # so jobs don't finish right away self.lftp.queue("áßç", True) while True: statuses = self.lftp.status() self.lftp.raise_pending_error() if len(statuses) > 0: break self.assertEqual(1, len(statuses)) self.assertEqual("áßç", statuses[0].name) self.assertEqual(LftpJobStatus.Type.MIRROR, statuses[0].type) self.assertEqual(LftpJobStatus.State.RUNNING, statuses[0].state) @timeout_decorator.timeout(5) def test_queue_num_parallel_jobs(self): self.lftp.num_parallel_jobs = 2 self.lftp.rate_limit = 10 # so jobs don't finish right away self.lftp.queue("a", True) self.lftp.queue("c", False) 
self.lftp.queue("b", True) while True: statuses = self.lftp.status() self.lftp.raise_pending_error() if len(statuses) > 2: break self.assertEqual(3, len(statuses)) # queued jobs self.assertEqual("b", statuses[0].name) self.assertEqual(LftpJobStatus.Type.MIRROR, statuses[0].type) self.assertEqual(LftpJobStatus.State.QUEUED, statuses[0].state) # running jobs self.assertEqual("a", statuses[1].name) self.assertEqual(LftpJobStatus.Type.MIRROR, statuses[1].type) self.assertEqual(LftpJobStatus.State.RUNNING, statuses[1].state) self.assertEqual("c", statuses[2].name) self.assertEqual(LftpJobStatus.Type.PGET, statuses[2].type) self.assertEqual(LftpJobStatus.State.RUNNING, statuses[2].state) @timeout_decorator.timeout(5) def test_kill_all(self): self.lftp.num_parallel_jobs = 2 self.lftp.rate_limit = 10 # so jobs don't finish right away self.lftp.queue("a", True) self.lftp.queue("c", False) self.lftp.queue("b", True) while True: statuses = self.lftp.status() self.lftp.raise_pending_error() if len(statuses) > 2: break self.assertEqual(3, len(statuses)) self.lftp.kill_all() statuses = self.lftp.status() while True: statuses = self.lftp.status() self.lftp.raise_pending_error() if len(statuses) == 0: break statuses = self.lftp.status() self.assertEqual(0, len(statuses)) @timeout_decorator.timeout(5) def test_kill_all_and_queue_again(self): self.lftp.num_parallel_jobs = 2 self.lftp.rate_limit = 10 # so jobs don't finish right away self.lftp.queue("a", True) self.lftp.queue("c", False) self.lftp.queue("b", True) while True: statuses = self.lftp.status() self.lftp.raise_pending_error() if len(statuses) > 2: break self.assertEqual(3, len(statuses)) self.lftp.kill_all() while True: statuses = self.lftp.status() self.lftp.raise_pending_error() if len(statuses) == 0: break self.assertEqual(0, len(statuses)) self.lftp.queue("b", True) while True: statuses = self.lftp.status() self.lftp.raise_pending_error() if len(statuses) > 0: break self.assertEqual(1, len(statuses)) self.assertEqual("b", statuses[0].name) self.assertEqual(LftpJobStatus.Type.MIRROR, statuses[0].type) self.assertEqual(LftpJobStatus.State.RUNNING, statuses[0].state) @timeout_decorator.timeout(5) def test_kill_queued_job(self): self.lftp.rate_limit = 10 # so jobs don't finish right away self.lftp.num_parallel_jobs = 1 self.lftp.queue("a", True) # this job will run self.lftp.queue("b", True) # this job will queue while True: statuses = self.lftp.status() self.lftp.raise_pending_error() if len(statuses) > 1: break self.assertEqual(2, len(statuses)) self.assertEqual("b", statuses[0].name) self.assertEqual(LftpJobStatus.State.QUEUED, statuses[0].state) self.assertEqual("a", statuses[1].name) self.assertEqual(LftpJobStatus.State.RUNNING, statuses[1].state) self.assertEqual(True, self.lftp.kill("b")) while True: statuses = self.lftp.status() self.lftp.raise_pending_error() if len(statuses) > 0: break self.assertEqual(1, len(statuses)) self.assertEqual("a", statuses[0].name) self.assertEqual(LftpJobStatus.State.RUNNING, statuses[0].state) @timeout_decorator.timeout(5) def test_kill_running_job(self): self.lftp.rate_limit = 10 # so jobs don't finish right away self.lftp.queue("a", True) while True: statuses = self.lftp.status() self.lftp.raise_pending_error() if len(statuses) > 0: break self.assertEqual(1, len(statuses)) self.assertEqual("a", statuses[0].name) self.assertEqual(LftpJobStatus.State.RUNNING, statuses[0].state) self.assertEqual(True, self.lftp.kill("a")) while True: statuses = self.lftp.status() self.lftp.raise_pending_error() if 
len(statuses) == 0: break self.assertEqual(0, len(statuses)) @timeout_decorator.timeout(5) def test_kill_missing_job(self): self.lftp.rate_limit = 10 # so jobs don't finish right away self.lftp.queue("a", True) while True: statuses = self.lftp.status() self.lftp.raise_pending_error() if len(statuses) > 0: break self.assertEqual(1, len(statuses)) self.assertEqual("a", statuses[0].name) self.assertEqual(LftpJobStatus.State.RUNNING, statuses[0].state) self.assertEqual(False, self.lftp.kill("b")) self.assertEqual(True, self.lftp.kill("a")) while True: statuses = self.lftp.status() self.lftp.raise_pending_error() if len(statuses) == 0: break self.assertEqual(0, len(statuses)) @timeout_decorator.timeout(5) def test_kill_job_1(self): """Queued and running jobs killed one at a time""" self.lftp.rate_limit = 10 # so jobs don't finish right away self.lftp.num_parallel_jobs = 2 # 2 jobs running, 3 jobs queued self.lftp.queue("a", True) # running self.lftp.queue("d d", False) # running self.lftp.queue("b", True) # queued self.lftp.queue("c", False) # queued self.lftp.queue("e e", True) # queued Q = LftpJobStatus.State.QUEUED R = LftpJobStatus.State.RUNNING while True: statuses = self.lftp.status() self.lftp.raise_pending_error() if len(statuses) > 4: break self.assertEqual(5, len(statuses)) self.assertEqual(["b", "c", "e e", "a", "d d"], [s.name for s in statuses]) self.assertEqual([Q, Q, Q, R, R], [s.state for s in statuses]) # kill the queued jobs one-by-one self.lftp.kill("c") while True: statuses = self.lftp.status() self.lftp.raise_pending_error() if len(statuses) == 4: break self.assertEqual(4, len(statuses)) self.assertEqual(["b", "e e", "a", "d d"], [s.name for s in statuses]) self.assertEqual([Q, Q, R, R], [s.state for s in statuses]) self.lftp.kill("b") while True: statuses = self.lftp.status() self.lftp.raise_pending_error() if len(statuses) == 3: break self.assertEqual(3, len(statuses)) self.assertEqual(["e e", "a", "d d"], [s.name for s in statuses]) self.assertEqual([Q, R, R], [s.state for s in statuses]) self.lftp.kill("e e") while True: statuses = self.lftp.status() self.lftp.raise_pending_error() if len(statuses) == 2: break self.assertEqual(2, len(statuses)) self.assertEqual(["a", "d d"], [s.name for s in statuses]) self.assertEqual([R, R], [s.state for s in statuses]) # kill the running jobs one-by-one self.lftp.kill("d d") statuses = self.lftp.status() while True: statuses = self.lftp.status() self.lftp.raise_pending_error() if len(statuses) == 1: break self.assertEqual("a", statuses[0].name) self.assertEqual(R, statuses[0].state) self.lftp.kill("a") while True: statuses = self.lftp.status() self.lftp.raise_pending_error() if len(statuses) == 0: break self.assertEqual(0, len(statuses)) @timeout_decorator.timeout(5) def test_queued_and_kill_jobs_1(self): """Queued and running jobs killed one at a time""" self.lftp.rate_limit = 10 # so jobs don't finish right away self.lftp.num_parallel_jobs = 2 Q = LftpJobStatus.State.QUEUED R = LftpJobStatus.State.RUNNING # add 3 jobs - a, dd, b self.lftp.queue("a", True) self.lftp.queue("d d", False) self.lftp.queue("b", True) while True: statuses = self.lftp.status() self.lftp.raise_pending_error() if len(statuses) > 2: break self.assertEqual(3, len(statuses)) self.assertEqual(["b", "a", "d d"], [s.name for s in statuses]) self.assertEqual([Q, R, R], [s.state for s in statuses]) # remove dd (running) self.lftp.kill("d d") while True: statuses = self.lftp.status() self.lftp.raise_pending_error() if len(statuses) == 2: break self.assertEqual(2, 
len(statuses)) self.assertEqual(["a", "b"], [s.name for s in statuses]) self.assertEqual([R, R], [s.state for s in statuses]) # remove a (running) self.lftp.kill("a") while True: statuses = self.lftp.status() self.lftp.raise_pending_error() if len(statuses) == 1: break self.assertEqual(1, len(statuses)) self.assertEqual(["b"], [s.name for s in statuses]) self.assertEqual([R], [s.state for s in statuses]) # add 3 jobs - c, ee, a self.lftp.queue("c", False) self.lftp.queue("e e", True) self.lftp.queue("a", True) while True: statuses = self.lftp.status() self.lftp.raise_pending_error() if len(statuses) == 4: break self.assertEqual(4, len(statuses)) self.assertEqual(["e e", "a", "b", "c"], [s.name for s in statuses]) self.assertEqual([Q, Q, R, R], [s.state for s in statuses]) # remove ee (queued) and b (running) self.lftp.kill("e e") while True: statuses = self.lftp.status() self.lftp.raise_pending_error() if len(statuses) == 3: break self.assertEqual(3, len(statuses)) self.assertEqual(["a", "b", "c"], [s.name for s in statuses]) self.assertEqual([Q, R, R], [s.state for s in statuses]) self.lftp.kill("b") while True: statuses = self.lftp.status() self.lftp.raise_pending_error() if len(statuses) == 2: break self.assertEqual(2, len(statuses)) self.assertEqual(["c", "a"], [s.name for s in statuses]) self.assertEqual([R, R], [s.state for s in statuses]) # remove all self.lftp.kill_all() while True: statuses = self.lftp.status() self.lftp.raise_pending_error() if len(statuses) == 0: break self.assertEqual(0, len(statuses)) @timeout_decorator.timeout(5) def test_queue_dir_wrong_file_type(self): """check that queueing a dir with PGET fails gracefully""" # passing dir as a file print("Queuing dir as a file") self.lftp.queue("a", False) # wait for command to fail while True: statuses = self.lftp.status() if len(statuses) == 0: break with self.assertRaises(LftpError) as ctx: self.lftp.raise_pending_error() self.assertTrue("Access failed" in str(ctx.exception)) # next status should be empty print("Getting empty status") statuses = self.lftp.status() self.assertEqual(0, len(statuses)) @timeout_decorator.timeout(5) def test_queue_file_wrong_file_type(self): """check that queueing a file with MIRROR fails gracefully""" # passing file as a dir print("Queuing file as a dir") self.lftp.queue("c", True) # wait for command to fail while True: statuses = self.lftp.status() if len(statuses) == 0: break with self.assertRaises(LftpError) as ctx: self.lftp.raise_pending_error() self.assertTrue("Access failed" in str(ctx.exception)) # next status should be empty print("Getting empty status") statuses = self.lftp.status() self.assertEqual(0, len(statuses)) @timeout_decorator.timeout(5) def test_queue_missing_file(self): """check that queueing non-existing file fails gracefully""" self.lftp.queue("non-existing-file", False) # wait for command to fail while True: statuses = self.lftp.status() if len(statuses) == 0: break with self.assertRaises(LftpError) as ctx: self.lftp.raise_pending_error() self.assertTrue("No such file" in str(ctx.exception)) # next status should be empty print("Getting empty status") statuses = self.lftp.status() self.assertEqual(0, len(statuses)) @timeout_decorator.timeout(5) def test_queue_missing_dir(self): """check that queueing non-existing directory fails gracefully""" self.lftp.queue("non-existing-folder", True) # wait for command to fail while True: statuses = self.lftp.status() if len(statuses) == 0: break with self.assertRaises(LftpError) as ctx: self.lftp.raise_pending_error() 
self.assertTrue("No such file" in str(ctx.exception)) # next status should be empty print("Getting empty status") statuses = self.lftp.status() self.assertEqual(0, len(statuses)) @timeout_decorator.timeout(5) def test_password_auth(self): # exit the default instance self.lftp.exit() self.lftp = Lftp(address=self.host, port=self.port, user=self.user, password=self.password) self.lftp.set_base_remote_dir_path(self.remote_dir) self.lftp.set_base_local_dir_path(self.local_dir) self.lftp.set_verbose_logging(True) # Disable key-based auth program = self.lftp.sftp_connect_program program = program[:-1] # remove the end double-quote program += " -oPubkeyAuthentication=no\"" self.lftp.sftp_connect_program = program self.lftp.queue("a", True) while True: statuses = self.lftp.status() self.lftp.raise_pending_error() if len(statuses) > 0: break self.assertEqual(1, len(statuses)) self.assertEqual("a", statuses[0].name) self.assertEqual(LftpJobStatus.Type.MIRROR, statuses[0].type) self.assertEqual(LftpJobStatus.State.RUNNING, statuses[0].state) # Wait for empty status while True: statuses = self.lftp.status() self.lftp.raise_pending_error() if len(statuses) == 0: break self.lftp.raise_pending_error() @timeout_decorator.timeout(15) def test_error_bad_password(self): # exit the default instance self.lftp.exit() self.lftp = Lftp(address=self.host, port=self.port, user=self.user, password="******") self.lftp.set_base_remote_dir_path(self.remote_dir) self.lftp.set_base_local_dir_path(self.local_dir) self.lftp.set_verbose_logging(True) self.lftp.rate_limit = 10 # so jobs don't finish right away # Disable key-based auth program = self.lftp.sftp_connect_program program = program[:-1] # remove the end double-quote program += " -oPubkeyAuthentication=no\"" self.lftp.sftp_connect_program = program self.lftp.queue("a", True) while True: statuses = self.lftp.status() if len(statuses) > 0: break self.assertEqual(1, len(statuses)) self.assertEqual("a", statuses[0].name) self.assertEqual(LftpJobStatus.Type.MIRROR, statuses[0].type) self.assertEqual(LftpJobStatus.State.RUNNING, statuses[0].state) # Wait for empty status while True: statuses = self.lftp.status() if len(statuses) == 0: break with self.assertRaises(LftpError) as ctx: self.lftp.raise_pending_error() self.assertTrue("Login failed: Login incorrect" in str(ctx.exception))