def _delete_worker(name, normal_shutdown=False):
    """
    Delete the Worker with _id name from the database, cancel any associated tasks and
    reservations.

    If the worker shut down normally, no message is logged; otherwise an error level message is
    logged. Default is to assume the worker did not shut down normally.

    Any resource reservations associated with this worker are cleaned up by this function.

    Any tasks associated with this worker are explicitly canceled.

    :param name:            The name of the worker you wish to delete.
    :type  name:            basestring
    :param normal_shutdown: True if the worker shutdown normally, False otherwise. Defaults to
                            False.
    :type  normal_shutdown: bool
    """
    # Idiomatic truthiness test instead of `is False`; this also treats an explicitly
    # passed None the same as the "not a normal shutdown" default.
    if not normal_shutdown:
        msg = _('The worker named %(name)s is missing. Canceling the tasks in its queue.')
        msg = msg % {'name': name}
        _logger.error(msg)

    # Delete the worker document
    Worker.objects(name=name).delete()

    # Delete all reserved_resource documents for the worker
    ReservedResource.objects(worker_name=name).delete()

    # Cancel all of the tasks that were assigned to this worker's queue
    for task_status in TaskStatus.objects(worker_name=name,
                                          state__in=constants.CALL_INCOMPLETE_STATES):
        cancel(task_status['task_id'])

    # Delete working directory
    common_utils.delete_worker_working_directory(name)
def test_delete_worker_working_directory(self, mock_pulp_config_get, mock_path_exists,
                                         mock_rmtree):
    """
    Assert that delete_worker_working_directory reads the configured
    'working_directory' setting, checks for the worker's subdirectory, and
    removes it with rmtree.
    """
    base_dir = '/var/cache/pulp'
    worker_dir = base_dir + '/test-worker'
    mock_pulp_config_get.return_value = base_dir

    delete_worker_working_directory('test-worker')

    mock_pulp_config_get.assert_called_with('server', 'working_directory')
    mock_path_exists.assert_called_with(worker_dir)
    mock_rmtree.assert_called_with(worker_dir)
def initialize_worker(sender, instance, **kwargs):
    """
    Perform all the necessary initialization of this Celery worker.

    We clean up old state in case this worker was previously running, but died unexpectedly.
    In such cases, any Pulp tasks that were running or waiting on this worker will show
    incorrect state. Any reserved_resource reservations associated with the previous worker
    will also be removed along with the worker entry in the database itself. The working
    directory specified in /etc/pulp/server.conf (/var/cache/pulp/<worker_name>) by default
    is removed and recreated. This is called early in the worker start process, and later
    when it's fully online, pulp_celerybeat will discover the worker as usual to allow new
    work to arrive at this worker. If there is no previous work to cleanup, this method
    still runs, but has no effect on the database.

    After cleaning up old state, it ensures the existence of the worker's working directory.

    Lastly, this function makes the call to Pulp's initialization code.

    It uses the celeryd_after_setup signal[0] so that it gets called by Celery after logging
    is initialized, but before Celery starts to run tasks.

    If the worker is a resource manager, it tries to acquire a lock stored within the
    database. If the lock cannot be acquired immediately, it will wait until the currently
    active instance becomes unavailable, at which point the worker cleanup routine will
    clear the lock for us to acquire. While the worker remains in this waiting state, it is
    not connected to the broker and will not attempt to do any work. A side effect of this
    is that, if terminated while in this state, the process will not send the
    "worker-offline" signal used by the EventMonitor to immediately clean up terminated
    workers. Therefore, we override the SIGTERM signal handler while in this state so that
    cleanup is done properly.

    [0] http://celery.readthedocs.org/en/latest/userguide/signals.html#celeryd-after-setup

    :param sender: The hostname of the worker
    :type sender: basestring
    :param instance: The Worker instance to be initialized (unused)
    :type instance: celery.apps.worker.Worker
    :param kwargs: Other params (unused)
    :type kwargs: dict
    """
    # Pulp's general initialization must run before any database cleanup below.
    initialization.initialize()

    # Delete any potential old state. normal_shutdown=True suppresses the
    # "worker is missing" error log, since this is an expected cleanup.
    tasks._delete_worker(sender, normal_shutdown=True)

    # Create a new working directory for worker that is starting now
    common_utils.delete_worker_working_directory(sender)
    common_utils.create_worker_working_directory(sender)

    # If the worker is a resource manager, try to acquire the lock, or wait until it
    # can be acquired
    if sender.startswith(constants.RESOURCE_MANAGER_WORKER_NAME):
        get_resource_manager_lock(sender)
def _delete_worker(name, normal_shutdown=False):
    """
    Remove a worker's database record, reservations, tasks, and working directory.

    When ``normal_shutdown`` is False (the default), an error is logged noting the
    worker went missing; a normal shutdown logs nothing. All reserved_resource
    documents for the worker are removed, every incomplete task assigned to its
    queue is explicitly canceled, and its working directory is deleted.

    :param name: The name of the worker you wish to delete.
    :type name: basestring
    :param normal_shutdown: True if the worker shutdown normally, False otherwise.
                            Defaults to False.
    :type normal_shutdown: bool
    """
    if normal_shutdown is False:
        template = _('The worker named %(name)s is missing. Canceling the tasks in its queue.')
        _logger.error(template % {'name': name})

    # Remove the worker's database record and any resource reservations it held.
    Worker.objects(name=name).delete()
    ReservedResource.objects(worker_name=name).delete()

    # Explicitly cancel every incomplete task that was routed to this worker's queue.
    incomplete_tasks = TaskStatus.objects(worker_name=name,
                                          state__in=constants.CALL_INCOMPLETE_STATES)
    for status in incomplete_tasks:
        cancel(status['task_id'])

    # Clean up the worker's on-disk working directory.
    common_utils.delete_worker_working_directory(name)
def initialize_worker(sender, instance, **kwargs):
    """
    Perform all the necessary initialization of this Celery worker.

    Old state left behind by a previous, unexpectedly-died incarnation of this
    worker is cleaned up first: Pulp tasks that were running or waiting on the old
    worker (which would otherwise show incorrect state), its reserved_resource
    reservations, and the worker's own database entry are all removed. The working
    directory specified in /etc/pulp/server.conf (/var/cache/pulp/<worker_name> by
    default) is removed and recreated. If there is no previous state to clean up,
    this still runs but has no effect on the database.

    After cleaning up old state, it ensures the existence of the worker's working
    directory.

    Lastly, this function makes the call to Pulp's initialization code.

    This runs via the celeryd_after_setup signal[0], so Celery invokes it after
    logging is initialized but before any tasks run; once the worker is fully
    online, pulp_celerybeat discovers it as usual so new work can arrive.

    [0] http://celery.readthedocs.org/en/latest/userguide/signals.html#celeryd-after-setup

    :param sender: The hostname of the worker
    :type sender: basestring
    :param instance: The Worker instance to be initialized (unused)
    :type instance: celery.apps.worker.Worker
    :param kwargs: Other params (unused)
    :type kwargs: dict
    """
    initialization.initialize()

    worker_name = sender

    # Purge any stale state from a prior run of this worker.
    tasks._delete_worker(worker_name, normal_shutdown=True)

    # Recreate a fresh working directory for the worker that is starting now.
    common_utils.delete_worker_working_directory(worker_name)
    common_utils.create_worker_working_directory(worker_name)