def test_resource_in_resource_map(self):
    """
    Test _release_resource() with a valid resource. This should remove the resource
    from the database.
    """
    # Two live workers to own the reservations.
    heartbeat = datetime.utcnow()
    first_worker = Worker(name=WORKER_1, last_heartbeat=heartbeat)
    first_worker.save()
    second_worker = Worker(name=WORKER_2, last_heartbeat=heartbeat)
    second_worker.save()

    # One reservation per worker.
    first_reservation = ReservedResource(task_id=str(uuid.uuid4()),
                                         worker_name=first_worker.name,
                                         resource_id='resource_1')
    first_reservation.save()
    second_reservation = ReservedResource(task_id=str(uuid.uuid4()),
                                          worker_name=second_worker.name,
                                          resource_id='resource_2')
    second_reservation.save()

    # This should remove resource_2 from the _resource_map.
    tasks._release_resource(second_reservation.task_id)

    # resource_2 should have been removed from the database; resource_1 is untouched.
    self.assertEqual(ReservedResource.objects.count(), 1)
    remaining = ReservedResource.objects.get(task_id=first_reservation.task_id)
    self.assertEqual(remaining['worker_name'], first_reservation.worker_name)
    self.assertEqual(remaining['resource_id'], 'resource_1')
def test_deletes_workers(self, mock_worker, mock_delete_worker):
    """
    check_workers() should dispatch _delete_worker for every worker returned by the
    mocked Worker query.

    :param mock_worker: mocked Worker model patched into the scheduler module
    :param mock_delete_worker: mocked _delete_worker dispatcher
    """
    # Build Workers with keyword arguments for consistency with the other tests in
    # this module; positional args to mongoengine Documents are deprecated.
    mock_worker.objects.return_value = [
        Worker(name='name1', last_heartbeat=datetime.utcnow()),
        Worker(name='name2', last_heartbeat=datetime.utcnow()),
    ]
    scheduler.WorkerTimeoutMonitor().check_workers()
    # make sure _delete_worker is only called for the two expected calls
    mock_delete_worker.assert_has_calls([mock.call('name1'), mock.call('name2')])
def test_deletes_workers(self, mock_worker, mock_delete_worker):
    """Only the worker whose heartbeat is stale should be deleted."""
    stale = datetime.utcnow() - timedelta(seconds=400)
    fresh = datetime.utcnow()
    mock_worker.objects.all.return_value = [
        Worker(name='name1', last_heartbeat=stale),
        Worker(name='name2', last_heartbeat=fresh),
    ]

    scheduler.CeleryProcessTimeoutMonitor().check_celery_processes()

    # make sure _delete_worker is only called for the old worker
    mock_delete_worker.assert_has_calls([mock.call('name1')])
def test_logs_resource_manager_missing(self, mock__logger, mock_worker, mock_delete_worker):
    """An error is logged when no resource manager process is found."""
    heartbeat = datetime.utcnow()
    # A scheduler process and a plain worker, but no resource manager.
    mock_worker.objects.all.return_value = [
        Worker(name=constants.SCHEDULER_WORKER_NAME, last_heartbeat=heartbeat),
        Worker(name='name2', last_heartbeat=heartbeat),
    ]

    scheduler.CeleryProcessTimeoutMonitor().check_celery_processes()

    # NOTE(review): 'pulp_resource_mananger' is misspelled below, but the assertion
    # matches the production log message verbatim, so it must stay byte-identical
    # here; fix it in the production code and this test together.
    mock__logger.error.assert_called_once_with(
        'There are 0 pulp_resource_manager processes running. Pulp will not operate '
        'correctly without at least one pulp_resource_mananger process running.')
def test_debug_logging(self, mock__logger, mock_worker, mock_delete_worker):
    """The monitor emits debug messages describing the check and the process counts."""
    fresh = datetime.utcnow()
    stale = fresh - timedelta(seconds=400)
    mock_worker.objects.all.return_value = [
        Worker(name='name1', last_heartbeat=stale),
        Worker(name='name2', last_heartbeat=fresh),
        Worker(name=RESOURCE_MANAGER_WORKER_NAME, last_heartbeat=fresh),
        Worker(name=SCHEDULER_WORKER_NAME, last_heartbeat=fresh),
    ]

    scheduler.CeleryProcessTimeoutMonitor().check_celery_processes()

    # One debug message for the check itself, one for the per-type process counts.
    expected_debug_calls = [
        mock.call('Checking if pulp_workers, pulp_celerybeat, or '
                  'pulp_resource_manager processes are missing for more than 300 seconds'),
        mock.call('1 pulp_worker processes, 1 pulp_celerybeat processes, '
                  'and 1 pulp_resource_manager processes'),
    ]
    mock__logger.debug.assert_has_calls(expected_debug_calls)
def test_dispatches__release_resource(self):
    """_queue_reserved_task dispatches _release_resource routed to the reserved worker."""
    reserved_worker = Worker(name='worker1', last_heartbeat=datetime.utcnow())
    self.mock_get_worker_for_reservation.return_value = reserved_worker

    tasks._queue_reserved_task('task_name', 'my_task_id', 'my_resource_id', [1, 2], {'a': 2})

    # The release task is routed to the same worker via its dedicated queue.
    self.mock__release_resource.apply_async.assert_called_once_with(
        ('my_task_id', ), routing_key='worker1', exchange='C.dq')
def test_get_unreserved_worker_breaks_out_of_loop(self):
    """Finding an unreserved worker ends the retry loop without sleeping."""
    # No worker currently holds the reservation, but an idle worker is available.
    self.mock_get_worker_for_reservation.side_effect = NoWorkers()
    self.mock_get_unreserved_worker.return_value = Worker(
        name='worker1', last_heartbeat=datetime.utcnow())

    tasks._queue_reserved_task('task_name', 'my_task_id', 'my_resource_id', [1, 2], {'a': 2})

    self.assertFalse(self.mock_time.sleep.called)
def test_dispatches_inner_task(self):
    """The wrapped task is dispatched to the reserved worker with the original arguments."""
    self.mock_get_worker_for_reservation.return_value = Worker(
        name='worker1', last_heartbeat=datetime.utcnow())

    tasks._queue_reserved_task('task_name', 'my_task_id', 'my_resource_id', [1, 2], {'a': 2})

    # The positional/keyword args and routing information must be forwarded verbatim.
    inner_apply_async = self.mock_celery.tasks['task_name'].apply_async
    inner_apply_async.assert_called_once_with(1, 2, a=2, routing_key='worker1',
                                              task_id='my_task_id', exchange='C.dq')
def test_get_worker_for_reservation_breaks_out_of_loop(self):
    """
    If a worker already holds the reservation, _queue_reserved_task should use it
    immediately: no fallback lookup for an unreserved worker and no retry sleep.
    """
    # Build the Worker with keyword arguments for consistency with the sibling tests
    # in this class; positional args to mongoengine Documents are deprecated.
    self.mock_get_worker_for_reservation.return_value = Worker(
        name='worker1', last_heartbeat=datetime.utcnow())

    tasks._queue_reserved_task('task_name', 'my_task_id', 'my_resource_id', [1, 2], {'a': 2})

    self.assertTrue(not self.mock_get_unreserved_worker.called)
    self.assertTrue(not self.mock_time.sleep.called)
def test_update_repo_and_plugins(self, distributor_update, mock_get_worker_for_reservation):
    """
    Tests the aggregate call to update a repo and its plugins.
    """
    # Build the Worker with keyword arguments for consistency with the other tests
    # in this suite; positional args to mongoengine Documents are deprecated.
    mock_get_worker_for_reservation.return_value = Worker(
        name='some_queue', last_heartbeat=datetime.datetime.now())
    self.manager.create_repo('repo-1', 'Original', 'Original Description')

    importer_manager = manager_factory.repo_importer_manager()
    distributor_manager = manager_factory.repo_distributor_manager()

    importer_manager.set_importer('repo-1', 'mock-importer', {'key-i1': 'orig-1'})
    distributor_manager.add_distributor('repo-1', 'mock-distributor', {'key-d1': 'orig-1'},
                                        True, distributor_id='dist-1')
    distributor_manager.add_distributor('repo-1', 'mock-distributor', {'key-d2': 'orig-2'},
                                        True, distributor_id='dist-2')

    # Test
    repo_delta = {'display_name': 'Updated'}
    new_importer_config = {'key-i1': 'updated-1', 'key-i2': 'new-1'}
    # only update one of the two distributors
    new_distributor_configs = {
        'dist-1': {'key-d1': 'updated-1'},
    }

    result = self.manager.update_repo_and_plugins('repo-1', repo_delta, new_importer_config,
                                                  new_distributor_configs)
    self.assertTrue(isinstance(result, TaskResult))
    self.assertEquals(None, result.error)
    repo = result.return_value

    # Verify: the delta was applied and the untouched fields kept their values.
    self.assertEqual(repo['id'], 'repo-1')
    self.assertEqual(repo['display_name'], 'Updated')
    self.assertEqual(repo['description'], 'Original Description')

    # The importer config is fully replaced by the new config.
    importer = importer_manager.get_importer('repo-1')
    self.assertEqual(importer['config'], new_importer_config)

    # Only dist-1 was updated; dist-2 keeps its original config.
    dist_1 = distributor_manager.get_distributor('repo-1', 'dist-1')
    self.assertEqual(dist_1['config'], new_distributor_configs['dist-1'])
    dist_2 = distributor_manager.get_distributor('repo-1', 'dist-2')
    self.assertEqual(dist_2['config'], {'key-d2': 'orig-2'})

    # There should have been a spawned task for the new distributor config
    expected_task_id = TaskStatus.objects.get(
        tags='pulp:repository_distributor:dist-1')['task_id']
    self.assertEqual(result.spawned_tasks, [{'task_id': expected_task_id}])
def test_creates_and_saves_reserved_resource(self):
    """A ReservedResource document is created for the task and persisted."""
    self.mock_get_worker_for_reservation.return_value = Worker(
        name='worker1', last_heartbeat=datetime.utcnow())

    tasks._queue_reserved_task('task_name', 'my_task_id', 'my_resource_id', [1, 2], {'a': 2})

    # The reservation records the task, the chosen worker, and the resource.
    self.mock_reserved_resource.assert_called_once_with(task_id='my_task_id',
                                                        worker_name='worker1',
                                                        resource_id='my_resource_id')
    self.mock_reserved_resource.return_value.save.assert_called_once_with()
def test_resource_not_in_resource_map(self):
    """
    Test _release_resource() with a resource that is not in the database. This should be
    gracefully handled, and result in no changes to the database.
    """
    # Set up two workers
    worker_1 = Worker(name=WORKER_1, last_heartbeat=datetime.utcnow())
    worker_1.save()
    worker_2 = Worker(name=WORKER_2, last_heartbeat=datetime.utcnow())
    worker_2.save()
    # Set up two resource reservations, using our workers from above
    reserved_resource_1 = ReservedResource(task_id=str(uuid.uuid4()),
                                           worker_name=worker_1.name,
                                           resource_id='resource_1')
    reserved_resource_1.save()
    reserved_resource_2 = ReservedResource(task_id=str(uuid.uuid4()),
                                           worker_name=worker_2.name,
                                           resource_id='resource_2')
    reserved_resource_2.save()

    # This should not raise any Exception, but should also not alter either the Worker
    # collection or the ReservedResource collection
    tasks._release_resource('made_up_resource_id')

    # Make sure that the workers collection has not been altered. Access the queryset
    # manager as an attribute (Worker.objects) rather than calling it, for consistency
    # with the ReservedResource usage below.
    self.assertEqual(Worker.objects.count(), 2)
    worker_1 = Worker.objects.get(name=worker_1.name)
    self.assertTrue(worker_1)
    worker_2 = Worker.objects.get(name=worker_2.name)
    self.assertTrue(worker_2)
    # Make sure that the reserved resources collection has not been altered
    self.assertEqual(ReservedResource.objects.count(), 2)
    rr_1 = ReservedResource.objects.get(task_id=reserved_resource_1.task_id)
    self.assertEqual(rr_1['worker_name'], reserved_resource_1.worker_name)
    self.assertEqual(rr_1['resource_id'], 'resource_1')
    rr_2 = ReservedResource.objects.get(task_id=reserved_resource_2.task_id)
    self.assertEqual(rr_2['worker_name'], reserved_resource_2.worker_name)
    self.assertEqual(rr_2['resource_id'], 'resource_2')
def test_debug_logging(self, mock__logger, mock_worker, mock_delete_worker):
    """Debug messages report the timeout being checked and the per-process-type counts."""
    # A worker is considered missing after timeout + heartbeat interval.
    stale_after = (constants.PULP_PROCESS_TIMEOUT_INTERVAL +
                   constants.PULP_PROCESS_HEARTBEAT_INTERVAL)
    now = datetime.utcnow()
    mock_worker.objects.all.return_value = [
        Worker(name='name1', last_heartbeat=now - timedelta(seconds=stale_after)),
        Worker(name='name2', last_heartbeat=now),
        Worker(name=constants.RESOURCE_MANAGER_WORKER_NAME, last_heartbeat=now),
        Worker(name=constants.SCHEDULER_WORKER_NAME, last_heartbeat=now),
    ]

    scheduler.CeleryProcessTimeoutMonitor().check_celery_processes()

    expected_debug_calls = [
        mock.call(
            'Checking if pulp_workers, pulp_celerybeat, or pulp_resource_manager processes are '
            'missing for more than %d seconds' % constants.PULP_PROCESS_TIMEOUT_INTERVAL
        ),
        mock.call(
            '1 pulp_worker processes, 1 pulp_celerybeat processes, '
            'and 1 pulp_resource_manager processes'
        ),
    ]
    mock__logger.debug.assert_has_calls(expected_debug_calls)
def get(self, request, task_id):
    """
    Return a response containing a single task.

    :param request: WSGI request object
    :type request: django.core.handlers.wsgi.WSGIRequest
    :param task_id: The ID of the task you wish to cancel
    :type task_id: basestring

    :return: Response containing a serialized dict of the requested task
    :rtype : django.http.HttpResponse
    :raises MissingResource: if task is not found
    """
    try:
        task = TaskStatus.objects.get(task_id=task_id)
    except DoesNotExist:
        raise MissingResource(task_id)

    serialized = task_serializer(task)
    if 'worker_name' in serialized:
        # Build a throwaway Worker only to derive its queue name; presumably only the
        # name matters to queue_name and the heartbeat is a placeholder — TODO confirm.
        throwaway = Worker(name=serialized['worker_name'], last_heartbeat=datetime.now())
        serialized.update({'queue': throwaway.queue_name})
    return generate_json_response_with_pulp_encoder(serialized)