def test_work_cleanup_upgrading(self, upgrading_mock):
    """work_cleanup must refuse to run while a rolling upgrade is active."""
    upgrading_mock.return_value = True
    request = objects.CleanupRequest(host='myhost')
    self.assertRaises(exception.UnavailableDuringUpgrade,
                      self.manager.work_cleanup,
                      self.context, request)
def test_do_cleanup_not_cleaning_already_claimed(self):
    """Basic cleanup that doesn't touch already cleaning works.

    worker1 belongs to our service and must be cleaned; worker2 is created
    under a different service id, then its in-memory copy is switched to our
    service id to simulate another thread claiming it between the
    worker_get_all listing and our claim attempt.  do_cleanup must skip
    worker2 (the DB row no longer matches the stale in-memory copy).
    """
    vol = utils.create_volume(self.context, status='creating')
    worker1 = db.worker_create(self.context, status='creating',
                               resource_type='Volume', resource_id=vol.id,
                               service_id=self.service.id)
    worker1 = db.worker_get(self.context, id=worker1.id)
    vol2 = utils.create_volume(self.context, status='deleting')
    # worker2 is stored in the DB with a different service_id than ours.
    worker2 = db.worker_create(self.context, status='deleting',
                               resource_type='Volume', resource_id=vol2.id,
                               service_id=self.service.id + 1)
    worker2 = db.worker_get(self.context, id=worker2.id)
    # Simulate that the change to vol2 worker happened between
    # worker_get_all and trying to claim a work for cleanup
    worker2.service_id = self.service.id
    clean_req = objects.CleanupRequest(service_id=self.service.id)
    mngr = FakeManager(self.service.id)
    # Force do_cleanup to see our stale in-memory worker list.
    with mock.patch('cinder.db.worker_get_all') as get_all_mock:
        get_all_mock.return_value = [worker1, worker2]
        mngr.do_cleanup(self.context, clean_req)
    # Only worker2 should remain: worker1 was cleaned, worker2 was skipped.
    workers = db.worker_get_all(self.context)
    self.assertEqual(1, len(workers))
    self.assertEqual(worker2.id, workers[0].id)
    vol.refresh()
    self.assertEqual('creating_cleaned', vol.status)
    vol2.refresh()
    self.assertEqual('deleting', vol2.status)
def test_work_cleanup(self, get_mock, vol_clean_mock, sch_clean_mock):
    """work_cleanup splits services into (cleaning, unavailable) lists."""
    request_params = {'service_id': 1, 'cluster_name': 'cluster_name',
                      'host': 'host', 'binary': constants.VOLUME_BINARY,
                      'is_up': False, 'disabled': True,
                      'resource_id': fake.VOLUME_ID,
                      'resource_type': 'Volume'}
    cluster = objects.Cluster(id=1, name=request_params['cluster_name'],
                              binary=constants.VOLUME_BINARY)
    clustered_vol_svc = objects.Service(self.context, id=2, host='hostname',
                                        cluster_name=cluster.name,
                                        binary=constants.VOLUME_BINARY,
                                        cluster=cluster)
    sched_svc = objects.Service(self.context, id=3, host='hostname',
                                cluster_name=None,
                                binary=constants.SCHEDULER_BINARY)
    lone_vol_svc = objects.Service(self.context, id=4, host='hostname',
                                   cluster_name=None,
                                   binary=constants.VOLUME_BINARY)
    services = [clustered_vol_svc, sched_svc, lone_vol_svc]
    get_mock.return_value = services
    request = objects.CleanupRequest(self.context, **request_params)
    result = self.manager.work_cleanup(self.context, request)
    # First two services can be cleaned, the last one cannot.
    self.assertEqual((services[:2], services[2:]), result)
    self.assertEqual(1, vol_clean_mock.call_count)
    self.assertEqual(1, sch_clean_mock.call_count)
def test_init_default(self):
    """Test __init__ when one field is missing.

    Each omitted field must be replaced by its declared default value.
    """
    for missing in self.fields:
        provided = self.fields.copy()
        del provided[missing]
        req = objects.CleanupRequest(mock.sentinel.context, **provided)
        provided[missing] = self._req_default(missing)
        self.assertDictEqual(provided, self._req_as_dict(req))
def init_host(self, service_id, **kwargs):
    """Record our service id and clean up work left over from a prior run.

    :param service_id: id of the service running this manager.
    """
    self.service_id = service_id
    admin_ctxt = context.get_admin_context()
    # TODO(geguileo): Once we don't support MySQL 5.5 anymore we can remove
    # call to workers_init.
    db.workers_init()
    request = objects.CleanupRequest(service_id=service_id)
    self.do_cleanup(admin_ctxt, request)
def test_cleanup_too_old(self, method):
    """Cleanup RPC methods raise ServiceTooOld below RPC version 3.4."""
    rpcapi = scheduler_rpcapi.SchedulerAPI()
    request = objects.CleanupRequest(self.context)
    with mock.patch.object(rpcapi.client, 'can_send_version',
                           return_value=False) as can_send_mock:
        self.assertRaises(exception.ServiceTooOld,
                          getattr(rpcapi, method),
                          self.context, request)
        can_send_mock.assert_called_once_with('3.4')
def test_do_cleanup_too_old(self):
    """do_cleanup raises ServiceTooOld below RPC version 3.7."""
    rpcapi = volume_rpcapi.VolumeAPI()
    request = objects.CleanupRequest(self.context)
    with mock.patch.object(rpcapi.client, 'can_send_version',
                           return_value=False) as can_send_mock:
        self.assertRaises(exception.ServiceTooOld, rpcapi.do_cleanup,
                          self.context, request)
        can_send_mock.assert_called_once_with('3.7')
def test_do_cleanup(self, host, cluster, get_cctxt_mock):
    """do_cleanup casts the request on the resource's topic queue."""
    rpcapi = volume_rpcapi.VolumeAPI()
    request = objects.CleanupRequest(self.context, host=host,
                                     cluster_name=cluster)
    rpcapi.do_cleanup(self.context, request)
    get_cctxt_mock.assert_called_once_with(request.service_topic_queue,
                                           '3.7')
    cctxt = get_cctxt_mock.return_value
    cctxt.cast.assert_called_once_with(self.context, 'do_cleanup',
                                       cleanup_request=request)
def test_init_defaults(self):
    """Test __init__ when only one field is set.

    Every other field must keep its declared default value.
    """
    defaults = {name: self._req_default(name) for name in self.all_fields}
    for name, value in self.fields.items():
        req = objects.CleanupRequest(mock.sentinel.context, **{name: value})
        expected = dict(defaults, **{name: value})
        self.assertDictEqual(expected, self._req_as_dict(req))
def test_cleanup(self, method, host, cluster, get_client):
    """Cleanup RPCs prepare version 3.4 and cast/call with the request."""
    rpcapi = scheduler_rpcapi.SchedulerAPI()
    request = objects.CleanupRequest(self.context, host=host,
                                     cluster_name=cluster)
    getattr(rpcapi, method)(self.context, request)
    prepare = get_client.return_value.prepare
    prepare.assert_called_once_with(version='3.4')
    # do_cleanup is asynchronous (cast); everything else is a call.
    if method == 'do_cleanup':
        rpc_method = prepare.return_value.cast
    else:
        rpc_method = prepare.return_value.call
    rpc_method.assert_called_once_with(self.context, method,
                                       cleanup_request=request)
def test_do_cleanup_resource_deleted(self):
    """Cleanup on a resource that's been already deleted.

    The worker row must simply be removed.
    """
    vol = utils.create_volume(self.context, status='creating')
    db.worker_create(self.context, status='creating',
                     resource_type='Volume', resource_id=vol.id,
                     service_id=self.service.id)
    vol.destroy()
    mngr = FakeManager(self.service.id)
    mngr.do_cleanup(self.context,
                    objects.CleanupRequest(service_id=self.service.id))
    self.assertListEqual([], db.worker_get_all(self.context))
def test_do_cleanup(self):
    """Basic successful cleanup: worker removed and resource cleaned."""
    vol = utils.create_volume(self.context, status='creating')
    db.worker_create(self.context, status='creating',
                     resource_type='Volume', resource_id=vol.id,
                     service_id=self.service.id)
    mngr = FakeManager(self.service.id)
    mngr.do_cleanup(self.context,
                    objects.CleanupRequest(service_id=self.service.id))
    self.assertListEqual([], db.worker_get_all(self.context))
    vol.refresh()
    self.assertEqual('creating_cleaned', vol.status)
def test_do_cleanup_revive_on_cleanup_fail(self, mock_clean):
    """Cleanup will revive a worker if cleanup fails.

    The worker row must survive and the resource keep its status.
    """
    vol = utils.create_volume(self.context, status='creating')
    db.worker_create(self.context, status='creating',
                     resource_type='Volume', resource_id=vol.id,
                     service_id=self.service.id)
    mngr = FakeManager(self.service.id)
    mngr.do_cleanup(self.context,
                    objects.CleanupRequest(service_id=self.service.id))
    self.assertEqual(1, len(db.worker_get_all(self.context)))
    vol.refresh()
    self.assertEqual('creating', vol.status)
def test_do_cleanup_keep_worker(self):
    """Cleanup on a resource that keeps its worker after cleaning up."""
    vol = utils.create_volume(self.context, status='deleting')
    db.worker_create(self.context, status='deleting',
                     resource_type='Volume', resource_id=vol.id,
                     service_id=self.service.id)
    mngr = FakeManager(self.service.id, keep_after_clean=True)
    mngr.do_cleanup(self.context,
                    objects.CleanupRequest(service_id=self.service.id))
    self.assertEqual(1, len(db.worker_get_all(self.context)))
    vol.refresh()
    self.assertEqual('deleting_cleaned', vol.status)
def test_do_cleanup_not_cleaning_already_claimed_by_us(self):
    """Basic cleanup that doesn't touch other thread's claimed works.

    worker2's DB row carries a future updated_at while our in-memory copy
    carries an old one, simulating a concurrent claim by another thread of
    this same service.  do_cleanup must skip it and clean only worker1.
    """
    now = timeutils.utcnow()
    delta = timeutils.datetime.timedelta(seconds=1)
    original_time = now - delta
    # Creating the worker in the future, and then changing the in-memory
    # value of worker2.updated_at to an earlier time, we effectively
    # simulate that the worker entry was created in the past and that it
    # has been just updated between worker_get_all and trying
    # to claim a work for cleanup
    other_thread_claimed_time = now + delta
    vol = utils.create_volume(self.context, status='creating')
    worker1 = db.worker_create(self.context, status='creating',
                               resource_type='Volume', resource_id=vol.id,
                               service_id=self.service.id,
                               updated_at=original_time)
    worker1 = db.worker_get(self.context, id=worker1.id)
    vol2 = utils.create_volume(self.context, status='deleting')
    worker2 = db.worker_create(self.context, status='deleting',
                               resource_type='Volume', resource_id=vol2.id,
                               service_id=self.service.id,
                               updated_at=other_thread_claimed_time)
    worker2 = db.worker_get(self.context, id=worker2.id)
    # This with the mock below simulates worker2 was created in the past
    # and updated right between worker_get_all and worker_claim_for_cleanup
    worker2.updated_at = original_time
    clean_req = objects.CleanupRequest(service_id=self.service.id)
    mngr = FakeManager(self.service.id)
    # Freeze "now" and feed do_cleanup our stale in-memory worker list.
    with mock.patch('cinder.manager.timeutils.utcnow', return_value=now),\
            mock.patch('cinder.db.worker_get_all') as get_all_mock:
        get_all_mock.return_value = [worker1, worker2]
        mngr.do_cleanup(self.context, clean_req)
    # Only worker2 should remain; worker1 was cleaned, worker2 was skipped.
    workers = db.worker_get_all(self.context)
    self.assertEqual(1, len(workers))
    self.assertEqual(worker2.id, workers[0].id)
    vol.refresh()
    self.assertEqual('creating_cleaned', vol.status)
    vol2.refresh()
    self.assertEqual('deleting', vol2.status)
def cleanup(self, req, body=None):
    """Do the cleanup on resources from a specific service/host/node."""
    # Let the wsgi middleware convert NotAuthorized exceptions
    ctxt = self.policy_checker(req, 'cleanup')
    params = self._prepare_params(ctxt, body or {}, self.allowed_clean_keys)
    params['until'] = timeutils.utcnow()
    # NOTE(geguileo): If is_up is not specified in the request
    # CleanupRequest's default will be used (False)
    cleanup_request = objects.CleanupRequest(**params)
    cleaning, unavailable = self.sch_api.work_cleanup(ctxt, cleanup_request)
    return {
        'cleaning': workers_view.ViewBuilder.service_list(cleaning),
        'unavailable': workers_view.ViewBuilder.service_list(unavailable),
    }
def test_do_cleanup_resource_on_another_service(self):
    """Cleanup on a resource that's been claimed by other service.

    Neither the worker row nor the resource may be touched.
    """
    vol = utils.create_volume(self.context, status='deleting')
    db.worker_create(self.context, status='deleting',
                     resource_type='Volume', resource_id=vol.id,
                     service_id=self.service.id + 1)
    mngr = FakeManager(self.service.id)
    mngr.do_cleanup(self.context,
                    objects.CleanupRequest(service_id=self.service.id))
    self.assertEqual(1, len(db.worker_get_all(self.context)))
    vol.refresh()
    self.assertEqual('deleting', vol.status)
def test_do_cleanup_resource_changed_status(self):
    """Cleanup on a resource that's changed status.

    The stale worker is removed but the resource stays untouched.
    """
    vol = utils.create_volume(self.context, status='available')
    db.worker_create(self.context, status='creating',
                     resource_type='Volume', resource_id=vol.id,
                     service_id=self.service.id)
    mngr = FakeManager(self.service.id)
    mngr.do_cleanup(self.context,
                    objects.CleanupRequest(service_id=self.service.id))
    self.assertListEqual([], db.worker_get_all(self.context))
    vol.refresh()
    self.assertEqual('available', vol.status)
def cleanup(self, req, body=None):
    """Do the cleanup on resources from a specific service/host/node.

    Validates and normalizes the request body (booleans, resource type,
    resource location), then asks the scheduler to distribute the cleanup.

    :param req: WSGI request.
    :param body: optional dict of cleanup filters.
    :returns: dict with 'cleaning' and 'unavailable' service lists.
    :raises exception.InvalidInput: on an invalid resource type or when the
        resource id doesn't map to exactly one pending-cleanup worker.
    """
    # Let the wsgi middleware convert NotAuthorized exceptions
    ctxt = req.environ['cinder.context']
    ctxt.authorize(policy.CLEAN_POLICY)
    body = body or {}

    # Normalize boolean filters that may arrive as strings.
    for boolean in ('disabled', 'is_up'):
        if body.get(boolean) is not None:
            body[boolean] = strutils.bool_from_string(body[boolean])

    resource_type = body.get('resource_type')
    if resource_type:
        resource_type = resource_type.title()
        types = cleanable.CinderCleanableObject.cleanable_resource_types
        if resource_type not in types:
            valid_types = utils.build_or_str(types)
            msg = _('Resource type %(resource_type)s not valid,'
                    ' must be %(valid_types)s')
            msg = msg % {"resource_type": resource_type,
                         "valid_types": valid_types}
            raise exception.InvalidInput(reason=msg)
        body['resource_type'] = resource_type

    resource_id = body.get('resource_id')
    if resource_id:
        # If we have the resource type but we don't have where it is
        # located, we get it from the DB to limit the distribution of the
        # request by the scheduler, otherwise it will be distributed to all
        # the services.
        location_keys = {'service_id', 'cluster_name', 'host'}
        if not location_keys.intersection(body):
            workers = db.worker_get_all(ctxt, resource_id=resource_id,
                                        binary=body.get('binary'),
                                        resource_type=resource_type)
            # Bug fix: previously these built a (translated_msg, id) tuple
            # instead of %-formatting, so InvalidInput's reason was a tuple
            # with an unsubstituted '%s' placeholder.
            if len(workers) == 0:
                msg = _('There is no resource with UUID %s pending '
                        'cleanup.') % resource_id
                raise exception.InvalidInput(reason=msg)
            if len(workers) > 1:
                msg = _('There are multiple resources with UUID %s '
                        'pending cleanup. Please be more '
                        'specific.') % resource_id
                raise exception.InvalidInput(reason=msg)
            # Exactly one pending worker: pin the request to its service.
            worker = workers[0]
            body.update(service_id=worker.service_id,
                        resource_type=worker.resource_type)

    body['until'] = timeutils.utcnow()
    # NOTE(geguileo): If is_up is not specified in the request
    # CleanupRequest's default will be used (False)
    cleanup_request = objects.CleanupRequest(**body)
    cleaning, unavailable = self.sch_api.work_cleanup(ctxt, cleanup_request)
    return {
        'cleaning': workers_view.ViewBuilder.service_list(cleaning),
        'unavailable': workers_view.ViewBuilder.service_list(unavailable),
    }
def init_host(self, service_id, added_to_cluster=None, **kwargs):
    """Record our service id and clean up stale work from a prior run.

    :param service_id: id of the service running this manager.
    :param added_to_cluster: unused here; accepted for interface parity.
    """
    self.service_id = service_id
    admin_ctxt = context.get_admin_context()
    request = objects.CleanupRequest(service_id=service_id)
    self.do_cleanup(admin_ctxt, request)
def test_init_all_set(self):
    """Test __init__ when setting all field values.

    Explicitly provided values must be stored verbatim.
    """
    expected = dict(self.fields)
    req = objects.CleanupRequest(mock.sentinel.context, **expected)
    self.assertDictEqual(expected, self._req_as_dict(req))