def setUp(self):
    super(AdminActionsTest, self).setUp()
    self.tempdir = self.useFixture(fixtures.TempDir()).path
    self.fixture = self.useFixture(config_fixture.Config(lockutils.CONF))
    self.fixture.config(lock_path=self.tempdir,
                        group='oslo_concurrency')
    self.fixture.config(disable_process_locking=True,
                        group='oslo_concurrency')
    self.flags(rpc_backend='cinder.openstack.common.rpc.impl_fake')
    self.volume_api = volume_api.API()
    # Make RPC casts behave as synchronous calls so the test can
    # observe their effects immediately.
    cast_as_call.mock_cast_as_call(self.volume_api.volume_rpcapi.client)
    cast_as_call.mock_cast_as_call(
        self.volume_api.scheduler_rpcapi.client)
    self.stubs.Set(brick_lvm.LVM, '_vg_exists', lambda x: True)
def test_force_delete_snapshot(self):
    # Stub out volume clearing so no actual wiping is attempted.
    self.stubs.Set(volutils, 'clear_volume',
                   lambda a, b, volume_clear=CONF.volume_clear,
                   volume_clear_size=CONF.volume_clear_size: None)
    # admin context
    ctx = context.RequestContext('admin', 'fake', True)
    # current status is creating
    volume = db.volume_create(ctx, {'host': 'test', 'size': 1})
    snapshot = db.snapshot_create(ctx, {'status': 'creating',
                                        'volume_size': 1,
                                        'volume_id': volume['id']})
    path = '/v2/fake/snapshots/%s/action' % snapshot['id']
    req = webob.Request.blank(path)
    req.method = 'POST'
    req.headers['content-type'] = 'application/json'
    req.body = jsonutils.dumps({'os-force_delete': {}})
    # attach admin context to request
    req.environ['cinder.context'] = ctx
    # start service to handle rpc.cast for 'delete snapshot'
    svc = self.start_service('volume', host='test')
    cast_as_call.mock_cast_as_call(svc.manager.scheduler_rpcapi.client)

    # NOTE(flaper87): Instead of patching `os.path.exists`,
    # create a fake path for the snapshot that should
    # be deleted and let the check pass.
    def local_path(volume, vg=None):
        tfile = tempfile.mkstemp(suffix='-cow', dir=self.tempdir)
        # NOTE(flaper87): Drop the `-cow` suffix since it'll be
        # added back later in the happy path. (A slice is used
        # because str.strip('-cow') would strip any of those
        # characters, not the literal suffix.)
        return tfile[1][:-len('-cow')]

    self.stubs.Set(svc.manager.driver, "local_path", local_path)

    # make request
    resp = req.get_response(app())

    # NOTE(flaper87): Since we're using a nested service,
    # yield control to the service thread so it can process
    # the recent calls.
    time.sleep(0.6)

    # Request is accepted
    self.assertEqual(202, resp.status_int)
    # snapshot is deleted
    self.assertRaises(exception.NotFound, db.snapshot_get, ctx,
                      snapshot['id'])
    # cleanup
    svc.stop()