def _test_failover_model_updates(self, in_volumes, in_snapshots,
                                 driver_volumes, driver_result,
                                 out_volumes, out_snapshots,
                                 in_groups=None, out_groups=None,
                                 driver_group_result=None,
                                 secondary_id=None):
    """Fail over the host and verify the resulting model updates.

    Creates the service plus the given volumes/snapshots/groups in the
    DB, mocks the driver's failover_host to return driver_result, runs
    the manager's failover_host, and then checks that the volumes handed
    to the driver and the rows left in the DB match the expectations.

    :param in_volumes: iterable of volume field dicts to create.
    :param in_snapshots: iterable of snapshot field dicts to create.
    :param driver_volumes: set of volume ids the driver must receive.
    :param driver_result: model updates the mocked driver returns.
    :param out_volumes: expected volume rows after failover.
    :param out_snapshots: expected snapshot rows after failover.
    :param in_groups: optional iterable of group field dicts to create.
    :param out_groups: expected group rows after failover.
    :param driver_group_result: group updates the mocked driver returns.
    :param secondary_id: backend id to fail over to.
    """
    host = vol_utils.extract_host(self.manager.host)
    utils.create_service(self.context,
                         {'host': host,
                          'binary': constants.VOLUME_BINARY})
    for volume in in_volumes:
        utils.create_volume(self.context, self.manager.host, **volume)
    for snapshot in in_snapshots:
        utils.create_snapshot(self.context, **snapshot)
    # in_groups defaults to None; guard the iteration so callers that
    # don't pass any groups don't hit a TypeError.
    for group in in_groups or []:
        utils.create_group(self.context, self.manager.host, **group)
    with mock.patch.object(
            self.manager.driver, 'failover_host',
            return_value=(secondary_id, driver_result,
                          driver_group_result)) as driver_mock:
        self.manager.failover_host(self.context, secondary_id)
        self.assertSetEqual(driver_volumes,
                            {v.id for v in driver_mock.call_args[0][1]})
    self._check_failover_db(objects.VolumeList, out_volumes)
    self._check_failover_db(objects.SnapshotList, out_snapshots)
    self._check_failover_db(objects.GroupList, out_groups)
def _test_failover_model_updates(self, in_volumes, in_snapshots,
                                 driver_volumes, driver_result,
                                 out_volumes, out_snapshots,
                                 in_groups=None, out_groups=None,
                                 driver_group_result=None,
                                 secondary_id=None):
    """Fail over the host and verify the resulting model updates.

    Creates the service plus the given volumes/snapshots/groups in the
    DB, mocks the driver's failover_host to return driver_result, runs
    the manager's failover_host, and then checks that the volumes handed
    to the driver and the rows left in the DB match the expectations.

    :param in_volumes: iterable of volume field dicts to create.
    :param in_snapshots: iterable of snapshot field dicts to create.
    :param driver_volumes: set of volume ids the driver must receive.
    :param driver_result: model updates the mocked driver returns.
    :param out_volumes: expected volume rows after failover.
    :param out_snapshots: expected snapshot rows after failover.
    :param in_groups: optional iterable of group field dicts to create.
    :param out_groups: expected group rows after failover.
    :param driver_group_result: group updates the mocked driver returns.
    :param secondary_id: backend id to fail over to.
    """
    host = vol_utils.extract_host(self.manager.host)
    utils.create_service(self.context,
                         {'host': host, 'binary': 'cinder-volume'})
    for volume in in_volumes:
        utils.create_volume(self.context, self.manager.host, **volume)
    for snapshot in in_snapshots:
        utils.create_snapshot(self.context, **snapshot)
    # in_groups defaults to None; guard the iteration so callers that
    # don't pass any groups don't hit a TypeError.
    for group in in_groups or []:
        utils.create_group(self.context, self.manager.host, **group)
    with mock.patch.object(
            self.manager.driver, 'failover_host',
            return_value=(secondary_id, driver_result,
                          driver_group_result)) as driver_mock:
        self.manager.failover_host(self.context, secondary_id)
        self.assertSetEqual(driver_volumes,
                            {v.id for v in driver_mock.call_args[0][1]})
    self._check_failover_db(objects.VolumeList, out_volumes)
    self._check_failover_db(objects.SnapshotList, out_snapshots)
    self._check_failover_db(objects.GroupList, out_groups)
def test_init_host_with_rpc_clustered_replication(self):
    """init_host_with_rpc enables replication on cluster and service."""
    # These are not OVOs but ORM instances.
    cluster = utils.create_cluster(self.context)
    service = utils.create_service(self.context,
                                   {'cluster_name': cluster.name,
                                    'binary': cluster.binary})
    # Sanity check: neither record starts out replication-enabled.
    for record in (cluster, service):
        self.assertNotEqual(fields.ReplicationStatus.ENABLED,
                            record.replication_status)

    vol_manager = manager.VolumeManager(
        'cinder.tests.fake_driver.FakeHAReplicatedLoggingVolumeDriver',
        host=service.host, cluster=cluster.name)
    vol_manager.driver = mock.Mock()
    vol_manager.driver.get_volume_stats.return_value = {
        'replication_enabled': True}

    vol_manager.init_host_with_rpc()

    # Reload both rows from the DB and confirm they were flipped.
    cluster_ovo = objects.Cluster.get_by_id(self.context, cluster.id)
    service_ovo = objects.Service.get_by_id(self.context, service.id)
    for record in (cluster_ovo, service_ovo):
        self.assertEqual(fields.ReplicationStatus.ENABLED,
                         record.replication_status)
def test_delete_group_frozen(self):
    """Deleting a group hosted on a frozen service must be rejected."""
    frozen_service = utils.create_service(self.ctxt, {'frozen': True})
    grp = utils.create_group(self.ctxt, host=frozen_service.host,
                             group_type_id='gt')
    api = cinder.group.api.API()
    self.assertRaises(exception.InvalidInput,
                      api.delete, self.ctxt, grp)
def test_failover_host_invalid_target(self, svc_backend, new_backend,
                                      expected, mock_failover,
                                      mock_getall):
    """Test replication failover_host with an invalid target.

    When failing over fails due to an invalid target exception we
    return replication_status to its previous status, which is decided
    based on the currently active backend.
    """
    service_values = {
        'host': self.host,
        'binary': constants.VOLUME_BINARY,
        'active_backend_id': svc_backend,
        'replication_status': fields.ReplicationStatus.FAILING_OVER,
    }
    svc = utils.create_service(self.context, service_values)

    self.manager.failover_host(self.context, new_backend)

    mock_getall.assert_called_once_with(self.context,
                                        filters={'host': self.host})
    mock_failover.assert_called_once_with(self.context,
                                          mock_getall.return_value,
                                          secondary_id=new_backend)
    refreshed = objects.Service.get_by_id(self.context, svc.id)
    self.assertEqual(expected, refreshed.replication_status)
def test_delete_consistencygroup_frozen(self):
    """CG delete must raise InvalidInput when the service is frozen."""
    frozen_svc = tests_utils.create_service(self.context,
                                            {'frozen': True})
    group = tests_utils.create_consistencygroup(self.context,
                                                host=frozen_svc.host)
    api = cinder.consistencygroup.api.API()
    self.assertRaises(exception.InvalidInput,
                      api.delete, self.context, group)
def test_create_cgsnapshot_frozen(self):
    """Creating a cgsnapshot on a frozen service must be rejected."""
    frozen_svc = tests_utils.create_service(self.context,
                                            {'frozen': True})
    group = tests_utils.create_consistencygroup(self.context,
                                                host=frozen_svc.host)
    api = cinder.consistencygroup.api.API()
    self.assertRaises(exception.InvalidInput,
                      api.create_cgsnapshot,
                      self.context, group, 'cg', 'desc')
def test_delete_snapshot_frozen(self):
    """Snapshot delete must be rejected while the service is frozen."""
    frozen_svc = tests_utils.create_service(self.context,
                                            {'frozen': True})
    vol = tests_utils.create_volume(self.context, host=frozen_svc.host)
    snap = tests_utils.create_snapshot(self.context, vol.id)
    self.assertRaises(exception.InvalidInput,
                      self.volume_api.delete_snapshot,
                      self.context, snap)
def test_failover_host_invalid_target(self, svc_backend, new_backend,
                                      expected, mock_failover,
                                      mock_getall):
    """Test replication failover_host with an invalid target.

    When failing over fails due to an invalid target exception we
    return replication_status to its previous status, which is decided
    based on the currently active backend.
    """
    svc = utils.create_service(
        self.context,
        {'host': self.host,
         'binary': constants.VOLUME_BINARY,
         'active_backend_id': svc_backend,
         'replication_status': fields.ReplicationStatus.FAILING_OVER})

    self.manager.failover_host(self.context, new_backend)

    mock_getall.assert_called_once_with(self.context,
                                        filters={'host': self.host})
    # The driver must have been handed empty volume and group lists.
    mock_failover.assert_called_once_with(self.context, [],
                                          secondary_id=new_backend,
                                          groups=[])
    refreshed = objects.Service.get_by_id(self.context, svc.id)
    self.assertEqual(expected, refreshed.replication_status)
def test_enabled_service(self):
    """Test that enabled services cannot be queried."""
    overrides = {'topic': 'cinder-volume'}
    svc = utils.create_service(self.ctxt, values=overrides)
    self.assertRaises(exception.ServiceNotFound,
                      db.reset_active_backend,
                      self.ctxt, True, 'fake-backend-id', svc.host)
def test_create_group_from_src_frozen(self):
    """create_from_src must be rejected while the service is frozen."""
    frozen_svc = utils.create_service(self.ctxt, {'frozen': True})
    src_group = utils.create_group(self.ctxt, host=frozen_svc.host,
                                   group_type_id='gt')
    api = cinder.group.api.API()
    self.assertRaises(exception.InvalidInput,
                      api.create_from_src,
                      self.ctxt, 'group', 'desc',
                      group_snapshot_id=None,
                      source_group_id=src_group.id)
def setUp(self):
    """Prepare the volume migration tests.

    Stubs out cinder.volume.utils.clear_volume so deletes don't do any
    real data wiping, and creates the destination service ('newhost')
    the migration tests target.
    """
    super(VolumeMigrationTestCase, self).setUp()
    # NOTE(review): the patcher is kept on self and started here;
    # presumably stopped in tearDown/addCleanup elsewhere — confirm.
    # autospec=True keeps the stub signature-compatible with the real
    # clear_volume.
    self._clear_patch = mock.patch('cinder.volume.utils.clear_volume',
                                   autospec=True)
    self._clear_patch.start()
    # Status the tests expect migrated volumes to end up in.
    self.expected_status = 'available'
    # Destination backend the migration tests move volumes to.
    self._service = tests_utils.create_service(
        self.context,
        values={'host': 'newhost', 'binary': constants.VOLUME_BINARY})
def test_disabled_service(self):
    """Test that non-frozen services are rejected."""
    overrides = {'topic': 'cinder-volume', 'disabled': True}
    svc = utils.create_service(self.ctxt, values=overrides)
    self.assertRaises(exception.ServiceUnavailable,
                      db.reset_active_backend,
                      self.ctxt, True, 'fake-backend-id', svc.host)
def test_failover_host_driver_exception(self):
    """A driver failure during failover ends in FAILOVER_ERROR."""
    svc = utils.create_service(
        self.context,
        host=self.host,
        active_backend_id=None,
        replication_status=fields.ReplicationStatus.FAILING_OVER)

    self.manager.failover_host(self.context, mock.sentinel.backend_id)

    refreshed = objects.Service.get_by_id(self.context, svc.id)
    self.assertEqual(fields.ReplicationStatus.FAILOVER_ERROR,
                     refreshed.replication_status)
def test_failover_host_driver_exception(self):
    """A driver failure during failover ends in FAILOVER_ERROR."""
    service_values = {
        'host': self.host,
        'binary': constants.VOLUME_BINARY,
        'active_backend_id': None,
        'replication_status': fields.ReplicationStatus.FAILING_OVER,
    }
    svc = utils.create_service(self.context, service_values)

    self.manager.failover_host(self.context, mock.sentinel.backend_id)

    refreshed = objects.Service.get_by_id(self.context, svc.id)
    self.assertEqual(fields.ReplicationStatus.FAILOVER_ERROR,
                     refreshed.replication_status)
def test_disabled_and_frozen_service(self):
    """Test that disabled and frozen services are updated correctly."""
    # 'seconary' is just an arbitrary fake backend id value.
    overrides = {'topic': 'cinder-volume',
                 'disabled': True,
                 'frozen': True,
                 'replication_status': 'failed-over',
                 'active_backend_id': 'seconary'}
    svc = utils.create_service(self.ctxt, values=overrides)

    db.reset_active_backend(self.ctxt, True, 'fake-backend-id',
                            svc.host)

    # The reset must re-enable the service and clear the failover state.
    db_service = db.service_get(self.ctxt, svc.id)
    self.assertFalse(db_service.disabled)
    self.assertEqual('', db_service.disabled_reason)
    self.assertIsNone(db_service.active_backend_id)
    self.assertEqual('enabled', db_service.replication_status)
def test_failover_host_invalid_target(self, svc_backend, new_backend,
                                      expected, mock_failover,
                                      mock_getall):
    """Test replication failover_host with an invalid target.

    When failing over fails due to an invalid target exception we
    return replication_status to its previous status, which is decided
    based on the currently active backend.
    """
    svc = utils.create_service(
        self.context,
        host=self.host,
        active_backend_id=svc_backend,
        replication_status=fields.ReplicationStatus.FAILING_OVER)

    self.manager.failover_host(self.context, new_backend)

    mock_getall.assert_called_once_with(self.context, self.host)
    mock_failover.assert_called_once_with(self.context,
                                          mock_getall.return_value,
                                          secondary_id=new_backend)
    refreshed = objects.Service.get_by_id(self.context, svc.id)
    self.assertEqual(expected, refreshed.replication_status)
def test_create_snapshot_frozen(self, method):
    """Snapshot-creation APIs must be rejected on a frozen service.

    `method` names the volume-API entry point to exercise; presumably
    supplied by a parameterizing decorator (e.g. ddt) — confirm.
    """
    frozen_svc = tests_utils.create_service(self.context,
                                            {'frozen': True})
    vol = tests_utils.create_volume(self.context, host=frozen_svc.host)
    api_call = getattr(self.volume_api, method)
    self.assertRaises(exception.InvalidInput,
                      api_call, self.context, vol, 'name', 'desc')