Example #1
    def validate_backend_id(self, backend_id):
        # Currently, the VNX driver only supports one remote device.
        if self.active_backend_id:
            if backend_id != 'default':
                raise exception.InvalidReplicationTarget(
                    reason=_('Invalid backend_id specified.'))
        elif backend_id not in (
                common.ReplicationDeviceList.get_backend_ids(self.config)):
            raise exception.InvalidReplicationTarget(
                reason=_('Invalid backend_id specified.'))
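
A minimal, self-contained sketch of the same validation contract may help: once a backend has failed over (active_backend_id is set), only 'default' (failback) is accepted; before that, the target must be one of the configured replication devices. The exception class below is a local stand-in for cinder.exception.InvalidReplicationTarget.

class InvalidReplicationTarget(Exception):
    """Stand-in for cinder.exception.InvalidReplicationTarget."""

def validate_backend_id(active_backend_id, configured_ids, backend_id):
    # After failover, the only valid target is 'default' (failback).
    if active_backend_id:
        if backend_id != 'default':
            raise InvalidReplicationTarget('Invalid backend_id specified.')
    # Before failover, the target must be a configured remote device.
    elif backend_id not in configured_ids:
        raise InvalidReplicationTarget('Invalid backend_id specified.')

validate_backend_id('rdev1', ['rdev1'], 'default')  # ok: failback
validate_backend_id(None, ['rdev1'], 'rdev1')       # ok: fail over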
Example #2
    def failover_host(self, context, volumes, secondary_id=None, groups=None):
        if secondary_id == 'default':
            if not self.active_backend_id:
                return None, [], []

            volumes_update = huawei_flow.failback(volumes, self.local_cli,
                                                  self.replication_rmt_cli,
                                                  self.configuration)
            secondary_id = ''
        elif secondary_id in (None,
                              self.configuration.replication['backend_id']):
            if (self.active_backend_id ==
                    self.configuration.replication['backend_id']):
                # Already failed over; return success.
                return self.active_backend_id, [], []

            volumes_update = huawei_flow.failover(volumes, self.local_cli,
                                                  self.replication_rmt_cli,
                                                  self.configuration)
            secondary_id = self.configuration.replication['backend_id']
        else:
            msg = "Invalid secondary id %s." % secondary_id
            raise exception.InvalidReplicationTarget(reason=msg)

        self.active_backend_id = secondary_id
        self._switch_replication_clients()

        return secondary_id, volumes_update, []
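
The failover_host implementations in this collection (Examples 2, 5, and 8) return the same three-element tuple: the now-active backend id, a list of per-volume model updates, and a list of per-group updates. Below is a hedged sketch of how a caller might consume that tuple; driver and db are hypothetical stand-ins, not Cinder's actual manager code.

def apply_failover_result(db, driver, ctxt, volumes, target):
    # failover_host returns (active_backend_id, volume_updates,
    # group_updates); persist each volume update and report the
    # backend that is now active.
    new_id, vol_updates, grp_updates = driver.failover_host(
        ctxt, volumes, secondary_id=target)
    for item in vol_updates:
        db.volume_update(ctxt, item['volume_id'], item['updates'])
    return new_id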
Example #3
class ReplicationTestCase(base.BaseVolumeTestCase):
    def setUp(self):
        super(ReplicationTestCase, self).setUp()
        self.host = 'host@backend#pool'
        self.manager = manager.VolumeManager(host=self.host)

    @mock.patch('cinder.objects.VolumeList.get_all')
    @mock.patch('cinder.volume.driver.BaseVD.failover_host',
                side_effect=exception.InvalidReplicationTarget(''))
    @ddt.data(('backend2', 'default', fields.ReplicationStatus.FAILED_OVER),
              ('backend2', 'backend3', fields.ReplicationStatus.FAILED_OVER),
              (None, 'backend2', fields.ReplicationStatus.ENABLED),
              ('', 'backend2', fields.ReplicationStatus.ENABLED))
    @ddt.unpack
    def test_failover_host_invalid_target(self, svc_backend, new_backend,
                                          expected, mock_failover,
                                          mock_getall):
        """Test replication failover_host with invalid_target.

        When failingover fails due to an invalid target exception we return
        replication_status to its previous status, and we decide what that is
        depending on the currect active backend.
        """
        svc = utils.create_service(
            self.context, {
                'host': self.host,
                'binary': constants.VOLUME_BINARY,
                'active_backend_id': svc_backend,
                'replication_status': fields.ReplicationStatus.FAILING_OVER
            })

        self.manager.failover_host(self.context, new_backend)
        mock_getall.assert_called_once_with(self.context,
                                            filters={'host': self.host})
        mock_failover.assert_called_once_with(self.context,
                                              mock_getall.return_value,
                                              secondary_id=new_backend)

        db_svc = objects.Service.get_by_id(self.context, svc.id)
        self.assertEqual(expected, db_svc.replication_status)

    @mock.patch('cinder.volume.driver.BaseVD.failover_host',
                mock.Mock(side_effect=exception.VolumeDriverException('')))
    def test_failover_host_driver_exception(self):
        svc = utils.create_service(
            self.context, {
                'host': self.host,
                'binary': constants.VOLUME_BINARY,
                'active_backend_id': None,
                'replication_status': fields.ReplicationStatus.FAILING_OVER
            })

        self.manager.failover_host(self.context, mock.sentinel.backend_id)

        db_svc = objects.Service.get_by_id(self.context, svc.id)
        self.assertEqual(fields.ReplicationStatus.FAILOVER_ERROR,
                         db_svc.replication_status)
Example #4
    def _failover_host(self, volumes, secondary_id=None):

        if secondary_id == self.backend_name:
            msg = _("Cannot failover to the same host as the primary.")
            raise exception.InvalidReplicationTarget(reason=msg)

        replication_targets = self.get_replication_backend_names(
            self.configuration)

        if not replication_targets:
            msg = _("No replication targets configured for backend "
                    "%s. Cannot failover.")
            raise exception.InvalidReplicationTarget(reason=msg % self.host)
        elif secondary_id and secondary_id not in replication_targets:
            msg = _("%(target)s is not among replication targets configured "
                    "for back end %(host)s. Cannot failover.")
            payload = {
                'target': secondary_id,
                'host': self.host,
            }
            raise exception.InvalidReplicationTarget(reason=msg % payload)

        flexvols = self.ssc_library.get_ssc_flexvol_names()

        try:
            active_backend_name, volume_updates = self._complete_failover(
                self.backend_name,
                replication_targets,
                flexvols,
                volumes,
                failover_target=secondary_id)
        except exception.NetAppDriverException as e:
            msg = _("Could not complete failover: %s") % e
            raise exception.UnableToFailOver(reason=msg)

        # Update the ZAPI client to the backend we failed over to
        self._update_zapi_client(active_backend_name)

        self.failed_over = True
        self.failed_over_backend_name = active_backend_name

        return active_backend_name, volume_updates
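
The try/except above is a wrap-and-translate pattern: the backend-specific NetAppDriverException becomes the generic UnableToFailOver, so the volume manager can handle all drivers uniformly. A minimal sketch of the pattern, with local stand-ins for the Cinder exception classes:

class NetAppDriverException(Exception):
    """Stand-in for the backend-specific error."""

class UnableToFailOver(Exception):
    """Stand-in for cinder.exception.UnableToFailOver."""

def complete_failover_or_raise(do_failover):
    try:
        return do_failover()
    except NetAppDriverException as e:
        # Translate the driver error into the generic failover error.
        raise UnableToFailOver('Could not complete failover: %s' % e)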
Example #5
    def failover_host(self, context, volumes, secondary_id=None, groups=None):
        if secondary_id not in self.failover_choices:
            msg = (_("Target %(target)s is not a valid choice. "
                     "Valid choices: %(choices)s.") % {
                         "target": secondary_id,
                         "choices": ', '.join(self.failover_choices)
                     })
            LOG.error(msg)
            raise exception.InvalidReplicationTarget(reason=msg)
        is_failback = secondary_id == manager.VolumeManager.FAILBACK_SENTINEL
        self.active_backend_id = secondary_id
        volumes_updates, groups_updates = self.adapter.failover_host(
            volumes, groups, is_failback)
        return secondary_id, volumes_updates, groups_updates
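
Here secondary_id is validated against a precomputed failover_choices set, and failback is detected by comparing against the manager's FAILBACK_SENTINEL (which Cinder defines as 'default'). Below is a hedged sketch of one plausible way such a choice set could be built; build_failover_choices is hypothetical, not part of the driver above.

FAILBACK_SENTINEL = 'default'

def build_failover_choices(replication_device_ids, active_backend_id):
    if active_backend_id:
        # Already failed over: only failback is a valid target.
        return {FAILBACK_SENTINEL}
    # Not failed over: any configured target is valid, and None
    # means "pick the default replication target".
    return set(replication_device_ids) | {None}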
Example #6
    def _pre_fail_check(self, vol, running_status_set, data_status_set=None):
        # check the replica_pair status
        vol_name = huawei_utils.encode_name(vol.id)
        vol_id = self.local_client.get_lun_id_by_name(vol_name)
        pair_info = self.local_client.get_pair_info_by_lun_id(vol_id)
        if pair_info:
            running_status = self.local_op.is_running_status(
                running_status_set, pair_info)
            data_status = self.local_op.is_data_status(
                data_status_set, pair_info) if data_status_set else True

            if not (running_status and data_status):
                msg = _('Replication pair %(id)s is not in a '
                        'failover/failback-available status, RUNNINGSTATUS: '
                        '%(run)s, SECRESDATASTATUS: %(sec)s.') % {
                            'id': pair_info['ID'],
                            'run': pair_info['RUNNINGSTATUS'],
                            'sec': pair_info['SECRESDATASTATUS']
                        }
                LOG.error(msg)
                raise exception.InvalidReplicationTarget(reason=msg)
Example #7
    def _pre_fail_check(self, volumes, status_check_func):
        normal_volumes = []
        pair_ids = []
        group_ids = set()
        volume_pair_infos = {}

        for v in volumes:
            drv_data = huawei_utils.to_dict(v.replication_driver_data)
            pair_id = drv_data.get('pair_id')
            if not pair_id:
                normal_volumes.append(v.id)
                continue

            pair_info = self.pair_op.get_info(pair_id)
            volume_pair_infos[v.id] = pair_info

            cg_id = pair_info.get('CGID')
            if cg_id:
                group_ids.add(cg_id)
            else:
                pair_ids.append(pair_id)

        for pair_info in six.itervalues(volume_pair_infos):
            if not status_check_func(pair_info):
                msg = _('Replication pair %(id)s is not in a '
                        'failover/failback-available status, RUNNINGSTATUS: '
                        '%(run)s, SECRESDATASTATUS: %(sec)s.') % {
                            'id': pair_info['ID'],
                            'run': pair_info['RUNNINGSTATUS'],
                            'sec': pair_info['SECRESDATASTATUS']
                        }
                LOG.error(msg)
                raise exception.InvalidReplicationTarget(reason=msg)

        return normal_volumes, list(group_ids), pair_ids, volume_pair_infos
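
The status_check_func parameter above is a callable that receives a pair_info dict and decides whether that replication pair is in a state from which failover or failback can proceed. A hedged example of such a callable follows; the status constant values are illustrative, not the real Huawei array codes.

RUNNING_STATUS_NORMAL = '1'   # illustrative value
DATA_STATUS_SYNCED = '2'      # illustrative value

def failover_available(pair_info):
    """Return True when this replication pair may be failed over."""
    return (pair_info.get('RUNNINGSTATUS') == RUNNING_STATUS_NORMAL and
            pair_info.get('SECRESDATASTATUS') == DATA_STATUS_SYNCED)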
Example #8
    def failover_host(self, ctxt, volumes, secondary_id, groups=None):
        """Fail over the volume back and forth.

        if secondary_id is 'default', volumes will be failed back,
        otherwize failed over.
        """
        volume_update_list = []
        if secondary_id == strings.PRIMARY_BACKEND_ID:
            if not self._active_backend_id:
                LOG.info("Host has been failed back. doesn't need "
                         "to fail back again.")
                return self._active_backend_id, volume_update_list, []
        else:
            if self._active_backend_id:
                LOG.info("Host has been failed over to %s.",
                         self._active_backend_id)
                return self._active_backend_id, volume_update_list, []

            backend_id = self._replication._target_helper.backend['id']
            if secondary_id is None:
                secondary_id = backend_id
            elif secondary_id != backend_id:
                raise exception.InvalidReplicationTarget(
                    message=(_('Invalid secondary_backend_id specified. '
                               'Valid backend id is %s.') % backend_id))

        LOG.debug("Starting failover to %s.", secondary_id)

        replicated_luns = []
        for volume in volumes:
            lun = Lun(volume)
            if lun.type_replication and lun.status == "available":
                replicated_luns.append(lun)
            else:
                volume_update = (
                    self._replication.failover_unreplicated_volume(lun))
                volume_update_list.append(volume_update)

        if replicated_luns:
            try:
                if secondary_id != strings.PRIMARY_BACKEND_ID:
                    self._replication.do_pprc_failover(replicated_luns,
                                                       secondary_id)
                    self._active_backend_id = secondary_id
                    replicated_luns = self._switch_backend_connection(
                        secondary_id, replicated_luns)
                else:
                    self._replication.start_pprc_failback(
                        replicated_luns, self._active_backend_id)
                    self._active_backend_id = ""
                    self._helper = self._replication._source_helper
            except restclient.APIException as e:
                raise exception.UnableToFailOver(
                    reason=(_("Unable to failover host to %(id)s. "
                              "Exception= %(ex)s")
                            % {'id': secondary_id, 'ex': six.text_type(e)}))

            for lun in replicated_luns:
                volume_update = lun.get_volume_update()
                volume_update['replication_status'] = (
                    'failed-over' if self._active_backend_id else 'enabled')
                model_update = {'volume_id': lun.os_id,
                                'updates': volume_update}
                volume_update_list.append(model_update)
        else:
            LOG.info("No volume has replication capability.")
            if secondary_id != strings.PRIMARY_BACKEND_ID:
                LOG.info("Switch to the target %s", secondary_id)
                self._switch_backend_connection(secondary_id)
                self._active_backend_id = secondary_id
            else:
                LOG.info("Switch to the primary %s", secondary_id)
                self._switch_backend_connection(self._active_backend_id)
                self._active_backend_id = ""

        return secondary_id, volume_update_list, []
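
For reference, every entry appended to volume_update_list above has the same two-key shape that the volume manager persists. A minimal illustration; the id value is a placeholder.

model_update = {
    'volume_id': 'volume-uuid-placeholder',
    'updates': {'replication_status': 'failed-over'},
}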
Example #9
class ReplicationTestCase(base.BaseVolumeTestCase):
    def setUp(self):
        super(ReplicationTestCase, self).setUp()
        self.host = 'host@backend#pool'
        self.manager = manager.VolumeManager(host=self.host)

    @mock.patch('cinder.objects.VolumeList.get_all')
    @mock.patch('cinder.volume.driver.BaseVD.failover_host',
                side_effect=exception.InvalidReplicationTarget(''))
    @ddt.data(('backend2', 'default', fields.ReplicationStatus.FAILED_OVER),
              ('backend2', 'backend3', fields.ReplicationStatus.FAILED_OVER),
              (None, 'backend2', fields.ReplicationStatus.ENABLED),
              ('', 'backend2', fields.ReplicationStatus.ENABLED))
    @ddt.unpack
    def test_failover_host_invalid_target(self, svc_backend, new_backend,
                                          expected, mock_failover,
                                          mock_getall):
        """Test replication failover_host with invalid_target.

        When failingover fails due to an invalid target exception we return
        replication_status to its previous status, and we decide what that is
        depending on the currect active backend.
        """
        svc = utils.create_service(
            self.context,
            {'host': self.host,
             'binary': constants.VOLUME_BINARY,
             'active_backend_id': svc_backend,
             'replication_status': fields.ReplicationStatus.FAILING_OVER})

        self.manager.failover_host(self.context, new_backend)
        mock_getall.assert_called_once_with(self.context,
                                            filters={'host': self.host})
        mock_failover.assert_called_once_with(self.context,
                                              mock_getall.return_value,
                                              secondary_id=new_backend)

        db_svc = objects.Service.get_by_id(self.context, svc.id)
        self.assertEqual(expected, db_svc.replication_status)

    @mock.patch('cinder.volume.driver.BaseVD.failover_host',
                mock.Mock(side_effect=exception.VolumeDriverException('')))
    def test_failover_host_driver_exception(self):
        svc = utils.create_service(
            self.context,
            {'host': self.host,
             'binary': constants.VOLUME_BINARY,
             'active_backend_id': None,
             'replication_status': fields.ReplicationStatus.FAILING_OVER})

        self.manager.failover_host(self.context, mock.sentinel.backend_id)

        db_svc = objects.Service.get_by_id(self.context, svc.id)
        self.assertEqual(fields.ReplicationStatus.FAILOVER_ERROR,
                         db_svc.replication_status)

    @mock.patch('cinder.objects.Service.is_up', True)
    @mock.patch.object(volume_rpcapi.VolumeAPI, 'failover')
    @mock.patch.object(cinder.db, 'conditional_update')
    @mock.patch.object(objects.ServiceList, 'get_all')
    def test_failover(self, mock_get_all, mock_db_update, mock_failover):
        """Test replication failover."""

        service = fake_service.fake_service_obj(self.context,
                                                binary='cinder-volume')
        mock_get_all.return_value = [service]
        mock_db_update.return_value = {'replication_status': 'enabled'}
        volume_api = cinder.volume.api.API()
        volume_api.failover(self.context, host=CONF.host, cluster_name=None)
        mock_failover.assert_called_once_with(self.context, service, None)

    @mock.patch.object(volume_rpcapi.VolumeAPI, 'failover')
    @mock.patch.object(cinder.db, 'conditional_update')
    @mock.patch.object(cinder.db, 'service_get_all')
    def test_failover_unexpected_status(self, mock_db_get_all, mock_db_update,
                                        mock_failover):
        """Test replication failover unexpected status."""

        mock_db_get_all.return_value = [fake_service.fake_service_obj(
            self.context,
            binary='cinder-volume')]
        mock_db_update.return_value = None
        volume_api = cinder.volume.api.API()
        self.assertRaises(exception.InvalidInput,
                          volume_api.failover,
                          self.context,
                          host=CONF.host,
                          cluster_name=None)

    @mock.patch.object(volume_rpcapi.VolumeAPI, 'freeze_host')
    @mock.patch.object(cinder.db, 'conditional_update', return_value=1)
    @mock.patch.object(cinder.objects.ServiceList, 'get_all')
    def test_freeze_host(self, mock_get_all, mock_db_update,
                         mock_freeze):
        """Test replication freeze_host."""

        service = fake_service.fake_service_obj(self.context,
                                                binary='cinder-volume')
        mock_get_all.return_value = [service]
        mock_freeze.return_value = True
        volume_api = cinder.volume.api.API()
        volume_api.freeze_host(self.context, host=CONF.host, cluster_name=None)
        mock_freeze.assert_called_once_with(self.context, service)

    @mock.patch.object(volume_rpcapi.VolumeAPI, 'freeze_host')
    @mock.patch.object(cinder.db, 'conditional_update')
    @mock.patch.object(cinder.db, 'service_get_all')
    def test_freeze_host_unexpected_status(self, mock_get_all,
                                           mock_db_update,
                                           mock_freeze):
        """Test replication freeze_host unexpected status."""

        mock_get_all.return_value = [fake_service.fake_service_obj(
            self.context,
            binary='cinder-volume')]
        mock_db_update.return_value = None
        volume_api = cinder.volume.api.API()
        self.assertRaises(exception.InvalidInput,
                          volume_api.freeze_host,
                          self.context,
                          host=CONF.host,
                          cluster_name=None)

    @mock.patch.object(volume_rpcapi.VolumeAPI, 'thaw_host')
    @mock.patch.object(cinder.db, 'conditional_update', return_value=1)
    @mock.patch.object(cinder.objects.ServiceList, 'get_all')
    def test_thaw_host(self, mock_get_all, mock_db_update,
                       mock_thaw):
        """Test replication thaw_host."""

        service = fake_service.fake_service_obj(self.context,
                                                binary='cinder-volume')
        mock_get_all.return_value = [service]
        mock_thaw.return_value = True
        volume_api = cinder.volume.api.API()
        volume_api.thaw_host(self.context, host=CONF.host, cluster_name=None)
        mock_thaw.assert_called_once_with(self.context, service)

    @mock.patch.object(volume_rpcapi.VolumeAPI, 'thaw_host')
    @mock.patch.object(cinder.db, 'conditional_update')
    @mock.patch.object(cinder.db, 'service_get_all')
    def test_thaw_host_unexpected_status(self, mock_get_all,
                                         mock_db_update,
                                         mock_thaw):
        """Test replication thaw_host unexpected status."""

        mock_get_all.return_value = [fake_service.fake_service_obj(
            self.context,
            binary='cinder-volume')]
        mock_db_update.return_value = None
        volume_api = cinder.volume.api.API()
        self.assertRaises(exception.InvalidInput,
                          volume_api.thaw_host,
                          self.context,
                          host=CONF.host, cluster_name=None)

    @mock.patch('cinder.volume.driver.BaseVD.failover_completed')
    def test_failover_completed(self, completed_mock):
        rep_field = fields.ReplicationStatus
        svc = objects.Service(self.context, host=self.volume.host,
                              binary=constants.VOLUME_BINARY,
                              replication_status=rep_field.ENABLED)
        svc.create()
        self.volume.failover_completed(
            self.context,
            {'active_backend_id': 'secondary',
             'replication_status': rep_field.FAILED_OVER})
        service = objects.Service.get_by_id(self.context, svc.id)
        self.assertEqual('secondary', service.active_backend_id)
        self.assertEqual('failed-over', service.replication_status)
        completed_mock.assert_called_once_with(self.context, 'secondary')

    @mock.patch('cinder.volume.driver.BaseVD.failover_completed', wraps=True)
    def test_failover_completed_driver_failure(self, completed_mock):
        rep_field = fields.ReplicationStatus
        svc = objects.Service(self.context, host=self.volume.host,
                              binary=constants.VOLUME_BINARY,
                              replication_status=rep_field.ENABLED)
        svc.create()
        self.volume.failover_completed(
            self.context,
            {'active_backend_id': 'secondary',
             'replication_status': rep_field.FAILED_OVER})
        service = objects.Service.get_by_id(self.context, svc.id)
        self.assertEqual('secondary', service.active_backend_id)
        self.assertEqual(rep_field.ERROR, service.replication_status)
        self.assertTrue(service.disabled)
        self.assertIsNotNone(service.disabled_reason)
        completed_mock.assert_called_once_with(self.context, 'secondary')

    @mock.patch('cinder.volume.rpcapi.VolumeAPI.failover_completed')
    def test_finish_failover_non_clustered(self, completed_mock):
        svc = mock.Mock(is_clustered=None)
        self.volume.finish_failover(self.context, svc, mock.sentinel.updates)
        svc.update.assert_called_once_with(mock.sentinel.updates)
        svc.save.assert_called_once_with()
        completed_mock.assert_not_called()

    @mock.patch('cinder.volume.rpcapi.VolumeAPI.failover_completed')
    def test_finish_failover_clustered(self, completed_mock):
        svc = mock.Mock(cluster_name='cluster_name')
        updates = {'status': 'error'}
        self.volume.finish_failover(self.context, svc, updates)
        completed_mock.assert_called_once_with(self.context, svc, updates)
        svc.cluster.status = 'error'
        svc.cluster.save.assert_called_once()

    @ddt.data(None, 'cluster_name')
    @mock.patch('cinder.volume.manager.VolumeManager.finish_failover')
    @mock.patch('cinder.volume.manager.VolumeManager._get_my_volumes')
    def test_failover_manager(self, cluster, get_vols_mock, finish_mock):
        """Test manager's failover method for clustered and not clustered."""
        rep_field = fields.ReplicationStatus
        svc = objects.Service(self.context, host=self.volume.host,
                              binary=constants.VOLUME_BINARY,
                              cluster_name=cluster,
                              replication_status=rep_field.ENABLED)
        svc.create()

        vol = objects.Volume(self.context, host=self.volume.host)
        vol.create()

        get_vols_mock.return_value = [vol]

        with mock.patch.object(self.volume, 'driver') as driver:
            called, not_called = driver.failover_host, driver.failover
            if cluster:
                called, not_called = not_called, called

            called.return_value = ('secondary', [{'volume_id': vol.id,
                                   'updates': {'status': 'error'}}])

            self.volume.failover(self.context,
                                 secondary_backend_id='secondary')

        not_called.assert_not_called()
        called.assert_called_once_with(self.context, [vol],
                                       secondary_id='secondary')

        expected_update = {'replication_status': rep_field.FAILED_OVER,
                           'active_backend_id': 'secondary',
                           'disabled': True,
                           'disabled_reason': 'failed-over'}
        finish_mock.assert_called_once_with(self.context, svc, expected_update)

        volume = objects.Volume.get_by_id(self.context, vol.id)
        self.assertEqual('error', volume.status)

    @ddt.data(('host1', None), (None, 'mycluster'))
    @ddt.unpack
    def test_failover_api_fail_multiple_results(self, host, cluster):
        """Fail if we try to failover multiple backends in the same request."""
        rep_field = fields.ReplicationStatus
        clusters = [
            objects.Cluster(self.context,
                            name='mycluster@backend1',
                            replication_status=rep_field.ENABLED,
                            binary=constants.VOLUME_BINARY),
            objects.Cluster(self.context,
                            name='mycluster@backend2',
                            replication_status=rep_field.ENABLED,
                            binary=constants.VOLUME_BINARY)
        ]
        clusters[0].create()
        clusters[1].create()
        services = [
            objects.Service(self.context, host='host1@backend1',
                            cluster_name=clusters[0].name,
                            replication_status=rep_field.ENABLED,
                            binary=constants.VOLUME_BINARY),
            objects.Service(self.context, host='host1@backend2',
                            cluster_name=clusters[1].name,
                            replication_status=rep_field.ENABLED,
                            binary=constants.VOLUME_BINARY),
        ]
        services[0].create()
        services[1].create()
        self.assertRaises(exception.Invalid,
                          self.volume_api.failover, self.context, host,
                          cluster)

    def test_failover_api_not_found(self):
        self.assertRaises(exception.ServiceNotFound, self.volume_api.failover,
                          self.context, 'host1', None)

    @mock.patch('cinder.volume.rpcapi.VolumeAPI.failover')
    def test_failover_api_success_multiple_results(self, failover_mock):
        """Succeed to failover multiple services for the same backend."""
        rep_field = fields.ReplicationStatus
        cluster_name = 'mycluster@backend1'
        cluster = objects.Cluster(self.context,
                                  name=cluster_name,
                                  replication_status=rep_field.ENABLED,
                                  binary=constants.VOLUME_BINARY)
        cluster.create()
        services = [
            objects.Service(self.context, host='host1@backend1',
                            cluster_name=cluster_name,
                            replication_status=rep_field.ENABLED,
                            binary=constants.VOLUME_BINARY),
            objects.Service(self.context, host='host2@backend1',
                            cluster_name=cluster_name,
                            replication_status=rep_field.ENABLED,
                            binary=constants.VOLUME_BINARY),
        ]
        services[0].create()
        services[1].create()

        self.volume_api.failover(self.context, None, cluster_name,
                                 mock.sentinel.secondary_id)

        for service in services + [cluster]:
            self.assertEqual(rep_field.ENABLED, service.replication_status)
            service.refresh()
            self.assertEqual(rep_field.FAILING_OVER,
                             service.replication_status)

        failover_mock.assert_called_once_with(self.context, mock.ANY,
                                              mock.sentinel.secondary_id)
        self.assertEqual(services[0].id, failover_mock.call_args[0][1].id)

    @mock.patch('cinder.volume.rpcapi.VolumeAPI.failover')
    def test_failover_api_success_multiple_results_not_updated(self,
                                                               failover_mock):
        """Succeed to failover even if a service is not updated."""
        rep_field = fields.ReplicationStatus
        cluster_name = 'mycluster@backend1'
        cluster = objects.Cluster(self.context,
                                  name=cluster_name,
                                  replication_status=rep_field.ENABLED,
                                  binary=constants.VOLUME_BINARY)
        cluster.create()
        services = [
            objects.Service(self.context, host='host1@backend1',
                            cluster_name=cluster_name,
                            replication_status=rep_field.ENABLED,
                            binary=constants.VOLUME_BINARY),
            objects.Service(self.context, host='host2@backend1',
                            cluster_name=cluster_name,
                            replication_status=rep_field.ERROR,
                            binary=constants.VOLUME_BINARY),
        ]
        services[0].create()
        services[1].create()

        self.volume_api.failover(self.context, None, cluster_name,
                                 mock.sentinel.secondary_id)

        for service in services[:1] + [cluster]:
            service.refresh()
            self.assertEqual(rep_field.FAILING_OVER,
                             service.replication_status)

        services[1].refresh()
        self.assertEqual(rep_field.ERROR, services[1].replication_status)

        failover_mock.assert_called_once_with(self.context, mock.ANY,
                                              mock.sentinel.secondary_id)
        self.assertEqual(services[0].id, failover_mock.call_args[0][1].id)

    @mock.patch('cinder.volume.rpcapi.VolumeAPI.failover')
    def test_failover_api_fail_multiple_results_not_updated(self,
                                                            failover_mock):
        """Fail if none of the services could be updated."""
        rep_field = fields.ReplicationStatus
        cluster_name = 'mycluster@backend1'
        cluster = objects.Cluster(self.context,
                                  name=cluster_name,
                                  replication_status=rep_field.ENABLED,
                                  binary=constants.VOLUME_BINARY)
        cluster.create()
        down_time = timeutils.datetime.datetime(1970, 1, 1)
        services = [
            # This service is down
            objects.Service(self.context, host='host1@backend1',
                            cluster_name=cluster_name,
                            replication_status=rep_field.ENABLED,
                            created_at=down_time,
                            updated_at=down_time,
                            modified_at=down_time,
                            binary=constants.VOLUME_BINARY),
            # This service is not with the right replication status
            objects.Service(self.context, host='host2@backend1',
                            cluster_name=cluster_name,
                            replication_status=rep_field.ERROR,
                            binary=constants.VOLUME_BINARY),
        ]
        services[0].create()
        services[1].create()

        self.assertRaises(exception.InvalidInput,
                          self.volume_api.failover, self.context, None,
                          cluster_name, mock.sentinel.secondary_id)

        for service in services:
            svc = objects.Service.get_by_id(self.context, service.id)
            self.assertEqual(service.replication_status,
                             svc.replication_status)

        cluster.refresh()
        self.assertEqual(rep_field.ENABLED, cluster.replication_status)

        failover_mock.assert_not_called()
Example #10
class ReplicationTestCase(base.BaseVolumeTestCase):
    def setUp(self):
        super(ReplicationTestCase, self).setUp()
        self.host = 'host@backend#pool'
        self.manager = manager.VolumeManager(host=self.host)

    @mock.patch('cinder.objects.VolumeList.get_all')
    @mock.patch('cinder.volume.driver.BaseVD.failover_host',
                side_effect=exception.InvalidReplicationTarget(''))
    @ddt.data(('backend2', 'default', fields.ReplicationStatus.FAILED_OVER),
              ('backend2', 'backend3', fields.ReplicationStatus.FAILED_OVER),
              (None, 'backend2', fields.ReplicationStatus.ENABLED),
              ('', 'backend2', fields.ReplicationStatus.ENABLED))
    @ddt.unpack
    def test_failover_host_invalid_target(self, svc_backend, new_backend,
                                          expected, mock_failover,
                                          mock_getall):
        """Test replication failover_host with invalid_target.

        When failingover fails due to an invalid target exception we return
        replication_status to its previous status, and we decide what that is
        depending on the currect active backend.
        """
        svc = utils.create_service(
            self.context,
            {'host': self.host,
             'binary': constants.VOLUME_BINARY,
             'active_backend_id': svc_backend,
             'replication_status': fields.ReplicationStatus.FAILING_OVER})

        self.manager.failover_host(self.context, new_backend)
        mock_getall.assert_called_once_with(self.context,
                                            filters={'host': self.host})
        mock_failover.assert_called_once_with(self.context,
                                              [],
                                              secondary_id=new_backend,
                                              groups=[])

        db_svc = objects.Service.get_by_id(self.context, svc.id)
        self.assertEqual(expected, db_svc.replication_status)

    @mock.patch('cinder.volume.driver.BaseVD.failover_host',
                mock.Mock(side_effect=exception.VolumeDriverException('')))
    def test_failover_host_driver_exception(self):
        svc = utils.create_service(
            self.context,
            {'host': self.host,
             'binary': constants.VOLUME_BINARY,
             'active_backend_id': None,
             'replication_status': fields.ReplicationStatus.FAILING_OVER})

        self.manager.failover_host(self.context, mock.sentinel.backend_id)

        db_svc = objects.Service.get_by_id(self.context, svc.id)
        self.assertEqual(fields.ReplicationStatus.FAILOVER_ERROR,
                         db_svc.replication_status)

    @mock.patch('cinder.objects.Service.is_up', True)
    @mock.patch.object(volume_rpcapi.VolumeAPI, 'failover')
    @mock.patch.object(cinder.db, 'conditional_update')
    @mock.patch.object(objects.ServiceList, 'get_all')
    def test_failover(self, mock_get_all, mock_db_update, mock_failover):
        """Test replication failover."""

        service = fake_service.fake_service_obj(self.context,
                                                binary='cinder-volume')
        mock_get_all.return_value = [service]
        mock_db_update.return_value = {'replication_status': 'enabled'}
        volume_api = cinder.volume.api.API()
        volume_api.failover(self.context, host=CONF.host, cluster_name=None)
        mock_failover.assert_called_once_with(self.context, service, None)

    @mock.patch.object(volume_rpcapi.VolumeAPI, 'failover')
    @mock.patch.object(cinder.db, 'conditional_update')
    @mock.patch.object(cinder.db, 'service_get_all')
    def test_failover_unexpected_status(self, mock_db_get_all, mock_db_update,
                                        mock_failover):
        """Test replication failover unexpected status."""

        mock_db_get_all.return_value = [fake_service.fake_service_obj(
            self.context,
            binary=constants.VOLUME_BINARY)]
        mock_db_update.return_value = None
        volume_api = cinder.volume.api.API()
        self.assertRaises(exception.InvalidInput,
                          volume_api.failover,
                          self.context,
                          host=CONF.host,
                          cluster_name=None)

    @mock.patch.object(volume_rpcapi.VolumeAPI, 'freeze_host')
    @mock.patch.object(cinder.db, 'conditional_update', return_value=1)
    @mock.patch.object(cinder.objects.ServiceList, 'get_all')
    def test_freeze_host(self, mock_get_all, mock_db_update,
                         mock_freeze):
        """Test replication freeze_host."""

        service = fake_service.fake_service_obj(self.context,
                                                binary=constants.VOLUME_BINARY)
        mock_get_all.return_value = [service]
        mock_freeze.return_value = True
        volume_api = cinder.volume.api.API()
        volume_api.freeze_host(self.context, host=CONF.host, cluster_name=None)
        mock_freeze.assert_called_once_with(self.context, service)

    @mock.patch.object(volume_rpcapi.VolumeAPI, 'freeze_host')
    @mock.patch.object(cinder.db, 'conditional_update')
    @mock.patch.object(cinder.db, 'service_get_all')
    def test_freeze_host_unexpected_status(self, mock_get_all,
                                           mock_db_update,
                                           mock_freeze):
        """Test replication freeze_host unexpected status."""

        mock_get_all.return_value = [fake_service.fake_service_obj(
            self.context,
            binary=constants.VOLUME_BINARY)]
        mock_db_update.return_value = None
        volume_api = cinder.volume.api.API()
        self.assertRaises(exception.InvalidInput,
                          volume_api.freeze_host,
                          self.context,
                          host=CONF.host,
                          cluster_name=None)

    @mock.patch.object(volume_rpcapi.VolumeAPI, 'thaw_host')
    @mock.patch.object(cinder.db, 'conditional_update', return_value=1)
    @mock.patch.object(cinder.objects.ServiceList, 'get_all')
    def test_thaw_host(self, mock_get_all, mock_db_update,
                       mock_thaw):
        """Test replication thaw_host."""

        service = fake_service.fake_service_obj(self.context,
                                                binary=constants.VOLUME_BINARY)
        mock_get_all.return_value = [service]
        mock_thaw.return_value = True
        volume_api = cinder.volume.api.API()
        volume_api.thaw_host(self.context, host=CONF.host, cluster_name=None)
        mock_thaw.assert_called_once_with(self.context, service)

    @mock.patch.object(volume_rpcapi.VolumeAPI, 'thaw_host')
    @mock.patch.object(cinder.db, 'conditional_update')
    @mock.patch.object(cinder.db, 'service_get_all')
    def test_thaw_host_unexpected_status(self, mock_get_all,
                                         mock_db_update,
                                         mock_thaw):
        """Test replication thaw_host unexpected status."""

        mock_get_all.return_value = [fake_service.fake_service_obj(
            self.context,
            binary=constants.VOLUME_BINARY)]
        mock_db_update.return_value = None
        volume_api = cinder.volume.api.API()
        self.assertRaises(exception.InvalidInput,
                          volume_api.thaw_host,
                          self.context,
                          host=CONF.host, cluster_name=None)

    @mock.patch('cinder.volume.driver.BaseVD.failover_completed')
    def test_failover_completed(self, completed_mock):
        rep_field = fields.ReplicationStatus
        svc = objects.Service(self.context, host=self.volume.host,
                              binary=constants.VOLUME_BINARY,
                              replication_status=rep_field.ENABLED)
        svc.create()
        self.volume.failover_completed(
            self.context,
            {'active_backend_id': 'secondary',
             'replication_status': rep_field.FAILED_OVER})
        service = objects.Service.get_by_id(self.context, svc.id)
        self.assertEqual('secondary', service.active_backend_id)
        self.assertEqual('failed-over', service.replication_status)
        completed_mock.assert_called_once_with(self.context, 'secondary')

    @mock.patch('cinder.volume.driver.BaseVD.failover_completed', wraps=True)
    def test_failover_completed_driver_failure(self, completed_mock):
        rep_field = fields.ReplicationStatus
        svc = objects.Service(self.context, host=self.volume.host,
                              binary=constants.VOLUME_BINARY,
                              replication_status=rep_field.ENABLED)
        svc.create()
        self.volume.failover_completed(
            self.context,
            {'active_backend_id': 'secondary',
             'replication_status': rep_field.FAILED_OVER})
        service = objects.Service.get_by_id(self.context, svc.id)
        self.assertEqual('secondary', service.active_backend_id)
        self.assertEqual(rep_field.ERROR, service.replication_status)
        self.assertTrue(service.disabled)
        self.assertIsNotNone(service.disabled_reason)
        completed_mock.assert_called_once_with(self.context, 'secondary')

    @mock.patch('cinder.volume.rpcapi.VolumeAPI.failover_completed')
    def test_finish_failover_non_clustered(self, completed_mock):
        svc = mock.Mock(is_clustered=None)
        self.volume.finish_failover(self.context, svc, mock.sentinel.updates)
        svc.update.assert_called_once_with(mock.sentinel.updates)
        svc.save.assert_called_once_with()
        completed_mock.assert_not_called()

    @mock.patch('cinder.volume.rpcapi.VolumeAPI.failover_completed')
    def test_finish_failover_clustered(self, completed_mock):
        svc = mock.Mock(cluster_name='cluster_name')
        updates = {'status': 'error'}
        self.volume.finish_failover(self.context, svc, updates)
        completed_mock.assert_called_once_with(self.context, svc, updates)
        svc.cluster.status = 'error'
        svc.cluster.save.assert_called_once()

    @ddt.data(None, 'cluster_name')
    @mock.patch('cinder.volume.manager.VolumeManager.finish_failover')
    @mock.patch('cinder.volume.manager.VolumeManager._get_my_volumes')
    def test_failover_manager(self, cluster, get_vols_mock, finish_mock):
        """Test manager's failover method for clustered and not clustered."""
        rep_field = fields.ReplicationStatus
        svc = objects.Service(self.context, host=self.volume.host,
                              binary=constants.VOLUME_BINARY,
                              cluster_name=cluster,
                              replication_status=rep_field.ENABLED)
        svc.create()

        vol = objects.Volume(self.context, host=self.volume.host)
        vol.create()

        get_vols_mock.return_value = [vol]

        with mock.patch.object(self.volume, 'driver') as driver:
            called, not_called = driver.failover_host, driver.failover
            if cluster:
                called, not_called = not_called, called

            called.return_value = ('secondary', [{'volume_id': vol.id,
                                   'updates': {'status': 'error'}}], [])

            self.volume.failover(self.context,
                                 secondary_backend_id='secondary')

        not_called.assert_not_called()
        called.assert_called_once_with(self.context, [vol],
                                       secondary_id='secondary', groups=[])

        expected_update = {'replication_status': rep_field.FAILED_OVER,
                           'active_backend_id': 'secondary',
                           'disabled': True,
                           'disabled_reason': 'failed-over'}
        finish_mock.assert_called_once_with(self.context, svc, expected_update)

        volume = objects.Volume.get_by_id(self.context, vol.id)
        self.assertEqual('error', volume.status)

    @ddt.data(('host1', None), (None, 'mycluster'))
    @ddt.unpack
    def test_failover_api_fail_multiple_results(self, host, cluster):
        """Fail if we try to failover multiple backends in the same request."""
        rep_field = fields.ReplicationStatus
        clusters = [
            objects.Cluster(self.context,
                            name='mycluster@backend1',
                            replication_status=rep_field.ENABLED,
                            binary=constants.VOLUME_BINARY),
            objects.Cluster(self.context,
                            name='mycluster@backend2',
                            replication_status=rep_field.ENABLED,
                            binary=constants.VOLUME_BINARY)
        ]
        clusters[0].create()
        clusters[1].create()
        services = [
            objects.Service(self.context, host='host1@backend1',
                            cluster_name=clusters[0].name,
                            replication_status=rep_field.ENABLED,
                            binary=constants.VOLUME_BINARY),
            objects.Service(self.context, host='host1@backend2',
                            cluster_name=clusters[1].name,
                            replication_status=rep_field.ENABLED,
                            binary=constants.VOLUME_BINARY),
        ]
        services[0].create()
        services[1].create()
        self.assertRaises(exception.Invalid,
                          self.volume_api.failover, self.context, host,
                          cluster)

    def test_failover_api_not_found(self):
        self.assertRaises(exception.ServiceNotFound, self.volume_api.failover,
                          self.context, 'host1', None)

    @mock.patch('cinder.volume.rpcapi.VolumeAPI.failover')
    def test_failover_api_success_multiple_results(self, failover_mock):
        """Succeed to failover multiple services for the same backend."""
        rep_field = fields.ReplicationStatus
        cluster_name = 'mycluster@backend1'
        cluster = objects.Cluster(self.context,
                                  name=cluster_name,
                                  replication_status=rep_field.ENABLED,
                                  binary=constants.VOLUME_BINARY)
        cluster.create()
        services = [
            objects.Service(self.context, host='host1@backend1',
                            cluster_name=cluster_name,
                            replication_status=rep_field.ENABLED,
                            binary=constants.VOLUME_BINARY),
            objects.Service(self.context, host='host2@backend1',
                            cluster_name=cluster_name,
                            replication_status=rep_field.ENABLED,
                            binary=constants.VOLUME_BINARY),
        ]
        services[0].create()
        services[1].create()

        self.volume_api.failover(self.context, None, cluster_name,
                                 mock.sentinel.secondary_id)

        for service in services + [cluster]:
            self.assertEqual(rep_field.ENABLED, service.replication_status)
            service.refresh()
            self.assertEqual(rep_field.FAILING_OVER,
                             service.replication_status)

        failover_mock.assert_called_once_with(self.context, mock.ANY,
                                              mock.sentinel.secondary_id)
        self.assertEqual(services[0].id, failover_mock.call_args[0][1].id)

    @mock.patch('cinder.volume.rpcapi.VolumeAPI.failover')
    def test_failover_api_success_multiple_results_not_updated(self,
                                                               failover_mock):
        """Succeed to failover even if a service is not updated."""
        rep_field = fields.ReplicationStatus
        cluster_name = 'mycluster@backend1'
        cluster = objects.Cluster(self.context,
                                  name=cluster_name,
                                  replication_status=rep_field.ENABLED,
                                  binary=constants.VOLUME_BINARY)
        cluster.create()
        services = [
            objects.Service(self.context, host='host1@backend1',
                            cluster_name=cluster_name,
                            replication_status=rep_field.ENABLED,
                            binary=constants.VOLUME_BINARY),
            objects.Service(self.context, host='host2@backend1',
                            cluster_name=cluster_name,
                            replication_status=rep_field.ERROR,
                            binary=constants.VOLUME_BINARY),
        ]
        services[0].create()
        services[1].create()

        self.volume_api.failover(self.context, None, cluster_name,
                                 mock.sentinel.secondary_id)

        for service in services[:1] + [cluster]:
            service.refresh()
            self.assertEqual(rep_field.FAILING_OVER,
                             service.replication_status)

        services[1].refresh()
        self.assertEqual(rep_field.ERROR, services[1].replication_status)

        failover_mock.assert_called_once_with(self.context, mock.ANY,
                                              mock.sentinel.secondary_id)
        self.assertEqual(services[0].id, failover_mock.call_args[0][1].id)

    @mock.patch('cinder.volume.rpcapi.VolumeAPI.failover')
    def test_failover_api_fail_multiple_results_not_updated(self,
                                                            failover_mock):
        """Fail if none of the services could be updated."""
        rep_field = fields.ReplicationStatus
        cluster_name = 'mycluster@backend1'
        cluster = objects.Cluster(self.context,
                                  name=cluster_name,
                                  replication_status=rep_field.ENABLED,
                                  binary=constants.VOLUME_BINARY)
        cluster.create()
        down_time = timeutils.datetime.datetime(1970, 1, 1)
        services = [
            # This service is down
            objects.Service(self.context, host='host1@backend1',
                            cluster_name=cluster_name,
                            replication_status=rep_field.ENABLED,
                            created_at=down_time,
                            updated_at=down_time,
                            modified_at=down_time,
                            binary=constants.VOLUME_BINARY),
            # This service is not with the right replication status
            objects.Service(self.context, host='host2@backend1',
                            cluster_name=cluster_name,
                            replication_status=rep_field.ERROR,
                            binary=constants.VOLUME_BINARY),
        ]
        services[0].create()
        services[1].create()

        self.assertRaises(exception.InvalidInput,
                          self.volume_api.failover, self.context, None,
                          cluster_name, mock.sentinel.secondary_id)

        for service in services:
            svc = objects.Service.get_by_id(self.context, service.id)
            self.assertEqual(service.replication_status,
                             svc.replication_status)

        cluster.refresh()
        self.assertEqual(rep_field.ENABLED, cluster.replication_status)

        failover_mock.assert_not_called()

    def _check_failover_db(self, get_method, expected_results):
        db_data = get_method.get_all(self.context, None)
        db_data = {e.id: e for e in db_data}
        for expected in expected_results:
            id_ = expected['id']
            for key, value in expected.items():
                self.assertEqual(value, getattr(db_data[id_], key),
                                 '(%s) ref=%s != act=%s' % (
                                     key, expected, dict(db_data[id_])))

    def _test_failover_model_updates(self, in_volumes, in_snapshots,
                                     driver_volumes, driver_result,
                                     out_volumes, out_snapshots,
                                     in_groups=None, out_groups=None,
                                     driver_group_result=None,
                                     secondary_id=None):
        host = vol_utils.extract_host(self.manager.host)
        utils.create_service(self.context, {'host': host,
                                            'binary': constants.VOLUME_BINARY})
        for volume in in_volumes:
            utils.create_volume(self.context, self.manager.host, **volume)

        for snapshot in in_snapshots:
            utils.create_snapshot(self.context, **snapshot)

        for group in in_groups:
            utils.create_group(self.context, self.manager.host, **group)

        with mock.patch.object(
                self.manager.driver, 'failover_host',
                return_value=(secondary_id, driver_result,
                              driver_group_result)) as driver_mock:
            self.manager.failover_host(self.context, secondary_id)

            self.assertSetEqual(driver_volumes,
                                {v.id for v in driver_mock.call_args[0][1]})

        self._check_failover_db(objects.VolumeList, out_volumes)
        self._check_failover_db(objects.SnapshotList, out_snapshots)
        self._check_failover_db(objects.GroupList, out_groups)

    @mock.patch('cinder.volume.utils.is_group_a_type')
    def test_failover_host_model_updates(self, mock_group_type):
        status = fields.ReplicationStatus
        mock_group_type.return_value = True
        in_groups = [
            {'id': str(uuid.uuid4()), 'status': 'available',
             'group_type_id': fake.GROUP_TYPE_ID,
             'volume_type_ids': [fake.VOLUME_TYPE_ID],
             'replication_status': status.FAILOVER_ERROR},
            {'id': str(uuid.uuid4()), 'status': 'available',
             'group_type_id': fake.GROUP_TYPE_ID,
             'volume_type_ids': [fake.VOLUME_TYPE_ID],
             'replication_status': status.ENABLED},
        ]
        driver_group_result = [
            {'group_id': in_groups[0]['id'],
             'updates': {'replication_status': status.FAILOVER_ERROR}},
            {'group_id': in_groups[1]['id'],
             'updates': {'replication_status': status.FAILED_OVER}},
        ]
        out_groups = [
            {'id': in_groups[0]['id'], 'status': 'error',
             'replication_status': status.FAILOVER_ERROR},
            {'id': in_groups[1]['id'], 'status': in_groups[1]['status'],
             'replication_status': status.FAILED_OVER},
        ]

        # test volumes
        in_volumes = [
            {'id': str(uuid.uuid4()), 'status': 'available',
             'replication_status': status.DISABLED},
            {'id': str(uuid.uuid4()), 'status': 'in-use',
             'replication_status': status.NOT_CAPABLE},
            {'id': str(uuid.uuid4()), 'status': 'available',
             'replication_status': status.FAILOVER_ERROR},
            {'id': str(uuid.uuid4()), 'status': 'in-use',
             'replication_status': status.ENABLED},
            {'id': str(uuid.uuid4()), 'status': 'available',
             'replication_status': status.FAILOVER_ERROR},
            {'id': str(uuid.uuid4()), 'status': 'in-use',
             'replication_status': status.ENABLED},
            {'id': str(uuid.uuid4()), 'status': 'available',
             'group_id': in_groups[0]['id'],
             'replication_status': status.FAILOVER_ERROR},
            {'id': str(uuid.uuid4()), 'status': 'available',
             'group_id': in_groups[1]['id'],
             'replication_status': status.ENABLED},
        ]
        in_snapshots = [
            {'id': v['id'], 'volume_id': v['id'], 'status': 'available'}
            for v in in_volumes
        ]
        driver_volumes = {
            v['id'] for v in in_volumes
            if v['replication_status'] not in (status.DISABLED,
                                               status.NOT_CAPABLE)}
        driver_result = [
            {'volume_id': in_volumes[3]['id'],
             'updates': {'status': 'error'}},
            {'volume_id': in_volumes[4]['id'],
             'updates': {'replication_status': status.FAILOVER_ERROR}},
            {'volume_id': in_volumes[5]['id'],
             'updates': {'replication_status': status.FAILED_OVER}},
            {'volume_id': in_volumes[6]['id'],
             'updates': {'replication_status': status.FAILOVER_ERROR}},
            {'volume_id': in_volumes[7]['id'],
             'updates': {'replication_status': status.FAILED_OVER}},
        ]
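        # Expected volume rows: non-replicated volumes are errored and
        # marked NOT_CAPABLE by the manager itself; replicated volumes
        # without a driver update default to FAILED_OVER.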
        out_volumes = [
            {'id': in_volumes[0]['id'], 'status': 'error',
             'replication_status': status.NOT_CAPABLE,
             'previous_status': in_volumes[0]['status']},
            {'id': in_volumes[1]['id'], 'status': 'error',
             'replication_status': status.NOT_CAPABLE,
             'previous_status': in_volumes[1]['status']},
            {'id': in_volumes[2]['id'], 'status': in_volumes[2]['status'],
             'replication_status': status.FAILED_OVER},
            {'id': in_volumes[3]['id'], 'status': 'error',
             'previous_status': in_volumes[3]['status'],
             'replication_status': status.FAILOVER_ERROR},
            {'id': in_volumes[4]['id'], 'status': 'error',
             'previous_status': in_volumes[4]['status'],
             'replication_status': status.FAILOVER_ERROR},
            {'id': in_volumes[5]['id'], 'status': in_volumes[5]['status'],
             'replication_status': status.FAILED_OVER},
            {'id': in_volumes[6]['id'], 'status': 'error',
             'previous_status': in_volumes[6]['status'],
             'replication_status': status.FAILOVER_ERROR},
            {'id': in_volumes[7]['id'], 'status': in_volumes[7]['status'],
             'replication_status': status.FAILED_OVER},
        ]
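        # Snapshots follow their volume: error when the volume errored,
        # otherwise they stay available.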
        out_snapshots = [
            {'id': ov['id'],
             'status': 'error' if ov['status'] == 'error' else 'available'}
            for ov in out_volumes
        ]

        self._test_failover_model_updates(in_volumes, in_snapshots,
                                          driver_volumes, driver_result,
                                          out_volumes, out_snapshots,
                                          in_groups, out_groups,
                                          driver_group_result)

    def test_failback_host_model_updates(self):
        status = fields.ReplicationStatus
        # Sequential ids keep the fixtures easy to follow; they are
        # replaced with real UUIDs just below.
        in_volumes = [
            {'id': 0, 'status': 'available',
             'replication_status': status.DISABLED},
            {'id': 1, 'status': 'in-use',
             'replication_status': status.NOT_CAPABLE},
            {'id': 2, 'status': 'available',
             'replication_status': status.FAILOVER_ERROR},
            {'id': 3, 'status': 'in-use',
             'replication_status': status.ENABLED},
            {'id': 4, 'status': 'available',
             'replication_status': status.FAILOVER_ERROR},
            {'id': 5, 'status': 'in-use',
             'replication_status': status.FAILED_OVER},
        ]
        # Generate real volume IDs
        for volume in in_volumes:
            volume['id'] = str(uuid.uuid4())
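        # One snapshot per volume, each in a different starting status, to
        # verify which statuses survive the failback.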
        in_snapshots = [
            {'id': in_volumes[0]['id'], 'volume_id': in_volumes[0]['id'],
             'status': fields.SnapshotStatus.ERROR_DELETING},
            {'id': in_volumes[1]['id'], 'volume_id': in_volumes[1]['id'],
             'status': fields.SnapshotStatus.AVAILABLE},
            {'id': in_volumes[2]['id'], 'volume_id': in_volumes[2]['id'],
             'status': fields.SnapshotStatus.CREATING},
            {'id': in_volumes[3]['id'], 'volume_id': in_volumes[3]['id'],
             'status': fields.SnapshotStatus.DELETING},
            {'id': in_volumes[4]['id'], 'volume_id': in_volumes[4]['id'],
             'status': fields.SnapshotStatus.CREATING},
            {'id': in_volumes[5]['id'], 'volume_id': in_volumes[5]['id'],
             'status': fields.SnapshotStatus.CREATING},
        ]
        driver_volumes = {
            v['id'] for v in in_volumes
            if v['replication_status'] not in (status.DISABLED,
                                               status.NOT_CAPABLE)}
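        # Simulated driver updates: one volume errors out, one reports a
        # failed failback, and one reports a clean FAILED_OVER.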
        driver_result = [
            {'volume_id': in_volumes[3]['id'],
             'updates': {'status': 'error'}},
            {'volume_id': in_volumes[4]['id'],
             'updates': {'replication_status': status.FAILOVER_ERROR}},
            {'volume_id': in_volumes[5]['id'],
             'updates': {'replication_status': status.FAILED_OVER}},
        ]
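        # Expected rows after the failback: FAILED_OVER results are
        # translated back to ENABLED, while errored volumes keep
        # FAILOVER_ERROR and get status 'error'.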
        out_volumes = [
            {'id': in_volumes[0]['id'], 'status': in_volumes[0]['status'],
             'replication_status': in_volumes[0]['replication_status'],
             'previous_status': None},
            {'id': in_volumes[1]['id'], 'status': in_volumes[1]['status'],
             'replication_status': in_volumes[1]['replication_status'],
             'previous_status': None},
            {'id': in_volumes[2]['id'], 'status': in_volumes[2]['status'],
             'replication_status': status.ENABLED},
            {'id': in_volumes[3]['id'], 'status': 'error',
             'previous_status': in_volumes[3]['status'],
             'replication_status': status.FAILOVER_ERROR},
            {'id': in_volumes[4]['id'], 'status': 'error',
             'previous_status': in_volumes[4]['status'],
             'replication_status': status.FAILOVER_ERROR},
            {'id': in_volumes[5]['id'], 'status': in_volumes[5]['status'],
             'replication_status': status.ENABLED},
        ]
        # Snapshot status is preserved, except for snapshots whose volumes
        # errored during the failback.  Copy the dicts themselves: a plain
        # in_snapshots[:] slice would share them, so mutating the expected
        # results would also change the fixtures before they are created.
        out_snapshots = [snapshot.copy() for snapshot in in_snapshots]
        out_snapshots[3]['status'] = fields.SnapshotStatus.ERROR
        out_snapshots[4]['status'] = fields.SnapshotStatus.ERROR

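        # FAILBACK_SENTINEL as the secondary id drives the manager's
        # failback path rather than a regular failover.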
        self._test_failover_model_updates(in_volumes, in_snapshots,
                                          driver_volumes, driver_result,
                                          out_volumes, out_snapshots,
                                          [], [], [],
                                          self.manager.FAILBACK_SENTINEL)

    @mock.patch('cinder.utils.log_unsupported_driver_warning', mock.Mock())
    @mock.patch('cinder.utils.require_driver_initialized', mock.Mock())
    def test_init_host_with_rpc_clustered_replication(self):
        # These are not OVOs but ORM instances
        cluster = utils.create_cluster(self.context)
        service = utils.create_service(self.context,
                                       {'cluster_name': cluster.name,
                                        'binary': cluster.binary})
        self.assertNotEqual(fields.ReplicationStatus.ENABLED,
                            cluster.replication_status)
        self.assertNotEqual(fields.ReplicationStatus.ENABLED,
                            service.replication_status)

        vol_manager = manager.VolumeManager(
            'cinder.tests.fake_driver.FakeHAReplicatedLoggingVolumeDriver',
            host=service.host, cluster=cluster.name)
        vol_manager.driver = mock.Mock()
        vol_manager.driver.get_volume_stats.return_value = {
            'replication_enabled': True
        }
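        # init_host_with_rpc should propagate the driver's reported
        # replication support to both the service and its cluster rows.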
        vol_manager.init_host_with_rpc()

        cluster_ovo = objects.Cluster.get_by_id(self.context, cluster.id)
        service_ovo = objects.Service.get_by_id(self.context, service.id)

        self.assertEqual(fields.ReplicationStatus.ENABLED,
                         cluster_ovo.replication_status)
        self.assertEqual(fields.ReplicationStatus.ENABLED,
                         service_ovo.replication_status)