Esempio n. 1
0
    def _set_volume_state_and_notify(self, method, updates, context, ex,
                                     request_spec, msg=None):
        """Record a scheduling failure and emit a scheduler error event.

        Logs *msg* (or a default built from *method* and *ex*), writes the
        ``volume_state`` entry of *updates* to the volume referenced by
        *request_spec* (when a volume id is present) and publishes a
        ``scheduler.<method>`` error notification.
        """
        # TODO(harlowja): move into a task that just does this later.
        msg = msg or (_LE("Failed to schedule_%(method)s: %(ex)s") %
                      {'method': method, 'ex': six.text_type(ex)})
        LOG.error(msg)

        state = updates['volume_state']
        volume_properties = request_spec.get('volume_properties', {})
        vol_id = request_spec.get('volume_id', None)
        if vol_id:
            db.volume_update(context, vol_id, state)

        payload = {
            'request_spec': request_spec,
            'volume_properties': volume_properties,
            'volume_id': vol_id,
            'state': state,
            'method': method,
            'reason': ex,
        }
        notifier = rpc.get_notifier("scheduler")
        notifier.error(context, 'scheduler.' + method, payload)
Esempio n. 2
0
 def test_name_id_diff(self):
     """Verify the volume name follows name_id after a simulated migration."""
     volume = testutils.create_volume(self.ctxt, size=1)
     db.volume_update(self.ctxt, volume['id'], {'name_id': 'fake'})
     refreshed = db.volume_get(self.ctxt, volume['id'])
     # The template-based name must be driven by name_id, not by id.
     self.assertEqual(refreshed['name'], CONF.volume_name_template % 'fake')
    def test_transfer_accept_invalid_volume(self, mock_notify):
        """Accepting a transfer must fail once the volume has left the
        'awaiting-transfer' state, and only the accept.start event fires.
        """
        svc = self.start_service('volume', host='test_host')
        self.addCleanup(svc.stop)
        tx_api = transfer_api.API()

        utils.create_volume(self.ctxt, id='1',
                            updated_at=self.updated_at)
        transfer = tx_api.create(self.ctxt, '1', 'Description')
        volume = db.volume_get(self.ctxt, '1')
        # Creating a transfer parks the volume in 'awaiting-transfer'.
        self.assertEqual('awaiting-transfer', volume['status'],
                         'Unexpected state')

        calls = [mock.call(self.ctxt, mock.ANY, "transfer.create.start"),
                 mock.call(self.ctxt, mock.ANY, "transfer.create.end")]
        mock_notify.assert_has_calls(calls)
        self.assertEqual(2, mock_notify.call_count)

        # Force an unexpected status so accept is rejected, then restore
        # the valid one so the volume is left in a consistent state.
        db.volume_update(self.ctxt, '1', {'status': 'wrong'})
        self.assertRaises(exception.InvalidVolume,
                          tx_api.accept,
                          self.ctxt, transfer['id'], transfer['auth_key'])
        db.volume_update(self.ctxt, '1', {'status': 'awaiting-transfer'})

        # Because the InvalidVolume exception is raised in tx_api, so there is
        # only transfer.accept.start called and missing transfer.accept.end.
        calls = [mock.call(self.ctxt, mock.ANY, "transfer.accept.start")]
        mock_notify.assert_has_calls(calls)
        self.assertEqual(3, mock_notify.call_count)
Esempio n. 4
0
 def test_transfer_invalid_encrypted_volume(self):
     """Creating a transfer for an encrypted volume must be rejected."""
     api = transfer_api.API()
     vol = utils.create_volume(self.ctxt, updated_at=self.updated_at)
     db.volume_update(
         self.ctxt, vol.id, {'encryption_key_id': fake.ENCRYPTION_KEY_ID})
     self.assertRaises(exception.InvalidVolume,
                       api.create,
                       self.ctxt, vol.id, 'Description')
Esempio n. 5
0
 def test_attach_attaching_volume_with_different_mode(self):
     """Test that attaching volume reserved for another mode fails."""
     # admin context
     ctx = context.RequestContext('admin', 'fake', True)
     # current status is available
     volume = db.volume_create(
         ctx, {
             'status': 'available',
             'host': 'test',
             'provider_location': '',
             'size': 1
         })
     # start service to handle rpc messages for attach requests
     svc = self.start_service('volume', host='test')
     # Simulate an attach reservation already in progress for an instance.
     values = {
         'status': 'attaching',
         'instance_uuid': fakes.get_fake_uuid()
     }
     db.volume_update(ctx, volume['id'], values)
     # The pending reservation was made read-write ('rw')...
     db.volume_admin_metadata_update(ctx, volume['id'],
                                     {"attached_mode": 'rw'}, False)
     mountpoint = '/dev/vbd'
     # ...so a read-only ('ro') attach of the same volume must fail.
     self.assertRaises(exception.InvalidVolume, self.volume_api.attach, ctx,
                       volume, values['instance_uuid'], None, mountpoint,
                       'ro')
     # cleanup
     svc.stop()
Esempio n. 6
0
 def test_attach_attaching_volume_with_different_mode(self):
     """Test that attaching volume reserved for another mode fails."""
     # admin context
     ctx = context.RequestContext('admin', 'fake', True)
     # current status is available
     volume = db.volume_create(ctx, {'status': 'available', 'host': 'test',
                                     'provider_location': '', 'size': 1})
     # start service to handle rpc messages for attach requests
     svc = self.start_service('volume', host='test')
     # Simulate an attach reservation already in progress for an instance.
     values = {'status': 'attaching',
               'instance_uuid': fakes.get_fake_uuid()}
     db.volume_update(ctx, volume['id'], values)
     # The pending reservation was made read-write ('rw')...
     db.volume_admin_metadata_update(ctx, volume['id'],
                                     {"attached_mode": 'rw'}, False)
     mountpoint = '/dev/vbd'
     # ...so a read-only ('ro') attach of the same volume must fail.
     self.assertRaises(exception.InvalidVolume,
                       self.volume_api.attach,
                       ctx,
                       volume,
                       values['instance_uuid'],
                       None,
                       mountpoint,
                       'ro')
     # cleanup
     svc.stop()
Esempio n. 7
0
    def _set_volume_state_and_notify(self, method, updates, context, ex,
                                     request_spec, msg=None):
        """Log a scheduling failure, persist the volume state and notify.

        When *msg* is empty a default message is built from *method* and
        *ex*; the ``volume_state`` entry of *updates* is written to the
        volume named by *request_spec* (if any) and a
        ``scheduler.<method>`` error notification is sent.
        """
        # TODO(harlowja): move into a task that just does this later.
        msg = msg or (_("Failed to schedule_%(method)s: %(ex)s") %
                      {'method': method, 'ex': ex})
        LOG.error(msg)

        state = updates['volume_state']
        volume_properties = request_spec.get('volume_properties', {})
        vol_id = request_spec.get('volume_id', None)
        if vol_id:
            db.volume_update(context, vol_id, state)

        payload = {
            'request_spec': request_spec,
            'volume_properties': volume_properties,
            'volume_id': vol_id,
            'state': state,
            'method': method,
            'reason': ex,
        }
        notifier = rpc.get_notifier("scheduler")
        notifier.error(context, 'scheduler.' + method, payload)
Esempio n. 8
0
 def test_name_id_snapshot_volume_name(self):
     """A snapshot's volume_name must track the volume's name_id."""
     volume = testutils.create_volume(self.ctxt, size=1)
     db.volume_update(self.ctxt, volume['id'], {'name_id': 'fake'})
     snapshot = testutils.create_snapshot(self.ctxt, volume['id'])
     self.assertEqual(snapshot['volume_name'],
                      CONF.volume_name_template % 'fake')
Esempio n. 9
0
    def save(self):
        """Persist changed fields of this volume object to the database.

        Relationship fields (consistencygroup, glance_metadata, snapshots)
        cannot be saved through this path; metadata fields go through their
        dedicated DB APIs; all remaining changes are written as plain
        columns via db.volume_update().
        """
        updates = self.cinder_obj_get_changes()
        if updates:
            # Linked objects must be saved through their own APIs.
            if 'consistencygroup' in updates:
                raise exception.ObjectActionError(
                    action='save', reason=_('consistencygroup changed'))
            if 'glance_metadata' in updates:
                raise exception.ObjectActionError(
                    action='save', reason=_('glance_metadata changed'))
            if 'snapshots' in updates:
                raise exception.ObjectActionError(
                    action='save', reason=_('snapshots changed'))
            if 'metadata' in updates:
                # Metadata items that are not specified in the
                # self.metadata will be deleted (final True argument).
                # Pop it so it is not also written as a column below.
                metadata = updates.pop('metadata', None)
                self.metadata = db.volume_metadata_update(self._context,
                                                          self.id, metadata,
                                                          True)
            # Admin metadata is only saved in an admin context; it is
            # likewise replaced wholesale (delete=True) and popped.
            if self._context.is_admin and 'admin_metadata' in updates:
                metadata = updates.pop('admin_metadata', None)
                self.admin_metadata = db.volume_admin_metadata_update(
                    self._context, self.id, metadata, True)

            db.volume_update(self._context, self.id, updates)
            self.obj_reset_changes()
Esempio n. 10
0
    def test_create_volume_exception_puts_volume_in_error_state(self):
        """Test NoValidHost exception behavior for create_volume.

        Puts the volume in 'error' state and eats the exception.
        """
        fake_volume_id = 1
        self._mox_schedule_method_helper('schedule_create_volume')
        self.mox.StubOutWithMock(db, 'volume_update')

        topic = 'fake_topic'
        volume_id = fake_volume_id
        request_spec = {'volume_id': fake_volume_id}

        # Record phase: the driver raises NoValidHost, after which the
        # manager is expected to write status 'error' to the volume.
        self.manager.driver.schedule_create_volume(
            self.context, request_spec, {}).AndRaise(
                exception.NoValidHost(reason=""))
        db.volume_update(self.context, fake_volume_id, {'status': 'error'})

        # Replay phase: create_volume must swallow NoValidHost.
        self.mox.ReplayAll()
        self.manager.create_volume(
            self.context,
            topic,
            volume_id,
            request_spec=request_spec,
            filter_properties={})
Esempio n. 11
0
 def fake_migrate_volume_completion(ctxt,
                                    volume,
                                    new_volume,
                                    error=False):
     """Stub that marks the migration 'completing', then fails on purpose."""
     db.volume_update(
         ctxt, volume['id'], {'migration_status': 'completing'})
     raise processutils.ProcessExecutionError
Esempio n. 12
0
 def test_name_id_snapshot_volume_name(self):
     """Make sure snapshot['volume_name'] is updated."""
     vol_ref = testutils.create_volume(self.ctxt, size=1)
     db.volume_update(self.ctxt, vol_ref['id'], {'name_id': 'fake'})
     snap_ref = testutils.create_snapshot(self.ctxt, vol_ref['id'])
     expected_name = CONF.volume_name_template % 'fake'
     # assertEquals is a deprecated alias (removed in Python 3.12);
     # use assertEqual with the conventional (expected, actual) order.
     self.assertEqual(expected_name, snap_ref['volume_name'])
Esempio n. 13
0
    def test_cast_to_volume_host_update_db_with_volume_id(self):
        """cast_to_volume_host(update_db=True) must stamp host and
        scheduled_at on the volume before casting the RPC message.
        """
        host = 'fake_host1'
        method = 'fake_method'
        fake_kwargs = {'volume_id': 31337, 'extra_arg': 'meow'}
        queue = 'fake_queue'

        self.mox.StubOutWithMock(timeutils, 'utcnow')
        self.mox.StubOutWithMock(db, 'volume_update')
        self.mox.StubOutWithMock(rpc, 'queue_get_for')
        self.mox.StubOutWithMock(rpc, 'cast')

        # Record phase: DB update first, then queue lookup and cast.
        timeutils.utcnow().AndReturn('fake-now')
        db.volume_update(self.context, 31337, {
            'host': host,
            'scheduled_at': 'fake-now'
        })
        rpc.queue_get_for(self.context, FLAGS.volume_topic,
                          host).AndReturn(queue)
        rpc.cast(self.context, queue, {'method': method, 'args': fake_kwargs})

        self.mox.ReplayAll()
        driver.cast_to_volume_host(self.context,
                                   host,
                                   method,
                                   update_db=True,
                                   **fake_kwargs)
Esempio n. 14
0
    def test_transfer_accept_invalid_volume(self, mock_notify):
        """Accepting a transfer must fail once the volume has left the
        'awaiting-transfer' state, and only the accept.start event fires.
        """
        svc = self.start_service('volume', host='test_host')
        self.addCleanup(svc.stop)
        tx_api = transfer_api.API()

        utils.create_volume(self.ctxt, id='1', updated_at=self.updated_at)
        transfer = tx_api.create(self.ctxt, '1', 'Description')
        volume = db.volume_get(self.ctxt, '1')
        # Creating a transfer parks the volume in 'awaiting-transfer'.
        self.assertEqual('awaiting-transfer', volume['status'],
                         'Unexpected state')

        calls = [
            mock.call(self.ctxt, mock.ANY, "transfer.create.start"),
            mock.call(self.ctxt, mock.ANY, "transfer.create.end")
        ]
        mock_notify.assert_has_calls(calls)
        self.assertEqual(2, mock_notify.call_count)

        # Force an unexpected status so accept is rejected, then restore
        # the valid one so the volume is left in a consistent state.
        db.volume_update(self.ctxt, '1', {'status': 'wrong'})
        self.assertRaises(exception.InvalidVolume, tx_api.accept, self.ctxt,
                          transfer['id'], transfer['auth_key'])
        db.volume_update(self.ctxt, '1', {'status': 'awaiting-transfer'})

        # Because the InvalidVolume exception is raised in tx_api, so there is
        # only transfer.accept.start called and missing transfer.accept.end.
        calls = [mock.call(self.ctxt, mock.ANY, "transfer.accept.start")]
        mock_notify.assert_has_calls(calls)
        self.assertEqual(3, mock_notify.call_count)
Esempio n. 15
0
 def test_name_id_diff(self):
     """The volume name must be derived from name_id after migration."""
     vol_ref = testutils.create_volume(self.ctxt, size=1)
     db.volume_update(self.ctxt, vol_ref['id'], {'name_id': 'fake'})
     updated = db.volume_get(self.ctxt, vol_ref['id'])
     want = CONF.volume_name_template % 'fake'
     self.assertEqual(updated['name'], want)
Esempio n. 16
0
    def test_init_host(self):
        """Make sure stuck volumes and backups are reset to correct
        states when backup_manager.init_host() is called
        """
        # Volumes stuck mid-backup and mid-restore...
        vol1_id = self._create_volume_db_entry()
        self._create_volume_attach(vol1_id)
        db.volume_update(self.ctxt, vol1_id, {'status': 'backing-up'})
        vol2_id = self._create_volume_db_entry()
        self._create_volume_attach(vol2_id)
        db.volume_update(self.ctxt, vol2_id, {'status': 'restoring-backup'})
        # ...and backups stuck in transitional states.
        backup1 = self._create_backup_db_entry(status='creating')
        backup2 = self._create_backup_db_entry(status='restoring')
        backup3 = self._create_backup_db_entry(status='deleting')

        self.backup_mgr.init_host()
        # backing-up -> available; restoring-backup -> error_restoring
        vol1 = db.volume_get(self.ctxt, vol1_id)
        self.assertEqual(vol1['status'], 'available')
        vol2 = db.volume_get(self.ctxt, vol2_id)
        self.assertEqual(vol2['status'], 'error_restoring')

        # creating -> error; restoring -> available; deleting -> gone
        backup1 = db.backup_get(self.ctxt, backup1.id)
        self.assertEqual(backup1['status'], 'error')
        backup2 = db.backup_get(self.ctxt, backup2.id)
        self.assertEqual(backup2['status'], 'available')
        self.assertRaises(exception.BackupNotFound, db.backup_get, self.ctxt,
                          backup3.id)
Esempio n. 17
0
    def test_init_host(self):
        """Make sure stuck volumes and backups are reset to correct
        states when backup_manager.init_host() is called
        """
        # Volumes stuck mid-backup and mid-restore...
        vol1_id = self._create_volume_db_entry()
        self._create_volume_attach(vol1_id)
        db.volume_update(self.ctxt, vol1_id, {"status": "backing-up"})
        vol2_id = self._create_volume_db_entry()
        self._create_volume_attach(vol2_id)
        db.volume_update(self.ctxt, vol2_id, {"status": "restoring-backup"})
        # ...and backups stuck in transitional states.
        backup1_id = self._create_backup_db_entry(status="creating")
        backup2_id = self._create_backup_db_entry(status="restoring")
        backup3_id = self._create_backup_db_entry(status="deleting")

        self.backup_mgr.init_host()
        # backing-up -> available; restoring-backup -> error_restoring
        vol1 = db.volume_get(self.ctxt, vol1_id)
        self.assertEqual(vol1["status"], "available")
        vol2 = db.volume_get(self.ctxt, vol2_id)
        self.assertEqual(vol2["status"], "error_restoring")

        # creating -> error; restoring -> available; deleting -> gone
        backup1 = db.backup_get(self.ctxt, backup1_id)
        self.assertEqual(backup1["status"], "error")
        backup2 = db.backup_get(self.ctxt, backup2_id)
        self.assertEqual(backup2["status"], "available")
        self.assertRaises(exception.BackupNotFound, db.backup_get, self.ctxt, backup3_id)
Esempio n. 18
0
 def test_name_id_snapshot_volume_name(self):
     """A snapshot must report the volume name derived from name_id."""
     volume = testutils.create_volume(self.ctxt, size=1)
     db.volume_update(self.ctxt, volume["id"], {"name_id": "fake"})
     snapshot = testutils.create_snapshot(self.ctxt, volume["id"])
     self.assertEqual(CONF.volume_name_template % "fake",
                      snapshot["volume_name"])
Esempio n. 19
0
 def test_name_id_diff(self):
     """Changing name_id (as migration does) must change the volume name."""
     volume = testutils.create_volume(self.ctxt, size=1)
     db.volume_update(self.ctxt, volume["id"], {"name_id": "fake"})
     volume = db.volume_get(self.ctxt, volume["id"])
     self.assertEqual(CONF.volume_name_template % "fake", volume["name"])
Esempio n. 20
0
    def test_migrate_volume_exception_returns_volume_state(self):
        """Test NoValidHost exception behavior for migrate_volume_to_host.

        Resets the volume's migration_status and eats the exception.
        """
        fake_volume_id = 1
        self._mox_schedule_method_helper('host_passes_filters')
        self.mox.StubOutWithMock(db, 'volume_update')

        topic = 'fake_topic'
        volume_id = fake_volume_id
        request_spec = {'volume_id': fake_volume_id}

        # Record phase: the filter check raises NoValidHost; the manager
        # is then expected to clear migration_status on the volume.
        self.manager.driver.host_passes_filters(
            self.context, 'host', request_spec, {}).AndRaise(
                exception.NoValidHost(reason=""))
        db.volume_update(self.context, fake_volume_id,
                         {'migration_status': None})

        # Replay phase: must swallow NoValidHost rather than propagate it.
        self.mox.ReplayAll()
        self.manager.migrate_volume_to_host(
            self.context,
            topic,
            volume_id,
            'host',
            True,
            request_spec=request_spec,
            filter_properties={})
Esempio n. 21
0
    def test_transfer_accept(self):
        """Exercise transfer accept: bad transfer id, bad auth key, bad
        volume status, then a successful accept that re-owns the volume.
        """
        tx_api = transfer_api.API()
        volume = utils.create_volume(self.ctxt,
                                     id='1',
                                     updated_at=self.updated_at)
        transfer = tx_api.create(self.ctxt, '1', 'Description')
        volume = db.volume_get(self.ctxt, '1')
        # Creating a transfer parks the volume in 'awaiting-transfer'.
        self.assertEqual('awaiting-transfer', volume['status'],
                         'Unexpected state')

        # Unknown transfer id.
        self.assertRaises(exception.TransferNotFound, tx_api.accept, self.ctxt,
                          '2', transfer['auth_key'])

        # Wrong auth key.
        self.assertRaises(exception.InvalidAuthKey, tx_api.accept, self.ctxt,
                          transfer['id'], 'wrong')

        # Volume no longer awaiting transfer; restore the status after.
        db.volume_update(self.ctxt, '1', {'status': 'wrong'})
        self.assertRaises(exception.InvalidVolume, tx_api.accept, self.ctxt,
                          transfer['id'], transfer['auth_key'])
        db.volume_update(self.ctxt, '1', {'status': 'awaiting-transfer'})

        # Successful accept transfers ownership to the accepting context.
        self.ctxt.user_id = 'new_user_id'
        self.ctxt.project_id = 'new_project_id'
        response = tx_api.accept(self.ctxt, transfer['id'],
                                 transfer['auth_key'])
        volume = db.volume_get(self.ctxt, '1')
        self.assertEqual(volume['project_id'], 'new_project_id',
                         'Unexpected project id')
        self.assertEqual(volume['user_id'], 'new_user_id',
                         'Unexpected user id')

        self.assertEqual(volume['id'], response['volume_id'],
                         'Unexpected volume id in response.')
        self.assertEqual(transfer['id'], response['id'],
                         'Unexpected transfer id in response.')
Esempio n. 22
0
    def test_init_host(self):
        """Make sure stuck volumes and backups are reset to correct
        states when backup_manager.init_host() is called
        """
        # Volumes stuck mid-backup and mid-restore...
        vol1_id = self._create_volume_db_entry()
        self._create_volume_attach(vol1_id)
        db.volume_update(self.ctxt, vol1_id, {'status': 'backing-up'})
        vol2_id = self._create_volume_db_entry()
        self._create_volume_attach(vol2_id)
        db.volume_update(self.ctxt, vol2_id, {'status': 'restoring-backup'})
        # ...and backups stuck in transitional states.
        backup1 = self._create_backup_db_entry(status='creating')
        backup2 = self._create_backup_db_entry(status='restoring')
        backup3 = self._create_backup_db_entry(status='deleting')

        self.backup_mgr.init_host()
        # backing-up -> available; restoring-backup -> error_restoring
        vol1 = db.volume_get(self.ctxt, vol1_id)
        self.assertEqual(vol1['status'], 'available')
        vol2 = db.volume_get(self.ctxt, vol2_id)
        self.assertEqual(vol2['status'], 'error_restoring')

        # creating -> error; restoring -> available; deleting -> gone
        backup1 = db.backup_get(self.ctxt, backup1.id)
        self.assertEqual(backup1['status'], 'error')
        backup2 = db.backup_get(self.ctxt, backup2.id)
        self.assertEqual(backup2['status'], 'available')
        self.assertRaises(exception.BackupNotFound,
                          db.backup_get,
                          self.ctxt,
                          backup3.id)
Esempio n. 23
0
    def _set_volume_state_and_notify(self, method, updates, context, ex,
                                     request_spec):
        """Log a scheduling failure, persist the volume state and emit a
        scheduler error notification.
        """
        LOG.error(_("Failed to schedule_%(method)s: %(ex)s")
                  % {'method': method, 'ex': ex})

        state = updates['volume_state']
        volume_properties = request_spec.get('volume_properties', {})
        vol_id = request_spec.get('volume_id', None)
        if vol_id:
            db.volume_update(context, vol_id, state)

        payload = {
            'request_spec': request_spec,
            'volume_properties': volume_properties,
            'volume_id': vol_id,
            'state': state,
            'method': method,
            'reason': ex,
        }
        notifier.notify(context, notifier.publisher_id("scheduler"),
                        'scheduler.' + method, notifier.ERROR, payload)
    def test_migrate_volume_exception_returns_volume_state(self):
        """Test NoValidHost exception behavior for migrate_volume_to_host.

        Resets the volume's migration_status and eats the exception.
        """
        fake_volume_id = 1
        self._mox_schedule_method_helper('host_passes_filters')
        self.mox.StubOutWithMock(db, 'volume_update')

        topic = 'fake_topic'
        volume_id = fake_volume_id
        request_spec = {'volume_id': fake_volume_id}

        # Record phase: the filter check raises NoValidHost; the manager
        # is then expected to clear migration_status on the volume.
        self.manager.driver.host_passes_filters(
            self.context, 'host', request_spec,
            {}).AndRaise(exception.NoValidHost(reason=""))
        db.volume_update(self.context, fake_volume_id,
                         {'migration_status': None})

        # Replay phase: must swallow NoValidHost rather than propagate it.
        self.mox.ReplayAll()
        self.manager.migrate_volume_to_host(self.context,
                                            topic,
                                            volume_id,
                                            'host',
                                            True,
                                            request_spec=request_spec,
                                            filter_properties={})
Esempio n. 25
0
    def test_transfer_accept(self, mock_notify):
        """Exercise transfer accept end to end, checking both the error
        paths and the notifications emitted at each step.
        """
        svc = self.start_service('volume', host='test_host')
        tx_api = transfer_api.API()
        utils.create_volume(self.ctxt, id='1',
                            updated_at=self.updated_at)
        transfer = tx_api.create(self.ctxt, '1', 'Description')
        volume = db.volume_get(self.ctxt, '1')
        # Creating a transfer parks the volume in 'awaiting-transfer'.
        self.assertEqual('awaiting-transfer', volume['status'],
                         'Unexpected state')

        # Unknown transfer id.
        self.assertRaises(exception.TransferNotFound,
                          tx_api.accept,
                          self.ctxt, '2', transfer['auth_key'])

        # Wrong auth key.
        self.assertRaises(exception.InvalidAuthKey,
                          tx_api.accept,
                          self.ctxt, transfer['id'], 'wrong')

        calls = [mock.call(self.ctxt, mock.ANY, "transfer.create.start"),
                 mock.call(self.ctxt, mock.ANY, "transfer.create.end")]
        mock_notify.assert_has_calls(calls)
        self.assertEqual(2, mock_notify.call_count)

        # Volume no longer awaiting transfer; restore the status after.
        db.volume_update(self.ctxt, '1', {'status': 'wrong'})
        self.assertRaises(exception.InvalidVolume,
                          tx_api.accept,
                          self.ctxt, transfer['id'], transfer['auth_key'])
        db.volume_update(self.ctxt, '1', {'status': 'awaiting-transfer'})

        # Because the InvalidVolume exception is raised in tx_api, so there is
        # only transfer.accept.start called and missing transfer.accept.end.
        calls = [mock.call(self.ctxt, mock.ANY, "transfer.accept.start")]
        mock_notify.assert_has_calls(calls)
        self.assertEqual(3, mock_notify.call_count)

        # Successful accept transfers ownership to the accepting context.
        self.ctxt.user_id = 'new_user_id'
        self.ctxt.project_id = 'new_project_id'
        response = tx_api.accept(self.ctxt,
                                 transfer['id'],
                                 transfer['auth_key'])
        volume = db.volume_get(self.ctxt, '1')
        self.assertEqual(volume['project_id'], 'new_project_id',
                         'Unexpected project id')
        self.assertEqual(volume['user_id'], 'new_user_id',
                         'Unexpected user id')

        self.assertEqual(volume['id'], response['volume_id'],
                         'Unexpected volume id in response.')
        self.assertEqual(transfer['id'], response['id'],
                         'Unexpected transfer id in response.')

        # A successful accept emits both start and end notifications.
        calls = [mock.call(self.ctxt, mock.ANY, "transfer.accept.start"),
                 mock.call(self.ctxt, mock.ANY, "transfer.accept.end")]
        mock_notify.assert_has_calls(calls)
        self.assertEqual(5, mock_notify.call_count)

        svc.stop()
Esempio n. 26
0
 def test_transfer_invalid_encrypted_volume(self):
     """Encrypted volumes cannot be offered for transfer."""
     api = transfer_api.API()
     vol = utils.create_volume(self.ctxt, updated_at=self.updated_at)
     updates = {'encryption_key_id': fake.ENCRYPTION_KEY_ID}
     db.volume_update(self.ctxt, vol.id, updates)
     self.assertRaises(exception.InvalidVolume, api.create,
                       self.ctxt, vol.id, 'Description')
Esempio n. 27
0
    def test_transfer_accept(self, mock_notify):
        """Exercise transfer accept end to end, checking both the error
        paths and the notifications emitted at each step.
        """
        svc = self.start_service('volume', host='test_host')
        tx_api = transfer_api.API()
        utils.create_volume(self.ctxt, id='1', updated_at=self.updated_at)
        transfer = tx_api.create(self.ctxt, '1', 'Description')
        volume = db.volume_get(self.ctxt, '1')
        # Creating a transfer parks the volume in 'awaiting-transfer'.
        self.assertEqual('awaiting-transfer', volume['status'],
                         'Unexpected state')

        # Unknown transfer id.
        self.assertRaises(exception.TransferNotFound, tx_api.accept, self.ctxt,
                          '2', transfer['auth_key'])

        # Wrong auth key.
        self.assertRaises(exception.InvalidAuthKey, tx_api.accept, self.ctxt,
                          transfer['id'], 'wrong')

        calls = [
            mock.call(self.ctxt, mock.ANY, "transfer.create.start"),
            mock.call(self.ctxt, mock.ANY, "transfer.create.end")
        ]
        mock_notify.assert_has_calls(calls)
        self.assertEqual(2, mock_notify.call_count)

        # Volume no longer awaiting transfer; restore the status after.
        db.volume_update(self.ctxt, '1', {'status': 'wrong'})
        self.assertRaises(exception.InvalidVolume, tx_api.accept, self.ctxt,
                          transfer['id'], transfer['auth_key'])
        db.volume_update(self.ctxt, '1', {'status': 'awaiting-transfer'})

        # Because the InvalidVolume exception is raised in tx_api, so there is
        # only transfer.accept.start called and missing transfer.accept.end.
        calls = [mock.call(self.ctxt, mock.ANY, "transfer.accept.start")]
        mock_notify.assert_has_calls(calls)
        self.assertEqual(3, mock_notify.call_count)

        # Successful accept transfers ownership to the accepting context.
        self.ctxt.user_id = 'new_user_id'
        self.ctxt.project_id = 'new_project_id'
        response = tx_api.accept(self.ctxt, transfer['id'],
                                 transfer['auth_key'])
        volume = db.volume_get(self.ctxt, '1')
        self.assertEqual(volume['project_id'], 'new_project_id',
                         'Unexpected project id')
        self.assertEqual(volume['user_id'], 'new_user_id',
                         'Unexpected user id')

        self.assertEqual(volume['id'], response['volume_id'],
                         'Unexpected volume id in response.')
        self.assertEqual(transfer['id'], response['id'],
                         'Unexpected transfer id in response.')

        # A successful accept emits both start and end notifications.
        calls = [
            mock.call(self.ctxt, mock.ANY, "transfer.accept.start"),
            mock.call(self.ctxt, mock.ANY, "transfer.accept.end")
        ]
        mock_notify.assert_has_calls(calls)
        self.assertEqual(5, mock_notify.call_count)

        svc.stop()
Esempio n. 28
0
def cast_to_volume_host(context, host, method, update_db=True, **kwargs):
    """Cast request to a volume host queue"""

    if update_db:
        # Record which host got the request and when it was scheduled.
        volume_id = kwargs.get("volume_id")
        if volume_id is not None:
            db.volume_update(context, volume_id,
                             {"host": host, "scheduled_at": utils.utcnow()})
    queue = db.queue_get_for(context, FLAGS.volume_topic, host)
    rpc.cast(context, queue, {"method": method, "args": kwargs})
    LOG.debug(_("Casted '%(method)s' to host '%(host)s'") % locals())
Esempio n. 29
0
 def test_volume_update(self):
     """Updating a volume's host (plus metadata) must stick in the DB."""
     created = db.volume_create(self.ctxt, {'host': 'h1'})
     changes = {'host': 'h2', 'metadata': {'m1': 'v1'}}
     db.volume_update(self.ctxt, created['id'], changes)
     refreshed = db.volume_get(self.ctxt, created['id'])
     self.assertEqual('h2', refreshed['host'])
Esempio n. 30
0
    def update_host(self, currenthost, newhost):
        """Modify the host name associated with a volume.

        Particularly to recover from cases where one has moved
        their Cinder Volume node, or modified their backend_name in a
        multi-backend config.
        """
        admin_ctxt = context.get_admin_context()
        # Repoint every volume currently attributed to the old host.
        for vol in db.volume_get_all_by_host(admin_ctxt, currenthost):
            db.volume_update(admin_ctxt, vol["id"], {"host": newhost})
Esempio n. 31
0
    def _schedule(self, context, request_spec, filter_properties=None):
        """Return hosts that meet the required specs, ordered by fitness.

        Delegates to the parent scheduler driver. When no host fits, a
        summary of the request and of every known host's capacity is
        written into the volume's metadata (key
        'schedule Failure description', truncated to 255 characters) and
        None is returned instead of a host list.
        """
        s = super(PowerVCSchedulerDriver, self)
        hosts = s._schedule(context, request_spec,
                            filter_properties=filter_properties)

        if not hosts:
            # no hosts fitted. At least we cannot find the hosts
            # that matches capacity requirement. Log an error to
            # to volume meta data.

            # collect request related information
            volume_id = request_spec['volume_id']
            vol_properties = request_spec['volume_properties']
            req_size = vol_properties['size']

            # collect host_state information
            elevated = context.elevated()
            all_hosts = self.host_manager.get_all_host_states(elevated)

            # For now we are only focusing on the capacity.
            req_info = (_('volume request: '
                          'requested size: %(size)s. ') % {'size': req_size})

            # Build a one-entry-per-host capacity report string.
            info = ''
            for hstate_info in all_hosts:
                ts = timeutils.isotime(at=hstate_info.updated)
                info += (_("{host: %(hostname)s, free_capacity: %(free_cap)s, "
                           "total_capacity: %(total)s, reserved_percentage:"
                           " %(reserved)s, last update: %(time_updated)s}") %
                         {'hostname': hstate_info.host,
                          'free_cap': hstate_info.free_capacity_gb,
                          'total': hstate_info.total_capacity_gb,
                          'reserved': hstate_info.reserved_percentage,
                          'time_updated': ts})
            if len(info) > 0:
                msg = (_('request exceeds capacity: ' + req_info +
                         ('available capacity: %(info)s') %
                         {'info': info}))
            else:
                msg = (_("No storage has been registered. " + req_info))

            LOG.error(("Schedule Failure: volume_id: %s, " % volume_id) + msg)

            # Metadata values are capped; keep only the first 255 chars.
            meta_data = {'schedule Failure description': msg[:255]}

            db.volume_update(context, volume_id, {'metadata': meta_data})

            return None
        else:
            return hosts
Esempio n. 32
0
    def update_host(self, currenthost, newhost):
        """Modify the host name associated with a volume.

        Particularly to recover from cases where one has moved
        their Cinder Volume node, or modified their backend_name in a
        multi-backend config.
        """
        ctxt = context.get_admin_context()
        # Rewrite the host field of each volume registered on currenthost.
        for volume in db.volume_get_all_by_host(ctxt, currenthost):
            db.volume_update(ctxt, volume['id'], {'host': newhost})
Esempio n. 33
0
 def create_volume(self, key_id=FIXED_KEY_ID):
     """Create a test volume and return it as a fully loaded Volume."""
     partial = tests_utils.create_volume(self.context, host=self.conf.host)
     volume_id = self.volume.create_volume(self.context, partial)
     if key_id:
         db.volume_update(self.context, volume_id,
                          {'encryption_key_id': key_id})
     self.my_vols = objects.VolumeList.get_all_by_host(
         self.context, self.conf.host)
     # Return the fully baked Volume object (not the partially baked
     # 'partial' and not the DB object).
     return next(v for v in self.my_vols if v.id == volume_id)
Esempio n. 34
0
    def test_volume_host_update_db(self):
        # Stub the clock and the DB layer so the exact update can be checked.
        self.mox.StubOutWithMock(timeutils, 'utcnow')
        self.mox.StubOutWithMock(db, 'volume_update')

        timeutils.utcnow().AndReturn('fake-now')
        expected = {'host': 'fake_host', 'scheduled_at': 'fake-now'}
        db.volume_update(self.context, 31337, expected)

        self.mox.ReplayAll()
        driver.volume_update_db(self.context, 31337, 'fake_host')
Esempio n. 35
0
 def create_volume(self, key_id=FIXED_KEY_ID):
     """Create a volume on this host, optionally tagging its key id."""
     seed = tests_utils.create_volume(self.context, host=self.conf.host)
     new_id = self.volume.create_volume(self.context, seed)
     if key_id:
         db.volume_update(self.context, new_id,
                          {'encryption_key_id': key_id})
     self.my_vols = objects.VolumeList.get_all_by_host(self.context,
                                                       self.conf.host)
     # Return the fully baked Volume object (not the partially baked
     # 'seed' and not the DB object).
     return next(v for v in self.my_vols if v.id == new_id)
    def test_volume_host_update_db(self):
        # Freeze time and intercept the DB write to assert its payload.
        self.mox.StubOutWithMock(timeutils, 'utcnow')
        self.mox.StubOutWithMock(db, 'volume_update')

        timeutils.utcnow().AndReturn('fake-now')
        db.volume_update(self.context, 31337,
                         {'host': 'fake_host', 'scheduled_at': 'fake-now'})

        self.mox.ReplayAll()
        driver.volume_update_db(self.context, 31337, 'fake_host')
Esempio n. 37
0
    def save(self):
        """Persist locally changed fields to the database.

        Relationship-backed fields (consistencygroup, group,
        glance_metadata, snapshots, cluster) cannot be updated through
        save() and raise ObjectActionError. Metadata dicts are routed to
        their dedicated DB update APIs; whatever remains goes through a
        single volume_update call.
        """
        # TODO: (Y release) Remove this online migration code
        # Pass self directly since it's a CinderObjectDictCompat
        self._ensure_use_quota_is_set(self)

        updates = self.cinder_obj_get_changes()
        if updates:
            # NOTE(xyang): Allow this to pass if 'consistencygroup' is
            # set to None. This is to support backward compatibility.
            # Also remove 'consistencygroup' from updates because
            # consistencygroup is the name of a relationship in the ORM
            # Volume model, so SQLA tries to do some kind of update of
            # the foreign key based on the provided updates if
            # 'consistencygroup' is in updates.
            if updates.pop('consistencygroup', None):
                raise exception.ObjectActionError(
                    action='save', reason=_('consistencygroup changed'))
            if 'group' in updates:
                raise exception.ObjectActionError(action='save',
                                                  reason=_('group changed'))
            if 'glance_metadata' in updates:
                raise exception.ObjectActionError(
                    action='save', reason=_('glance_metadata changed'))
            if 'snapshots' in updates:
                raise exception.ObjectActionError(
                    action='save', reason=_('snapshots changed'))
            if 'cluster' in updates:
                raise exception.ObjectActionError(action='save',
                                                  reason=_('cluster changed'))
            if 'metadata' in updates:
                # Metadata items that are not specified in the
                # self.metadata will be deleted
                metadata = updates.pop('metadata', None)
                self.metadata = db.volume_metadata_update(
                    self._context, self.id, metadata, True)
            if self._context.is_admin and 'admin_metadata' in updates:
                # Admin metadata is only writable from an admin context.
                metadata = updates.pop('admin_metadata', None)
                self.admin_metadata = db.volume_admin_metadata_update(
                    self._context, self.id, metadata, True)

            # When we are creating a volume and we change from 'creating'
            # status to 'downloading' status we have to change the worker entry
            # in the DB to reflect this change, otherwise the cleanup will
            # not be performed as it will be mistaken for a volume that has
            # been somehow changed (reset status, forced operation...)
            if updates.get('status') == 'downloading':
                self.set_worker()

            # updates are changed after popping out metadata.
            if updates:
                db.volume_update(self._context, self.id, updates)
            self.obj_reset_changes()
Esempio n. 38
0
    def save(self):
        """Persist locally changed fields to the database.

        Relationship fields (consistencygroup, group, glance_metadata,
        snapshots, cluster) raise ObjectActionError if changed; metadata
        dicts are applied through their dedicated DB update APIs before
        the remaining fields are written with volume_update.
        """
        updates = self.cinder_obj_get_changes()
        if updates:
            # NOTE(xyang): Allow this to pass if 'consistencygroup' is
            # set to None. This is to support backward compatibility.
            # Also remove 'consistencygroup' from updates because
            # consistencygroup is the name of a relationship in the ORM
            # Volume model, so SQLA tries to do some kind of update of
            # the foreign key based on the provided updates if
            # 'consistencygroup' is in updates.
            if updates.pop('consistencygroup', None):
                raise exception.ObjectActionError(
                    action='save', reason=_('consistencygroup changed'))
            if 'group' in updates:
                raise exception.ObjectActionError(
                    action='save', reason=_('group changed'))
            if 'glance_metadata' in updates:
                raise exception.ObjectActionError(
                    action='save', reason=_('glance_metadata changed'))
            if 'snapshots' in updates:
                raise exception.ObjectActionError(
                    action='save', reason=_('snapshots changed'))
            if 'cluster' in updates:
                raise exception.ObjectActionError(
                    action='save', reason=_('cluster changed'))
            if 'metadata' in updates:
                # Metadata items that are not specified in the
                # self.metadata will be deleted
                metadata = updates.pop('metadata', None)
                self.metadata = db.volume_metadata_update(self._context,
                                                          self.id, metadata,
                                                          True)
            if self._context.is_admin and 'admin_metadata' in updates:
                # Admin metadata is only writable from an admin context.
                metadata = updates.pop('admin_metadata', None)
                self.admin_metadata = db.volume_admin_metadata_update(
                    self._context, self.id, metadata, True)

            # When we are creating a volume and we change from 'creating'
            # status to 'downloading' status we have to change the worker entry
            # in the DB to reflect this change, otherwise the cleanup will
            # not be performed as it will be mistaken for a volume that has
            # been somehow changed (reset status, forced operation...)
            if updates.get('status') == 'downloading':
                self.set_worker()

            # updates are changed after popping out metadata.
            if updates:
                db.volume_update(self._context, self.id, updates)
            self.obj_reset_changes()
Esempio n. 39
0
 def test_attach_attaching_volume_with_different_instance(self):
     """Test that attaching volume reserved for another instance fails."""
     ctx = context.RequestContext("admin", "fake", True)
     # The volume starts out available.
     vol = db.volume_create(
         ctx,
         {"status": "available", "host": "test",
          "provider_location": "", "size": 1})
     # start service to handle rpc messages for attach requests
     svc = self.start_service("volume", host="test")
     db.volume_update(
         ctx, vol["id"],
         {"status": "attaching", "instance_uuid": fakes.get_fake_uuid()})
     self.assertRaises(
         exception.InvalidVolume, self.volume_api.attach, ctx, vol,
         stubs.FAKE_UUID, None, "/dev/vbd", "rw")
     # cleanup
     svc.stop()
Esempio n. 40
0
def volume_update_db(context, volume_id, host):
    """Set the host and set the scheduled_at field of a volume.

    :returns: A Volume with the updated fields set properly.
    """
    return db.volume_update(context, volume_id,
                            {'host': host,
                             'scheduled_at': timeutils.utcnow()})
Esempio n. 41
0
 def test_migrate_volume_migrating(self):
     """Migrating a volume that is already migrating must fail with 400."""
     vol = self._migrate_volume_prep()
     vol = db.volume_update(self.ctx, vol["id"],
                            {"migration_status": "migrating"})
     self._migrate_volume_exec(self.ctx, vol, "test2", 400)
Esempio n. 42
0
 def test_cleanup_temp_volume_not_found(self, mock_delete_volume):
     """Ensure we handle missing temp volume for a backup."""
     volume_id = self._create_volume_db_entry()
     self._create_volume_attach(volume_id)
     db.volume_update(self.ctxt, volume_id, {'status': 'backing-up'})
     backup = self._create_backup_db_entry(status='error',
                                           volume_id=volume_id,
                                           temp_volume_id='fake')
     backups = [backup]
     self.assertEqual('fake', backups[0].temp_volume_id)
     self.assertIsNone(
         self.backup_mgr._cleanup_temp_volumes_snapshots(backups))
     # The dangling reference is cleared without calling delete_volume.
     self.assertFalse(mock_delete_volume.called)
     self.assertIsNone(backups[0].temp_volume_id)
     backup.destroy()
     db.volume_destroy(self.ctxt, volume_id)
Esempio n. 43
0
def volume_update_db(context, volume_id, host, replica=None):
    """Set the host and set the scheduled_at field of a volume.

    If this is a replicated volume, create a new DB entry for the replica and
    set the links between them in replication_partner.

    :returns: A Volume with the updated fields set properly.
    """
    now = timeutils.utcnow()
    if replica:
        # Seed the replica record from the primary volume's attributes.
        source = db.volume_get(context, volume_id)
        replica_values = {'scheduled_at': now, 'status': 'replica_creating'}
        for field in ('size', 'ec2_id', 'user_id', 'project_id',
                      'created_at', 'updated_at', 'display_name',
                      'display_description'):
            replica_values[field] = source[field]
        replica.update(replica_values)
        replica_ref = db.volume_create(context, replica)
        # Link primary and secondary in the replication relationship table.
        db.replication_relationship_create(
            context,
            {'primary_id': volume_id,
             'secondary_id': replica_ref['id'],
             'status': 'starting'})

    return db.volume_update(context, volume_id,
                            {'host': host, 'scheduled_at': now})
Esempio n. 44
0
 def test_migrate_volume_migrating(self):
     """A second migrate request for a migrating volume yields 400."""
     volume = self._migrate_volume_prep()
     volume = db.volume_update(self.ctx, volume['id'],
                               {'migration_status': 'migrating'})
     self._migrate_volume_exec(self.ctx, volume, 'test2', 400)
Esempio n. 45
0
def cast_to_volume_host(context, host, method, update_db=True, **kwargs):
    """Cast request to a volume host queue"""

    if update_db:
        # Stamp the volume with the chosen host and the scheduling time.
        volume_id = kwargs.get('volume_id')
        if volume_id is not None:
            db.volume_update(context, volume_id,
                             {'host': host,
                              'scheduled_at': timeutils.utcnow()})
    topic = rpc.queue_get_for(context, FLAGS.volume_topic, host)
    rpc.cast(context, topic, {"method": method, "args": kwargs})
    LOG.debug(_("Casted '%(method)s' to host '%(host)s'") % locals())
Esempio n. 46
0
def volume_update_db(context, volume_id, host):
    """Set the host and set the scheduled_at field of a volume.

    :returns: A Volume with the updated fields set properly.
    """
    now = timeutils.utcnow()
    return db.volume_update(context, volume_id,
                            {'host': host, 'scheduled_at': now})
Esempio n. 47
0
 def test_migrate_volume_migrating(self):
     expected_status = 400
     volume = self._migrate_volume_prep()
     updates = {'migration_status': 'migrating'}
     volume = db.volume_update(self.ctx, volume['id'], updates)
     # Re-migrating an in-flight migration must be rejected.
     self._migrate_volume_exec(self.ctx, volume, 'test2', expected_status)
Esempio n. 48
0
    def test_attach_attaching_volume_with_different_mode(self):
        """Test that attaching volume reserved for another mode fails."""
        # current status is available
        volume = self._create_volume(self.ctx,
                                     {'provider_location': '', 'size': 1})

        instance_uuid = fake.INSTANCE_ID
        db.volume_update(self.ctx, volume['id'],
                         {'status': 'attaching',
                          'instance_uuid': instance_uuid})
        db.volume_admin_metadata_update(self.ctx, volume['id'],
                                        {"attached_mode": 'rw'}, False)
        # Asking for 'ro' while the reservation is 'rw' must be refused.
        self.assertRaises(exception.InvalidVolume, self.volume_api.attach,
                          self.ctx, volume, instance_uuid, None,
                          '/dev/vbd', 'ro')
Esempio n. 49
0
    def test_cleanup_temp_volume_for_one_backup_not_found(self):
        """Ensure we handle missing temp volume for a backup."""
        mock_delete_volume = self.mock_object(lvm.LVMVolumeDriver,
                                              "delete_volume")

        volume_id = self._create_volume_db_entry()
        self._create_volume_attach(volume_id)
        db.volume_update(self.ctxt, volume_id, {"status": "backing-up"})
        backup = self._create_backup_db_entry(status="error",
                                              volume_id=volume_id,
                                              temp_volume_id="fake")

        self.assertIsNone(
            self.backup_mgr._cleanup_temp_volumes_snapshots_for_one_backup(
                self.ctxt, backup))

        # The dangling temp-volume reference is cleared, nothing is deleted.
        self.assertFalse(mock_delete_volume.called)
        self.assertIsNone(backup.temp_volume_id)

        backup.destroy()
        db.volume_destroy(self.ctxt, volume_id)
Esempio n. 50
0
    def test_attach_attaching_volume_with_different_mode(self):
        """Test that attaching volume reserved for another mode fails."""
        # current status is available
        vol = self._create_volume(self.ctx, {'provider_location': '',
                                             'size': 1})

        reserved = {'status': 'attaching', 'instance_uuid': fake.INSTANCE_ID}
        db.volume_update(self.ctx, vol['id'], reserved)
        db.volume_admin_metadata_update(self.ctx, vol['id'],
                                        {"attached_mode": 'rw'}, False)
        # Requesting read-only against an 'rw' reservation must fail.
        self.assertRaises(exception.InvalidVolume, self.volume_api.attach,
                          self.ctx, vol, reserved['instance_uuid'], None,
                          '/dev/vbd', 'ro')
Esempio n. 51
0
    def save(self):
        """Flush locally changed fields to the database.

        'metadata' and 'admin_metadata' changes are applied through their
        dedicated DB update APIs (which delete items not present in the
        new dict); all remaining changes go through one volume_update.
        """
        updates = self.cinder_obj_get_changes()
        if updates:
            if 'metadata' in updates:
                # Metadata items that are not specified in the
                # self.metadata will be deleted
                metadata = updates.pop('metadata', None)
                self.metadata = db.volume_metadata_update(
                    self._context, self.id, metadata, True)
            if self._context.is_admin and 'admin_metadata' in updates:
                # Admin metadata is only writable from an admin context.
                metadata = updates.pop('admin_metadata', None)
                self.admin_metadata = db.volume_admin_metadata_update(
                    self._context, self.id, metadata, True)

            db.volume_update(self._context, self.id, updates)
            self.obj_reset_changes()
    def test_transfer_accept(self, mock_notify):
        """Exercise the full create/accept transfer flow with notifications."""
        svc = self.start_service("volume", host="test_host")
        tx_api = transfer_api.API()
        utils.create_volume(self.ctxt, id="1", updated_at=self.updated_at)
        transfer = tx_api.create(self.ctxt, "1", "Description")
        volume = db.volume_get(self.ctxt, "1")
        self.assertEqual("awaiting-transfer", volume["status"],
                         "Unexpected state")

        # Unknown transfer id and a bad auth key are both rejected.
        self.assertRaises(exception.TransferNotFound, tx_api.accept,
                          self.ctxt, "2", transfer["auth_key"])
        self.assertRaises(exception.InvalidAuthKey, tx_api.accept,
                          self.ctxt, transfer["id"], "wrong")

        mock_notify.assert_has_calls([
            mock.call(self.ctxt, mock.ANY, "transfer.create.start"),
            mock.call(self.ctxt, mock.ANY, "transfer.create.end"),
        ])
        self.assertEqual(2, mock_notify.call_count)

        db.volume_update(self.ctxt, "1", {"status": "wrong"})
        self.assertRaises(exception.InvalidVolume, tx_api.accept, self.ctxt,
                          transfer["id"], transfer["auth_key"])
        db.volume_update(self.ctxt, "1", {"status": "awaiting-transfer"})

        # Because the InvalidVolume exception is raised in tx_api, so there is
        # only transfer.accept.start called and missing transfer.accept.end.
        mock_notify.assert_has_calls(
            [mock.call(self.ctxt, mock.ANY, "transfer.accept.start")])
        self.assertEqual(3, mock_notify.call_count)

        self.ctxt.user_id = "new_user_id"
        self.ctxt.project_id = "new_project_id"
        response = tx_api.accept(self.ctxt, transfer["id"],
                                 transfer["auth_key"])
        volume = db.volume_get(self.ctxt, "1")
        self.assertEqual("new_project_id", volume["project_id"],
                         "Unexpected project id")
        self.assertEqual("new_user_id", volume["user_id"],
                         "Unexpected user id")

        self.assertEqual(volume["id"], response["volume_id"],
                         "Unexpected volume id in response.")
        self.assertEqual(transfer["id"], response["id"],
                         "Unexpected transfer id in response.")

        mock_notify.assert_has_calls([
            mock.call(self.ctxt, mock.ANY, "transfer.accept.start"),
            mock.call(self.ctxt, mock.ANY, "transfer.accept.end"),
        ])
        self.assertEqual(5, mock_notify.call_count)

        svc.stop()
Esempio n. 53
0
class SchedulerManager(manager.Manager):
    """Chooses a host to create volumes"""

    # NOTE(review): this block uses Python 2 syntax ('except ..., e');
    # it predates the Python 3 port.
    RPC_API_VERSION = '1.0'

    def __init__(self, scheduler_driver=None, *args, **kwargs):
        # Fall back to the configured driver class when none is injected
        # (tests may pass an explicit scheduler_driver import path).
        if not scheduler_driver:
            scheduler_driver = FLAGS.scheduler_driver
        self.driver = importutils.import_object(scheduler_driver)
        super(SchedulerManager, self).__init__(*args, **kwargs)

    def __getattr__(self, key):
        """Converts all method calls to use the schedule method"""
        # NOTE(russellb) Because of what this is doing, we must be careful
        # when changing the API of the scheduler drivers, as that changes
        # the rpc API as well, and the version should be updated accordingly.
        return functools.partial(self._schedule, key)

    def get_host_list(self, context):
        """Get a list of hosts from the HostManager."""
        return self.driver.get_host_list()

    def get_service_capabilities(self, context):
        """Get the normalized set of capabilities for this zone."""
        return self.driver.get_service_capabilities()

    def update_service_capabilities(self,
                                    context,
                                    service_name=None,
                                    host=None,
                                    capabilities=None,
                                    **kwargs):
        """Process a capability update from a service node."""
        if capabilities is None:
            capabilities = {}
        self.driver.update_service_capabilities(service_name, host,
                                                capabilities)

    def _schedule(self, method, context, topic, *args, **kwargs):
        """Tries to call schedule_* method on the driver to retrieve host.
        Falls back to schedule(context, topic) if method doesn't exist.
        """
        driver_method_name = 'schedule_%s' % method
        try:
            driver_method = getattr(self.driver, driver_method_name)
            args = (context, ) + args
        except AttributeError, e:
            LOG.warning(
                _("Driver Method %(driver_method_name)s missing: "
                  "%(e)s. Reverting to schedule()") % locals())
            driver_method = self.driver.schedule
            args = (context, topic, method) + args

        try:
            return driver_method(*args, **kwargs)
        except Exception:
            # Re-raise after marking the volume as errored so it does not
            # stay stuck in a transient state.
            with excutils.save_and_reraise_exception():
                volume_id = kwargs.get('volume_id')
                db.volume_update(context, volume_id, {'status': 'error'})
Esempio n. 54
0
 def _fake_create_volume(self, ctxt, volume, req_spec, filters,
                         allow_reschedule=True):
     # Stand-in for the real create_volume call: simply flip the volume's
     # status to whatever this test expects.
     return db.volume_update(ctxt, volume['id'],
                             {'status': self.expected_status})
Esempio n. 55
0
 def test_migrate_volume_migrating(self):
     """Re-migrating a volume already in migration must return 400."""
     ctx = context.RequestContext('admin', 'fake', True)
     volume = self._migrate_volume_prep()
     volume = db.volume_update(ctx, volume['id'],
                               {'migration_status': 'migrating'})
     self._migrate_volume_exec(ctx, volume, 'test2', 400)
Esempio n. 56
0
 def test_migrate_volume_migrating(self):
     expected_status = 400
     ctx = context.RequestContext("admin", "fake", True)
     prepped = self._migrate_volume_prep()
     # Flag the volume as mid-migration before retrying the request.
     migrating = db.volume_update(ctx, prepped["id"],
                                  {"migration_status": "migrating"})
     self._migrate_volume_exec(ctx, migrating, "test2", expected_status)
Esempio n. 57
0
    def save(self):
        """Flush locally changed fields to the database.

        Metadata and admin metadata are written through their dedicated DB
        update APIs (which delete items absent from the new dict) before
        the remaining changes go through a single volume_update call.
        """
        updates = self.cinder_obj_get_changes()
        if updates:
            if 'metadata' in updates:
                # Metadata items that are not specified in the
                # self.metadata will be deleted
                metadata = updates.pop('metadata', None)
                self.metadata = db.volume_metadata_update(self._context,
                                                          self.id, metadata,
                                                          True)
            if self._context.is_admin and 'admin_metadata' in updates:
                # Admin metadata is only writable from an admin context.
                metadata = updates.pop('admin_metadata', None)
                self.admin_metadata = db.volume_admin_metadata_update(
                    self._context, self.id, metadata, True)

            db.volume_update(self._context, self.id, updates)
            self.obj_reset_changes()
Esempio n. 58
0
 def test_migrate_volume_migrating(self):
     ctx = context.RequestContext('admin', 'fake', True)
     prepped = self._migrate_volume_prep()
     migrating = db.volume_update(ctx, prepped['id'],
                                  {'migration_status': 'migrating'})
     # A volume already migrating cannot be migrated again.
     self._migrate_volume_exec(ctx, migrating, 'test2', 400)
Esempio n. 59
0
    def save(self):
        """Persist locally changed fields to the database.

        Relationship fields (consistencygroup, group, glance_metadata,
        snapshots, cluster) raise ObjectActionError if changed; metadata
        dicts are applied through their dedicated DB update APIs before
        the remaining fields are written with volume_update.
        """
        updates = self.cinder_obj_get_changes()
        if updates:
            if 'consistencygroup' in updates:
                # NOTE(xyang): Allow this to pass if 'consistencygroup' is
                # set to None. This is to support backward compatibility.
                if updates.get('consistencygroup'):
                    raise exception.ObjectActionError(
                        action='save', reason=_('consistencygroup changed'))
            if 'group' in updates:
                raise exception.ObjectActionError(
                    action='save', reason=_('group changed'))
            if 'glance_metadata' in updates:
                raise exception.ObjectActionError(
                    action='save', reason=_('glance_metadata changed'))
            if 'snapshots' in updates:
                raise exception.ObjectActionError(
                    action='save', reason=_('snapshots changed'))
            if 'cluster' in updates:
                raise exception.ObjectActionError(
                    action='save', reason=_('cluster changed'))
            if 'metadata' in updates:
                # Metadata items that are not specified in the
                # self.metadata will be deleted
                metadata = updates.pop('metadata', None)
                self.metadata = db.volume_metadata_update(self._context,
                                                          self.id, metadata,
                                                          True)
            if self._context.is_admin and 'admin_metadata' in updates:
                # Admin metadata is only writable from an admin context.
                metadata = updates.pop('admin_metadata', None)
                self.admin_metadata = db.volume_admin_metadata_update(
                    self._context, self.id, metadata, True)

            # When we are creating a volume and we change from 'creating'
            # status to 'downloading' status we have to change the worker entry
            # in the DB to reflect this change, otherwise the cleanup will
            # not be performed as it will be mistaken for a volume that has
            # been somehow changed (reset status, forced operation...)
            if updates.get('status') == 'downloading':
                self.set_worker()

            # updates are changed after popping out metadata.
            if updates:
                db.volume_update(self._context, self.id, updates)
            self.obj_reset_changes()