Example 1: a Nova compute shelve test. utils_fixture.TimeFixture() pins the clock while check_save() verifies the shelve and shelve-offload state transitions and the expected mock calls.
    def _shelve_instance(self,
                         shelved_offload_time,
                         mock_notify,
                         mock_notify_instance_usage,
                         mock_get_power_state,
                         mock_snapshot,
                         mock_power_off,
                         clean_shutdown=True):
        mock_get_power_state.return_value = 123

        CONF.set_override('shelved_offload_time', shelved_offload_time)
        host = 'fake-mini'
        instance = self._create_fake_instance_obj(params={'host': host})
        image_id = 'fake_image_id'
        host = 'fake-mini'
        self.useFixture(utils_fixture.TimeFixture())
        instance.task_state = task_states.SHELVING
        instance.save()

        tracking = {'last_state': instance.vm_state}

        def check_save(expected_task_state=None):
            self.assertEqual(123, instance.power_state)
            if tracking['last_state'] == vm_states.ACTIVE:
                if CONF.shelved_offload_time == 0:
                    self.assertEqual(task_states.SHELVING_OFFLOADING,
                                     instance.task_state)
                else:
                    self.assertIsNone(instance.task_state)
                self.assertEqual(vm_states.SHELVED, instance.vm_state)
                self.assertEqual([
                    task_states.SHELVING, task_states.SHELVING_IMAGE_UPLOADING
                ], expected_task_state)
                self.assertIn('shelved_at', instance.system_metadata)
                self.assertEqual(image_id,
                                 instance.system_metadata['shelved_image_id'])
                self.assertEqual(host,
                                 instance.system_metadata['shelved_host'])
                tracking['last_state'] = instance.vm_state
            elif (tracking['last_state'] == vm_states.SHELVED
                  and CONF.shelved_offload_time == 0):
                self.assertIsNone(instance.task_state)
                self.assertEqual(vm_states.SHELVED_OFFLOADED,
                                 instance.vm_state)
                self.assertEqual(
                    [task_states.SHELVING, task_states.SHELVING_OFFLOADING],
                    expected_task_state)
                tracking['last_state'] = instance.vm_state
            elif (tracking['last_state'] == vm_states.SHELVED_OFFLOADED
                  and CONF.shelved_offload_time == 0):
                self.assertIsNone(instance.host)
                self.assertIsNone(instance.node)
                self.assertIsNone(expected_task_state)
            else:
                self.fail('Unexpected save!')

        with test.nested(
                mock.patch.object(instance, 'save'),
                mock.patch.object(
                    self.compute.network_api,
                    'cleanup_instance_network_on_host')) as (mock_save,
                                                             mock_cleanup):
            mock_save.side_effect = check_save
            self.compute.shelve_instance(self.context,
                                         instance,
                                         image_id=image_id,
                                         clean_shutdown=clean_shutdown)
            mock_notify.assert_has_calls([
                mock.call(self.context,
                          instance,
                          'fake-mini',
                          action='shelve',
                          phase='start'),
                mock.call(self.context,
                          instance,
                          'fake-mini',
                          action='shelve',
                          phase='end')
            ])

        # prepare expect call lists
        mock_notify_instance_usage_call_list = [
            mock.call(self.context, instance, 'shelve.start'),
            mock.call(self.context, instance, 'shelve.end')
        ]
        mock_power_off_call_list = []
        mock_get_power_state_call_list = [mock.call(self.context, instance)]
        mock_cleanup_call_list = []

        if clean_shutdown:
            mock_power_off_call_list.append(
                mock.call(instance, CONF.shutdown_timeout,
                          self.compute.SHUTDOWN_RETRY_INTERVAL))
        else:
            mock_power_off_call_list.append(mock.call(instance, 0, 0))

        if CONF.shelved_offload_time == 0:
            mock_notify_instance_usage_call_list.extend([
                mock.call(self.context, instance, 'shelve_offload.start'),
                mock.call(self.context, instance, 'shelve_offload.end')
            ])
            mock_power_off_call_list.append(mock.call(instance, 0, 0))
            mock_get_power_state_call_list.append(
                mock.call(self.context, instance))
            # Pass the local host variable rather than instance.host, because
            # instance.host is cleared once ComputeManager.shelve_instance
            # runs with shelved_offload_time == 0.
            mock_cleanup_call_list.append(
                mock.call(self.context, instance, host))

        mock_notify_instance_usage.assert_has_calls(
            mock_notify_instance_usage_call_list)
        mock_power_off.assert_has_calls(mock_power_off_call_list)
        mock_cleanup.assert_has_calls(mock_cleanup_call_list)
        mock_snapshot.assert_called_once_with(self.context, instance,
                                              'fake_image_id', mock.ANY)
        mock_get_power_state.assert_has_calls(mock_get_power_state_call_list)
Example 2: a Glance copy-image import test. time_fixture.TimeFixture() lets the test advance the clock by two hours and bust the stale import lock held by a hung first import.
    def _test_import_copy(self, warp_time=False):
        self.start_server()
        state = {'want_run': True}

        # Create and import an image with no pipeline stall
        image_id = self._create_and_import(stores=['store1'])

        # Set up a fake data pipeline that will stall until we are ready
        # to unblock it
        def slow_fake_set_data(data_iter, backend=None, set_active=True):
            me = str(uuid.uuid4())
            while state['want_run']:
                LOG.info('fake_set_data running %s' % me)
                state['running'] = True
                time.sleep(0.1)
            LOG.info('fake_set_data ended %s' % me)

        # Constrain oslo timeutils time so we can manipulate it
        tf = time_fixture.TimeFixture()
        self.useFixture(tf)

        # Turn on the delayed data pipeline and start a copy-image
        # import which will hang out for a while
        with mock.patch('glance.domain.proxy.Image.set_data') as mock_sd:
            mock_sd.side_effect = slow_fake_set_data

            resp = self._import_copy(image_id, ['store2'])
            self.addDetail('First import response',
                           ttc.text_content(str(resp)))
            self.assertEqual(202, resp.status_code)

            # Wait to make sure the data stream gets started
            for i in range(0, 10):
                if 'running' in state:
                    break
                time.sleep(0.1)

        # Make sure the first import got to the point where the
        # hanging loop will hold it in processing state
        self.assertTrue(state.get('running', False),
                        'slow_fake_set_data() never ran')

        # Make sure the task is available and in the right state
        first_import_task = self._get_image_import_task(image_id)
        self.assertEqual('processing', first_import_task['status'])

        # If we're warping time, then advance the clock by two hours
        if warp_time:
            tf.advance_time_delta(datetime.timedelta(hours=2))

        # Try a second copy-image import. If we are warping time,
        # expect the lock to be busted. If not, then we should get
        # a 409 Conflict.
        resp = self._import_copy(image_id, ['store3'])
        time.sleep(0.1)

        self.addDetail('Second import response',
                       ttc.text_content(str(resp)))
        if warp_time:
            self.assertEqual(202, resp.status_code)
        else:
            self.assertEqual(409, resp.status_code)

        self.addDetail('First task', ttc.text_content(str(first_import_task)))

        # Grab the current import task for our image, and also
        # refresh our first task object
        second_import_task = self._get_image_import_task(image_id)
        first_import_task = self._get_image_import_task(
            image_id, first_import_task['id'])

        if warp_time:
            # If we warped time and busted the lock, then we expect the
            # current task to be different than the original task
            self.assertNotEqual(first_import_task['id'],
                                second_import_task['id'])
            # The original task should be failed with the expected message
            self.assertEqual('failure', first_import_task['status'])
            self.assertEqual('Expired lock preempted',
                             first_import_task['message'])
            # The new task should be off and running
            self.assertEqual('processing', second_import_task['status'])
        else:
            # We didn't bust the lock, so we didn't start another
            # task, so confirm it hasn't changed
            self.assertEqual(first_import_task['id'],
                             second_import_task['id'])

        return image_id, state
Example 3: a newer, fully mock-based variant of the Nova shelve test that also covers block device mappings and the guest power state.
    def _shelve_instance(self, shelved_offload_time, mock_notify,
                         mock_notify_instance_usage, mock_get_power_state,
                         mock_snapshot, mock_power_off, mock_terminate,
                         mock_get_bdms, clean_shutdown=True,
                         guest_power_state=power_state.RUNNING):
        mock_get_power_state.return_value = 123

        CONF.set_override('shelved_offload_time', shelved_offload_time)
        host = 'fake-mini'
        instance = self._create_fake_instance_obj(
            params={'host': host, 'power_state': guest_power_state})
        image_id = 'fake_image_id'
        host = 'fake-mini'
        self.useFixture(utils_fixture.TimeFixture())
        instance.task_state = task_states.SHELVING
        instance.save()

        fake_bdms = None
        if shelved_offload_time == 0:
            fake_bdms = objects.BlockDeviceMappingList()
            mock_get_bdms.return_value = fake_bdms

        tracking = {'last_state': instance.vm_state}

        def check_save(expected_task_state=None):
            self.assertEqual(123, instance.power_state)
            if tracking['last_state'] == vm_states.ACTIVE:
                if CONF.shelved_offload_time == 0:
                    self.assertEqual(task_states.SHELVING_OFFLOADING,
                                     instance.task_state)
                else:
                    self.assertIsNone(instance.task_state)
                self.assertEqual(vm_states.SHELVED, instance.vm_state)
                self.assertEqual([task_states.SHELVING,
                                  task_states.SHELVING_IMAGE_UPLOADING],
                                 expected_task_state)
                self.assertIn('shelved_at', instance.system_metadata)
                self.assertEqual(image_id,
                                 instance.system_metadata['shelved_image_id'])
                self.assertEqual(host,
                                 instance.system_metadata['shelved_host'])
                tracking['last_state'] = instance.vm_state
            elif (tracking['last_state'] == vm_states.SHELVED and
                  CONF.shelved_offload_time == 0):
                self.assertIsNone(instance.task_state)
                self.assertEqual(vm_states.SHELVED_OFFLOADED,
                                 instance.vm_state)
                self.assertEqual([task_states.SHELVING,
                                  task_states.SHELVING_OFFLOADING],
                                 expected_task_state)
                tracking['last_state'] = instance.vm_state
            elif (tracking['last_state'] == vm_states.SHELVED_OFFLOADED and
                  CONF.shelved_offload_time == 0):
                self.assertIsNone(instance.host)
                self.assertIsNone(instance.node)
                self.assertIsNone(expected_task_state)
            else:
                self.fail('Unexpected save!')

        with test.nested(
                mock.patch.object(instance, 'save'),
                mock.patch.object(self.compute.network_api,
                                  'cleanup_instance_network_on_host')) as (
            mock_save, mock_cleanup
        ):
            mock_save.side_effect = check_save
            self.compute.shelve_instance(self.context, instance,
                                         image_id=image_id,
                                         clean_shutdown=clean_shutdown)
            mock_notify.assert_has_calls([
                mock.call(self.context, instance, 'fake-mini',
                          action='shelve', phase='start', bdms=fake_bdms),
                mock.call(self.context, instance, 'fake-mini',
                          action='shelve', phase='end', bdms=fake_bdms)])

        # prepare expect call lists
        mock_notify_instance_usage_call_list = [
            mock.call(self.context, instance, 'shelve.start'),
            mock.call(self.context, instance, 'shelve.end')]
        mock_power_off_call_list = []
        mock_get_power_state_call_list = [mock.call(instance)]

        if clean_shutdown:
            if guest_power_state == power_state.PAUSED:
                mock_power_off_call_list.append(mock.call(instance, 0, 0))
            else:
                mock_power_off_call_list.append(
                    mock.call(instance, CONF.shutdown_timeout,
                              CONF.compute.shutdown_retry_interval))
        else:
            mock_power_off_call_list.append(mock.call(instance, 0, 0))

        if CONF.shelved_offload_time == 0:
            mock_notify_instance_usage_call_list.extend([
                mock.call(self.context, instance, 'shelve_offload.start'),
                mock.call(self.context, instance, 'shelve_offload.end')])
            mock_power_off_call_list.append(mock.call(instance, 0, 0))
            mock_get_power_state_call_list.append(mock.call(instance))

        mock_notify_instance_usage.assert_has_calls(
            mock_notify_instance_usage_call_list)
        mock_power_off.assert_has_calls(mock_power_off_call_list)
        mock_cleanup.assert_not_called()
        mock_snapshot.assert_called_once_with(self.context, instance,
                                              'fake_image_id', mock.ANY)
        mock_get_power_state.assert_has_calls(mock_get_power_state_call_list)

        if CONF.shelved_offload_time == 0:
            self.assertTrue(mock_terminate.called)
Example 4: a minimal setUp() that installs utils_fixture.TimeFixture() for an EC2 lockout test case.
    def setUp(self):
        super(LockoutTestCase, self).setUp()
        self.time_fixture = self.useFixture(utils_fixture.TimeFixture())
        self.lockout = ec2.Lockout(conditional_forbid)
Example 5: an older, mox-based variant of the Nova shelve test.
    def _shelve_instance(self, shelved_offload_time, clean_shutdown=True):
        CONF.set_override('shelved_offload_time', shelved_offload_time)
        host = 'fake-mini'
        instance = self._create_fake_instance_obj(params={'host': host})
        image_id = 'fake_image_id'
        host = 'fake-mini'
        self.useFixture(utils_fixture.TimeFixture())
        instance.task_state = task_states.SHELVING
        instance.save()

        self.mox.StubOutWithMock(self.compute, '_notify_about_instance_usage')
        self.mox.StubOutWithMock(self.compute.driver, 'snapshot')
        self.mox.StubOutWithMock(self.compute.driver, 'power_off')
        self.mox.StubOutWithMock(self.compute, '_get_power_state')
        self.mox.StubOutWithMock(self.compute.network_api,
                                 'cleanup_instance_network_on_host')

        self.compute._notify_about_instance_usage(self.context, instance,
                                                  'shelve.start')
        if clean_shutdown:
            self.compute.driver.power_off(instance, CONF.shutdown_timeout,
                                          self.compute.SHUTDOWN_RETRY_INTERVAL)
        else:
            self.compute.driver.power_off(instance, 0, 0)
        self.compute._get_power_state(self.context, instance).AndReturn(123)
        if CONF.shelved_offload_time == 0:
            self.compute.network_api.cleanup_instance_network_on_host(
                self.context, instance, instance.host)
        self.compute.driver.snapshot(self.context, instance, 'fake_image_id',
                                     mox.IgnoreArg())

        tracking = {'last_state': instance.vm_state}

        def check_save(expected_task_state=None):
            self.assertEqual(123, instance.power_state)
            if tracking['last_state'] == vm_states.ACTIVE:
                if CONF.shelved_offload_time == 0:
                    self.assertEqual(task_states.SHELVING_OFFLOADING,
                                     instance.task_state)
                else:
                    self.assertIsNone(instance.task_state)
                self.assertEqual(vm_states.SHELVED, instance.vm_state)
                self.assertEqual([
                    task_states.SHELVING, task_states.SHELVING_IMAGE_UPLOADING
                ], expected_task_state)
                self.assertIn('shelved_at', instance.system_metadata)
                self.assertEqual(image_id,
                                 instance.system_metadata['shelved_image_id'])
                self.assertEqual(host,
                                 instance.system_metadata['shelved_host'])
                tracking['last_state'] = instance.vm_state
            elif (tracking['last_state'] == vm_states.SHELVED
                  and CONF.shelved_offload_time == 0):
                self.assertIsNone(instance.host)
                self.assertIsNone(instance.node)
                self.assertIsNone(instance.task_state)
                self.assertEqual(vm_states.SHELVED_OFFLOADED,
                                 instance.vm_state)
                self.assertEqual(
                    [task_states.SHELVING, task_states.SHELVING_OFFLOADING],
                    expected_task_state)
                tracking['last_state'] = instance.vm_state
            else:
                self.fail('Unexpected save!')

        self.compute._notify_about_instance_usage(self.context, instance,
                                                  'shelve.end')
        if CONF.shelved_offload_time == 0:
            self.compute._notify_about_instance_usage(self.context, instance,
                                                      'shelve_offload.start')
            self.compute.driver.power_off(instance, 0, 0)
            self.compute._get_power_state(self.context,
                                          instance).AndReturn(123)
            self.compute._notify_about_instance_usage(self.context, instance,
                                                      'shelve_offload.end')
        self.mox.ReplayAll()

        with mock.patch.object(instance, 'save') as mock_save:
            mock_save.side_effect = check_save
            self.compute.shelve_instance(self.context,
                                         instance,
                                         image_id=image_id,
                                         clean_shutdown=clean_shutdown)
Example 6: a Nova shelve-offload test with mocked notifications, power state, resource tracker and allocation cleanup.
    def _shelve_offload(self,
                        mock_notify,
                        mock_notify_instance_usage,
                        mock_get_power_state,
                        mock_update_resource_tracker,
                        mock_delete_alloc,
                        mock_terminate,
                        mock_get_bdms,
                        mock_event,
                        clean_shutdown=True):
        host = 'fake-mini'
        instance = self._create_fake_instance_obj(params={'host': host})
        instance.task_state = task_states.SHELVING
        instance.save()
        self.useFixture(utils_fixture.TimeFixture())
        fake_bdms = objects.BlockDeviceMappingList()
        mock_get_bdms.return_value = fake_bdms

        with mock.patch.object(instance, 'save'):
            self.compute.shelve_offload_instance(self.context,
                                                 instance,
                                                 clean_shutdown=clean_shutdown)
            mock_notify.assert_has_calls([
                mock.call(self.context,
                          instance,
                          'fake-mini',
                          action='shelve_offload',
                          phase='start',
                          bdms=fake_bdms),
                mock.call(self.context,
                          instance,
                          'fake-mini',
                          action='shelve_offload',
                          phase='end',
                          bdms=fake_bdms)
            ])

        self.assertEqual(vm_states.SHELVED_OFFLOADED, instance.vm_state)
        self.assertIsNone(instance.task_state)
        self.assertTrue(mock_terminate.called)

        # prepare expect call lists
        mock_notify_instance_usage_call_list = [
            mock.call(self.context, instance, 'shelve_offload.start'),
            mock.call(self.context, instance, 'shelve_offload.end')
        ]

        mock_notify_instance_usage.assert_has_calls(
            mock_notify_instance_usage_call_list)
        # The local host variable is used instead of instance.host because the
        # original instance.host is cleared after
        # ComputeManager.shelve_offload_instance runs.
        mock_get_power_state.assert_called_once_with(self.context, instance)
        mock_update_resource_tracker.assert_called_once_with(
            self.context, instance)
        mock_delete_alloc.assert_called_once_with(self.context, instance)
        mock_event.assert_called_once_with(self.context,
                                           'compute_shelve_offload_instance',
                                           instance.uuid)

        return instance
Example 7: a variant of the shelve-offload test that stubs Instance.save to assert allocations are deleted before vm_state changes to SHELVED_OFFLOADED.
    def _shelve_offload(self,
                        mock_notify,
                        mock_notify_instance_usage,
                        mock_get_power_state,
                        mock_update_resource_tracker,
                        mock_delete_alloc,
                        mock_terminate,
                        mock_get_bdms,
                        mock_event,
                        clean_shutdown=True):
        host = 'fake-mini'
        instance = self._create_fake_instance_obj(params={'host': host})
        instance.task_state = task_states.SHELVING
        instance.save()
        self.useFixture(utils_fixture.TimeFixture())
        fake_bdms = objects.BlockDeviceMappingList()
        mock_get_bdms.return_value = fake_bdms

        def stub_instance_save(inst, *args, **kwargs):
            # If the vm_state is changed to SHELVED_OFFLOADED make sure we
            # have already freed up allocations in placement.
            if inst.vm_state == vm_states.SHELVED_OFFLOADED:
                self.assertTrue(
                    mock_delete_alloc.called,
                    'Allocations must be deleted before the '
                    'vm_status can change to shelved_offloaded.')

        self.stub_out('nova.objects.Instance.save', stub_instance_save)
        self.compute.shelve_offload_instance(self.context,
                                             instance,
                                             clean_shutdown=clean_shutdown)
        mock_notify.assert_has_calls([
            mock.call(self.context,
                      instance,
                      'fake-mini',
                      action='shelve_offload',
                      phase='start',
                      bdms=fake_bdms),
            mock.call(self.context,
                      instance,
                      'fake-mini',
                      action='shelve_offload',
                      phase='end',
                      bdms=fake_bdms)
        ])

        self.assertEqual(vm_states.SHELVED_OFFLOADED, instance.vm_state)
        self.assertIsNone(instance.task_state)
        self.assertTrue(mock_terminate.called)

        # prepare expect call lists
        mock_notify_instance_usage_call_list = [
            mock.call(self.context, instance, 'shelve_offload.start'),
            mock.call(self.context, instance, 'shelve_offload.end')
        ]

        mock_notify_instance_usage.assert_has_calls(
            mock_notify_instance_usage_call_list)
        # The local host variable is used instead of instance.host because the
        # original instance.host is cleared after
        # ComputeManager.shelve_offload_instance runs.
        mock_get_power_state.assert_called_once_with(self.context, instance)
        mock_update_resource_tracker.assert_called_once_with(
            self.context, instance)
        mock_delete_alloc.assert_called_once_with(self.context, instance)
        mock_event.assert_called_once_with(self.context,
                                           'compute_shelve_offload_instance',
                                           CONF.host,
                                           instance.uuid,
                                           graceful_exit=False)

        return instance
Example 8: a Nova rpc.ClientRouter test. TimeFixture(t0) plus advance_time_seconds() is used to expire cached per-cell RPC clients.
    def test_remove_stale_clients(self, mock_rpcclient, mock_create, mock_get):
        t0 = datetime.datetime(2016, 8, 9, 0, 0, 0)
        time_fixture = self.useFixture(utils_fixture.TimeFixture(t0))

        default_client = mock.Mock()
        ctxt = mock.Mock()

        cm1 = objects.CellMapping(uuid=uuids.cell_mapping1,
                                  transport_url='fake:///')
        cm2 = objects.CellMapping(uuid=uuids.cell_mapping2,
                                  transport_url='fake:///')
        cm3 = objects.CellMapping(uuid=uuids.cell_mapping3,
                                  transport_url='fake:///')
        mock_get.side_effect = [
            objects.InstanceMapping(cell_mapping=cm1),
            objects.InstanceMapping(cell_mapping=cm2),
            objects.InstanceMapping(cell_mapping=cm3),
            objects.InstanceMapping(cell_mapping=cm3)
        ]
        instance1 = objects.Instance(uuid=uuids.instance1)
        instance2 = objects.Instance(uuid=uuids.instance2)
        instance3 = objects.Instance(uuid=uuids.instance3)

        router = rpc.ClientRouter(default_client)
        cell1_client = router.by_instance(ctxt, instance1)
        cell2_client = router.by_instance(ctxt, instance2)

        # default client, cell1 client, cell2 client
        self.assertEqual(3, len(router.clients))
        expected = {
            'default': default_client,
            uuids.cell_mapping1: cell1_client,
            uuids.cell_mapping2: cell2_client
        }
        for client_id, client in expected.items():
            self.assertEqual(client, router.clients[client_id].client)

        # expire cell1 client and cell2 client
        time_fixture.advance_time_seconds(80)

        # add cell3 client
        cell3_client = router.by_instance(ctxt, instance3)

        router._remove_stale_clients(ctxt)

        # default client, cell3 client
        expected = {
            'default': default_client,
            uuids.cell_mapping3: cell3_client
        }
        self.assertEqual(2, len(router.clients))
        for client_id, client in expected.items():
            self.assertEqual(client, router.clients[client_id].client)

        # expire cell3 client
        time_fixture.advance_time_seconds(80)

        # access cell3 client to refresh it
        cell3_client = router.by_instance(ctxt, instance3)

        router._remove_stale_clients(ctxt)

        # default client and cell3 client should be there
        self.assertEqual(2, len(router.clients))
        for client_id, client in expected.items():
            self.assertEqual(client, router.clients[client_id].client)
Example 9: a minimal standalone demo of oslo_utils fixture.TimeFixture overriding timeutils.utcnow.
'''
Requires the fixtures package:
    pip install fixtures

otherwise the import below fails with:
    ImportError: No module named fixtures
'''

from oslo_utils import fixture
from oslo_utils import timeutils
import time

print(timeutils.utcnow.override_time)      # None: time is not overridden yet
with fixture.TimeFixture():
    # Inside the fixture the clock is frozen at the moment it was set up.
    print(timeutils.utcnow.override_time)
    time.sleep(1)
    # Same value again: real sleeping does not advance the frozen clock.
    print(timeutils.utcnow.override_time)

# None once more: the fixture clears the override when the block exits.
print(timeutils.utcnow.override_time)
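
The Nova and Glance tests above also move the frozen clock forward (advance_time_seconds in Example 8, advance_time_delta in Example 2). Below is a minimal standalone sketch of the same idea, assuming only that oslo_utils and fixtures are installed; the starting time t0 is an arbitrary illustrative value.

import datetime

from oslo_utils import fixture
from oslo_utils import timeutils

# Freeze the clock at an arbitrary starting point.
t0 = datetime.datetime(2016, 8, 9, 0, 0, 0)
with fixture.TimeFixture(t0) as tf:
    assert timeutils.utcnow() == t0                      # frozen at t0
    tf.advance_time_seconds(80)                          # jump forward 80 seconds
    assert timeutils.utcnow() == t0 + datetime.timedelta(seconds=80)
    tf.advance_time_delta(datetime.timedelta(hours=2))   # jump forward two hours
    assert timeutils.utcnow() == t0 + datetime.timedelta(hours=2, seconds=80)

Both helpers simply forward to timeutils.advance_time_delta() / timeutils.advance_time_seconds(), so any code that calls timeutils.utcnow() observes the advanced value until the fixture is cleaned up.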