Example #1
0
    def test_deferred_delete(self):
        # Creates, deletes and waits for server to be reclaimed.
        self.flags(reclaim_instance_interval=1)
        fake_network.set_stub_network_methods(self.stubs)

        # Create server
        server = self._build_minimal_create_server_request()

        created_server = self.api.post_server({'server': server})
        LOG.debug("created_server: %s" % created_server)
        self.assertTrue(created_server['id'])
        created_server_id = created_server['id']

        # Wait for it to finish being created
        found_server = self._wait_for_state_change(created_server, 'BUILD')

        # It should be available...
        self.assertEqual('ACTIVE', found_server['status'])

        # Cannot restore unless instance is deleted
        self.assertRaises(client.OpenStackApiException,
                          self.api.post_server_action, created_server_id,
                          {'restore': {}})

        # Delete the server
        self.api.delete_server(created_server_id)

        # Wait for queued deletion
        found_server = self._wait_for_state_change(found_server, 'ACTIVE')
        self.assertEqual('SOFT_DELETED', found_server['status'])

        self._force_reclaim()

        # Wait for real deletion
        self._wait_for_deletion(created_server_id)
Example #2
0
    def setUp(self):
        def fake_get_nw_info(cls, ctxt, instance):
            self.assertTrue(ctxt.is_admin)
            return fake_network.fake_get_instance_nw_info(self.stubs, 1, 1)

        super(UsageInfoTestCase, self).setUp()
        self.stubs.Set(network_api.API, 'get_instance_nw_info',
                       fake_get_nw_info)

        fake_notifier.stub_notifier(self.stubs)
        self.addCleanup(fake_notifier.reset)

        self.flags(use_local=True, group='conductor')
        self.flags(compute_driver='nova.virt.fake.FakeDriver',
                   network_manager='nova.network.manager.FlatManager')
        self.compute = importutils.import_object(CONF.compute_manager)
        self.user_id = 'fake'
        self.project_id = 'fake'
        self.context = context.RequestContext(self.user_id, self.project_id)

        def fake_show(meh, context, id):
            return {'id': 1, 'properties': {'kernel_id': 1, 'ramdisk_id': 1}}

        self.stubs.Set(nova.tests.image.fake._FakeImageService,
                       'show', fake_show)
        fake_network.set_stub_network_methods(self.stubs)
        fake_instance_actions.stub_out_action_events(self.stubs)
Example #3
0
    def test_deferred_delete_force(self):
        # Creates, deletes and force deletes a server.
        self.flags(reclaim_instance_interval=3600)
        fake_network.set_stub_network_methods(self.stubs)

        # Create server
        server = self._build_minimal_create_server_request()

        created_server = self.api.post_server({'server': server})
        LOG.debug("created_server: %s" % created_server)
        self.assertTrue(created_server['id'])
        created_server_id = created_server['id']

        # Wait for it to finish being created
        found_server = self._wait_for_state_change(created_server, 'BUILD')

        # It should be available...
        self.assertEqual('ACTIVE', found_server['status'])

        # Delete the server
        self.api.delete_server(created_server_id)

        # Wait for queued deletion
        found_server = self._wait_for_state_change(found_server, 'ACTIVE')
        self.assertEqual('DELETED', found_server['status'])

        # Force delete server
        self.api.post_server_action(created_server_id, {'forceDelete': {}})

        # Wait for real deletion
        self._wait_for_deletion(created_server_id)
Example #4
0
    def test_create_and_delete_server(self):
        """Creates and deletes a server."""
        fake_network.set_stub_network_methods(self.stubs)

        # Create server
        # Build the server data gradually, checking errors along the way
        server = {}
        good_server = self._build_minimal_create_server_request()

        post = {"server": server}

        # Without an imageRef, this throws 500.
        # TODO(justinsb): Check whatever the spec says should be thrown here
        self.assertRaises(client.OpenStackApiException, self.api.post_server, post)

        # With an invalid imageRef, this throws 500.
        server["imageRef"] = self.get_invalid_image()
        # TODO(justinsb): Check whatever the spec says should be thrown here
        self.assertRaises(client.OpenStackApiException, self.api.post_server, post)

        # Add a valid imageRef
        server["imageRef"] = good_server.get("imageRef")

        # Without flavorRef, this throws 500
        # TODO(justinsb): Check whatever the spec says should be thrown here
        self.assertRaises(client.OpenStackApiException, self.api.post_server, post)

        server["flavorRef"] = good_server.get("flavorRef")

        # Without a name, this throws 500
        # TODO(justinsb): Check whatever the spec says should be thrown here
        self.assertRaises(client.OpenStackApiException, self.api.post_server, post)

        # Set a valid server name
        server["name"] = good_server["name"]

        created_server = self.api.post_server(post)
        LOG.debug("created_server: %s" % created_server)
        self.assertTrue(created_server["id"])
        created_server_id = created_server["id"]

        # Check it's there
        found_server = self.api.get_server(created_server_id)
        self.assertEqual(created_server_id, found_server["id"])

        # It should also be in the all-servers list
        servers = self.api.get_servers()
        server_ids = [server["id"] for server in servers]
        self.assertTrue(created_server_id in server_ids)

        found_server = self._wait_for_state_change(found_server, "BUILD")
        # It should be available...
        # TODO(justinsb): Mock doesn't yet do this...
        self.assertEqual("ACTIVE", found_server["status"])
        servers = self.api.get_servers(detail=True)
        for server in servers:
            self.assertTrue("image" in server)
            self.assertTrue("flavor" in server)

        self._delete_server(created_server_id)
Example #5
0
    def setUp(self):
        super(CinderCloudTestCase, self).setUp()
        vol_tmpdir = tempfile.mkdtemp()
        self.flags(compute_driver='nova.virt.fake.FakeDriver',
                   volume_api_class='nova.tests.fake_volume.API')

        def fake_show(meh, context, id):
            return {
                'id': id,
                'name': 'fake_name',
                'container_format': 'ami',
                'status': 'active',
                'properties': {
                    'kernel_id': 'cedef40a-ed67-4d10-800e-17455edce175',
                    'ramdisk_id': 'cedef40a-ed67-4d10-800e-17455edce175',
                    'type': 'machine',
                    'image_state': 'available'
                }
            }

        def fake_detail(_self, context, **kwargs):
            image = fake_show(None, context, None)
            image['name'] = kwargs.get('filters', {}).get('name')
            return [image]

        self.stubs.Set(fake._FakeImageService, 'show', fake_show)
        self.stubs.Set(fake._FakeImageService, 'detail', fake_detail)
        fake.stub_out_image_service(self.stubs)

        def dumb(*args, **kwargs):
            pass

        self.stubs.Set(compute_utils, 'notify_about_instance_usage', dumb)
        fake_network.set_stub_network_methods(self.stubs)

        # set up our cloud
        self.cloud = cloud.CloudController()
        self.flags(scheduler_driver='nova.scheduler.chance.ChanceScheduler')

        # set up services
        self.compute = self.start_service('compute')
        self.scheduler = self.start_service('scheduler')
        self.network = self.start_service('network')

        self.user_id = 'fake'
        self.project_id = 'fake'
        self.context = context.RequestContext(self.user_id,
                                              self.project_id,
                                              is_admin=True)
        self.volume_api = volume.API()

        # NOTE(comstud): Make 'cast' behave like a 'call' which will
        # ensure that operations complete
        self.stubs.Set(rpc, 'cast', rpc.call)

        # make sure we can map ami-00000001/2 to a uuid in FakeImageService
        db.api.s3_image_create(self.context,
                               'cedef40a-ed67-4d10-800e-17455edce175')
        db.api.s3_image_create(self.context,
                               '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6')
Example #6
0
    def setUp(self):
        super(NotificationsTestCase, self).setUp()

        self.net_info = fake_network.fake_get_instance_nw_info(self.stubs, 1, 1)

        def fake_get_nw_info(cls, ctxt, instance):
            self.assertTrue(ctxt.is_admin)
            return self.net_info

        self.stubs.Set(network_api.API, "get_instance_nw_info", fake_get_nw_info)
        fake_network.set_stub_network_methods(self.stubs)

        fake_notifier.stub_notifier(self.stubs)
        self.addCleanup(fake_notifier.reset)

        self.flags(
            compute_driver="nova.virt.fake.FakeDriver",
            network_manager="nova.network.manager.FlatManager",
            notify_on_state_change="vm_and_task_state",
            host="testhost",
        )

        self.user_id = "fake"
        self.project_id = "fake"
        self.context = context.RequestContext(self.user_id, self.project_id)

        self.instance = self._wrapped_create()
Example #7
0
    def setUp(self):
        super(NotificationsTestCase, self).setUp()

        self.net_info = fake_network.fake_get_instance_nw_info(
            self.stubs, 1, 1, spectacular=True)

        def fake_get_nw_info(cls, ctxt, instance):
            self.assertTrue(ctxt.is_admin)
            return self.net_info

        self.stubs.Set(network_api.API, 'get_instance_nw_info',
                       fake_get_nw_info)
        fake_network.set_stub_network_methods(self.stubs)

        self.flags(compute_driver='nova.virt.fake.FakeDriver',
                   notification_driver=[test_notifier.__name__],
                   network_manager='nova.network.manager.FlatManager',
                   notify_on_state_change="vm_and_task_state",
                   host='testhost')

        self.user_id = 'fake'
        self.project_id = 'fake'
        self.context = context.RequestContext(self.user_id, self.project_id)
        test_notifier.NOTIFICATIONS = []

        self.instance = self._wrapped_create()
Example #8
0
    def setUp(self):
        super(NotificationsTestCase, self).setUp()

        self.net_info = fake_network.fake_get_instance_nw_info(self.stubs, 1,
                                                        1, spectacular=True)

        def fake_get_nw_info(cls, ctxt, instance):
            self.assertTrue(ctxt.is_admin)
            return self.net_info

        self.stubs.Set(network_api.API, 'get_instance_nw_info',
                fake_get_nw_info)
        fake_network.set_stub_network_methods(self.stubs)

        self.flags(compute_driver='nova.virt.fake.FakeDriver',
                   notification_driver=[test_notifier.__name__],
                   network_manager='nova.network.manager.FlatManager',
                   notify_on_state_change="vm_and_task_state",
                   host='testhost')

        self.user_id = 'fake'
        self.project_id = 'fake'
        self.context = context.RequestContext(self.user_id, self.project_id)
        test_notifier.NOTIFICATIONS = []

        self.instance = self._wrapped_create()
Example #9
0
    def test_deferred_delete_force(self):
        """Creates, deletes and force deletes a server."""
        self.flags(reclaim_instance_interval=1)
        fake_network.set_stub_network_methods(self.stubs)

        # Create server
        server = self._build_minimal_create_server_request()

        created_server = self.api.post_server({"server": server})
        LOG.debug("created_server: %s" % created_server)
        self.assertTrue(created_server["id"])
        created_server_id = created_server["id"]

        # Wait for it to finish being created
        found_server = self._wait_for_state_change(created_server, "BUILD")

        # It should be available...
        self.assertEqual("ACTIVE", found_server["status"])

        # Delete the server
        self.api.delete_server(created_server_id)

        # Wait for queued deletion
        found_server = self._wait_for_state_change(found_server, "ACTIVE")
        self.assertEqual("DELETED", found_server["status"])

        # Force delete server
        self.api.post_server_action(created_server_id, {"forceDelete": {}})

        # Wait for real deletion
        self._wait_for_deletion(created_server_id)
Example #10
0
    def test_deferred_delete_restore(self):
        """Creates, deletes and restores a server."""
        self.flags(reclaim_instance_interval=1)
        fake_network.set_stub_network_methods(self.stubs)

        # Create server
        server = self._build_minimal_create_server_request()

        created_server = self.api.post_server({'server': server})
        LOG.debug("created_server: %s" % created_server)
        self.assertTrue(created_server['id'])
        created_server_id = created_server['id']

        # Wait for it to finish being created
        found_server = self._wait_for_state_change(created_server, 'BUILD')

        # It should be available...
        self.assertEqual('ACTIVE', found_server['status'])

        # Delete the server
        self.api.delete_server(created_server_id)

        # Wait for queued deletion
        found_server = self._wait_for_state_change(found_server, 'ACTIVE')
        self.assertEqual('DELETED', found_server['status'])

        # Restore server
        self.api.post_server_action(created_server_id, {'restore': {}})

        # Wait for server to become active again
        found_server = self._wait_for_state_change(found_server, 'DELETED')
        self.assertEqual('ACTIVE', found_server['status'])
Example #11
0
    def test_deferred_delete_force(self):
        # Creates, deletes and force deletes a server.
        self.flags(reclaim_instance_interval=3600)
        fake_network.set_stub_network_methods(self.stubs)

        # Create server
        server = self._build_minimal_create_server_request()

        created_server = self.api.post_server({'server': server})
        LOG.debug("created_server: %s" % created_server)
        self.assertTrue(created_server['id'])
        created_server_id = created_server['id']

        # Wait for it to finish being created
        found_server = self._wait_for_state_change(created_server, 'BUILD')

        # It should be available...
        self.assertEqual('ACTIVE', found_server['status'])

        # Delete the server
        self.api.delete_server(created_server_id)

        # Wait for queued deletion
        found_server = self._wait_for_state_change(found_server, 'ACTIVE')
        self.assertEqual('SOFT_DELETED', found_server['status'])

        # Force delete server
        self.api.post_server_action(created_server_id,
                                    {self._force_delete_parameter: {}})

        # Wait for real deletion
        self._wait_for_deletion(created_server_id)
Example #12
0
    def test_deferred_delete(self):
        # Creates, deletes and waits for server to be reclaimed.
        self.flags(reclaim_instance_interval=1)
        fake_network.set_stub_network_methods(self.stubs)

        # Create server
        server = self._build_minimal_create_server_request()

        created_server = self.api.post_server({'server': server})
        LOG.debug("created_server: %s" % created_server)
        self.assertTrue(created_server['id'])
        created_server_id = created_server['id']

        # Wait for it to finish being created
        found_server = self._wait_for_state_change(created_server, 'BUILD')

        # It should be available...
        self.assertEqual('ACTIVE', found_server['status'])

        # Cannot restore unless instance is deleted
        self.assertRaises(client.OpenStackApiException,
                          self.api.post_server_action, created_server_id,
                          {'restore': {}})

        # Delete the server
        self.api.delete_server(created_server_id)

        # Wait for queued deletion
        found_server = self._wait_for_state_change(found_server, 'ACTIVE')
        self.assertEqual('SOFT_DELETED', found_server['status'])

        self._force_reclaim()

        # Wait for real deletion
        self._wait_for_deletion(created_server_id)
Example #13
0
    def setUp(self):
        def fake_get_nw_info(cls, ctxt, instance):
            self.assertTrue(ctxt.is_admin)
            return fake_network.fake_get_instance_nw_info(self.stubs, 1, 1,
                                                          spectacular=True)

        super(UsageInfoTestCase, self).setUp()
        self.stubs.Set(network_api.API, 'get_instance_nw_info',
                       fake_get_nw_info)

        self.flags(compute_driver='nova.virt.fake.FakeDriver',
                   notification_driver=[test_notifier.__name__],
                   network_manager='nova.network.manager.FlatManager')
        self.compute = importutils.import_object(CONF.compute_manager)
        self.user_id = 'fake'
        self.project_id = 'fake'
        self.context = context.RequestContext(self.user_id, self.project_id)
        test_notifier.NOTIFICATIONS = []

        def fake_show(meh, context, id):
            return {'id': 1, 'properties': {'kernel_id': 1, 'ramdisk_id': 1}}

        self.stubs.Set(nova.tests.image.fake._FakeImageService,
                       'show', fake_show)
        fake_network.set_stub_network_methods(self.stubs)
Example #14
0
    def setUp(self):
        super(CinderCloudTestCase, self).setUp()
        vol_tmpdir = tempfile.mkdtemp()
        self.flags(compute_driver='nova.virt.fake.FakeDriver',
                   volume_api_class='nova.tests.fake_volume.API')

        def fake_show(meh, context, id):
            return {'id': id,
                    'name': 'fake_name',
                    'container_format': 'ami',
                    'status': 'active',
                    'properties': {
                        'kernel_id': 'cedef40a-ed67-4d10-800e-17455edce175',
                        'ramdisk_id': 'cedef40a-ed67-4d10-800e-17455edce175',
                        'type': 'machine',
                        'image_state': 'available'}}

        def fake_detail(_self, context, **kwargs):
            image = fake_show(None, context, None)
            image['name'] = kwargs.get('filters', {}).get('name')
            return [image]

        self.stubs.Set(fake._FakeImageService, 'show', fake_show)
        self.stubs.Set(fake._FakeImageService, 'detail', fake_detail)
        fake.stub_out_image_service(self.stubs)

        def dumb(*args, **kwargs):
            pass

        self.stubs.Set(compute_utils, 'notify_about_instance_usage', dumb)
        fake_network.set_stub_network_methods(self.stubs)

        # set up our cloud
        self.cloud = cloud.CloudController()
        self.flags(scheduler_driver='nova.scheduler.chance.ChanceScheduler')

        # Short-circuit the conductor service
        self.flags(use_local=True, group='conductor')

        # set up services
        self.compute = self.start_service('compute')
        self.scheduler = self.start_service('scheduler')
        self.network = self.start_service('network')

        self.user_id = 'fake'
        self.project_id = 'fake'
        self.context = context.RequestContext(self.user_id,
                                              self.project_id,
                                              is_admin=True)
        self.volume_api = volume.API()

        # NOTE(comstud): Make 'cast' behave like a 'call' which will
        # ensure that operations complete
        self.stubs.Set(rpc, 'cast', rpc.call)

        # make sure we can map ami-00000001/2 to a uuid in FakeImageService
        db.api.s3_image_create(self.context,
                               'cedef40a-ed67-4d10-800e-17455edce175')
        db.api.s3_image_create(self.context,
                               '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6')
Example #15
0
    def setUp(self):
        def fake_get_nw_info(cls, ctxt, instance):
            self.assertTrue(ctxt.is_admin)
            return fake_network.fake_get_instance_nw_info(self.stubs,
                                                          1,
                                                          1,
                                                          spectacular=True)

        super(UsageInfoTestCase, self).setUp()
        self.stubs.Set(nova.network.API, 'get_instance_nw_info',
                       fake_get_nw_info)

        self.flags(compute_driver='nova.virt.fake.FakeDriver',
                   notification_driver=[
                       'nova.openstack.common.notifier.test_notifier'
                   ],
                   network_manager='nova.network.manager.FlatManager')
        self.compute = importutils.import_object(FLAGS.compute_manager)
        self.user_id = 'fake'
        self.project_id = 'fake'
        self.context = context.RequestContext(self.user_id, self.project_id)
        test_notifier.NOTIFICATIONS = []

        def fake_show(meh, context, id):
            return {'id': 1, 'properties': {'kernel_id': 1, 'ramdisk_id': 1}}

        self.stubs.Set(nova.tests.image.fake._FakeImageService, 'show',
                       fake_show)
        fake_network.set_stub_network_methods(self.stubs)
Example #16
0
    def setUp(self):
        super(CinderCloudTestCase, self).setUp()
        vol_tmpdir = tempfile.mkdtemp()
        self.flags(
            compute_driver="nova.virt.fake.FakeDriver",
            volume_api_class="nova.tests.fake_volume.API",
            volumes_dir=vol_tmpdir,
        )

        def fake_show(meh, context, id):
            return {
                "id": id,
                "name": "fake_name",
                "container_format": "ami",
                "properties": {
                    "kernel_id": "cedef40a-ed67-4d10-800e-17455edce175",
                    "ramdisk_id": "cedef40a-ed67-4d10-800e-17455edce175",
                    "type": "machine",
                    "image_state": "available",
                },
            }

        def fake_detail(_self, context, **kwargs):
            image = fake_show(None, context, None)
            image["name"] = kwargs.get("filters", {}).get("name")
            return [image]

        self.stubs.Set(fake._FakeImageService, "show", fake_show)
        self.stubs.Set(fake._FakeImageService, "detail", fake_detail)
        fake.stub_out_image_service(self.stubs)

        def dumb(*args, **kwargs):
            pass

        self.stubs.Set(compute_utils, "notify_about_instance_usage", dumb)
        fake_network.set_stub_network_methods(self.stubs)

        # set up our cloud
        self.cloud = cloud.CloudController()
        self.flags(compute_scheduler_driver="nova.scheduler.chance.ChanceScheduler")

        # set up services
        self.compute = self.start_service("compute")
        self.scheduler = self.start_service("scheduler")
        self.network = self.start_service("network")
        self.volume = self.start_service("volume")

        self.user_id = "fake"
        self.project_id = "fake"
        self.context = context.RequestContext(self.user_id, self.project_id, is_admin=True)
        self.volume_api = volume.API()

        # NOTE(comstud): Make 'cast' behave like a 'call' which will
        # ensure that operations complete
        self.stubs.Set(rpc, "cast", rpc.call)

        # make sure we can map ami-00000001/2 to a uuid in FakeImageService
        db.api.s3_image_create(self.context, "cedef40a-ed67-4d10-800e-17455edce175")
        db.api.s3_image_create(self.context, "76fa36fc-c930-4bf3-8c8a-ea2a2420deb6")
Example #17
0
    def setUp(self):
        super(EC2ValidateTestCase, self).setUp()
        self.flags(compute_driver="nova.virt.fake.FakeDriver")

        def dumb(*args, **kwargs):
            pass

        self.stubs.Set(compute_utils, "notify_about_instance_usage", dumb)
        fake_network.set_stub_network_methods(self.stubs)

        # set up our cloud
        self.cloud = cloud.CloudController()

        # set up services
        self.conductor = self.start_service("conductor", manager=CONF.conductor.manager)
        self.compute = self.start_service("compute")
        self.scheduler = self.start_service("scheduler")
        self.network = self.start_service("network")
        self.image_service = fake.FakeImageService()

        self.user_id = "fake"
        self.project_id = "fake"
        self.context = context.RequestContext(self.user_id, self.project_id, is_admin=True)

        self.EC2_MALFORMED_IDS = ["foobar", "", 123]
        self.EC2_VALID__IDS = ["i-284f3a41", "i-001", "i-deadbeef"]

        self.ec2_id_exception_map = [(x, exception.InvalidInstanceIDMalformedEC2) for x in self.EC2_MALFORMED_IDS]
        self.ec2_id_exception_map.extend([(x, exception.InstanceNotFound) for x in self.EC2_VALID__IDS])
        self.volume_id_exception_map = [(x, exception.InvalidInstanceIDMalformedEC2) for x in self.EC2_MALFORMED_IDS]
        self.volume_id_exception_map.extend([(x, exception.VolumeNotFound) for x in self.EC2_VALID__IDS])

        def fake_show(meh, context, id):
            return {
                "id": id,
                "container_format": "ami",
                "properties": {
                    "kernel_id": "cedef40a-ed67-4d10-800e-17455edce175",
                    "ramdisk_id": "cedef40a-ed67-4d10-800e-17455edce175",
                    "type": "machine",
                    "image_state": "available",
                },
            }

        def fake_detail(self, context, **kwargs):
            image = fake_show(self, context, None)
            image["name"] = kwargs.get("name")
            return [image]

        fake.stub_out_image_service(self.stubs)
        self.stubs.Set(fake._FakeImageService, "show", fake_show)
        self.stubs.Set(fake._FakeImageService, "detail", fake_detail)

        self.useFixture(cast_as_call.CastAsCall(self.stubs))

        # make sure we can map ami-00000001/2 to a uuid in FakeImageService
        db.s3_image_create(self.context, "cedef40a-ed67-4d10-800e-17455edce175")
        db.s3_image_create(self.context, "76fa36fc-c930-4bf3-8c8a-ea2a2420deb6")
Example #18
0
    def setUp(self):
        super(TestNovaNotifier, self).setUp()
        nova_CONF.compute_driver = 'nova.virt.fake.FakeDriver'
        nova_CONF.notification_driver = [nova_notifier.__name__]
        nova_CONF.rpc_backend = 'ceilometer.openstack.common.rpc.impl_fake'
        self.compute = importutils.import_object(nova_CONF.compute_manager)
        self.context = context.get_admin_context()
        fake_network.set_stub_network_methods(self.stubs)

        self.instance = {
            "name": "instance-1",
            "id": 1,
            "image_ref": "FAKE",
            "user_id": "FAKE",
            "project_id": "FAKE",
            "display_name": "FAKE NAME",
            "hostname": "abcdef",
            "reservation_id": "FAKE RID",
            "instance_type_id": 1,
            "architecture": "x86",
            "memory_mb": "1024",
            "root_gb": "20",
            "ephemeral_gb": "0",
            "vcpus": 1,
            "host": "fakehost",
            "availability_zone": "1e3ce043029547f1a61c1996d1a531a4",
            "created_at": '2012-05-08 20:23:41',
            "os_type": "linux",
            "kernel_id": "kernelid",
            "ramdisk_id": "ramdiskid",
            "vm_state": vm_states.ACTIVE,
            "access_ip_v4": "someip",
            "access_ip_v6": "someip",
            "metadata": {},
            "uuid": "144e08f4-00cb-11e2-888e-5453ed1bbb5f",
            "system_metadata": {}
        }
        self.stubs.Set(db, 'instance_info_cache_delete', self.do_nothing)
        self.stubs.Set(db, 'instance_destroy', self.do_nothing)
        self.stubs.Set(db, 'instance_system_metadata_get',
                       self.fake_db_instance_system_metadata_get)
        self.stubs.Set(db, 'block_device_mapping_get_all_by_instance',
                       lambda context, instance: {})
        self.stubs.Set(
            db, 'instance_update_and_get_original',
            lambda context, uuid, kwargs: (self.instance, self.instance))

        agent_manager = manager.AgentManager()
        agent_manager.pollster_manager = \
            test_manager.TestExtensionManager([
                extension.Extension('test',
                                    None,
                                    None,
                                    self.Pollster(),
                                    ),
            ])
        nova_notifier.initialize_manager(agent_manager)
Example #19
0
    def setUp(self):
        super(TestNovaNotifier, self).setUp()
        nova_CONF.compute_driver = 'nova.virt.fake.FakeDriver'
        nova_CONF.notification_driver = [nova_notifier.__name__]
        nova_CONF.rpc_backend = 'ceilometer.openstack.common.rpc.impl_fake'
        self.compute = importutils.import_object(nova_CONF.compute_manager)
        self.context = context.get_admin_context()
        fake_network.set_stub_network_methods(self.stubs)

        self.instance = {"name": "instance-1",
                         "id": 1,
                         "image_ref": "FAKE",
                         "user_id": "FAKE",
                         "project_id": "FAKE",
                         "display_name": "FAKE NAME",
                         "hostname": "abcdef",
                         "reservation_id": "FAKE RID",
                         "instance_type_id": 1,
                         "architecture": "x86",
                         "memory_mb": "1024",
                         "root_gb": "20",
                         "ephemeral_gb": "0",
                         "vcpus": 1,
                         "host": "fakehost",
                         "availability_zone":
                         "1e3ce043029547f1a61c1996d1a531a4",
                         "created_at": '2012-05-08 20:23:41',
                         "os_type": "linux",
                         "kernel_id": "kernelid",
                         "ramdisk_id": "ramdiskid",
                         "vm_state": vm_states.ACTIVE,
                         "access_ip_v4": "someip",
                         "access_ip_v6": "someip",
                         "metadata": {},
                         "uuid": "144e08f4-00cb-11e2-888e-5453ed1bbb5f",
                         "system_metadata": {}}
        self.stubs.Set(db, 'instance_info_cache_delete', self.do_nothing)
        self.stubs.Set(db, 'instance_destroy', self.do_nothing)
        self.stubs.Set(db, 'instance_system_metadata_get',
                       self.fake_db_instance_system_metadata_get)
        self.stubs.Set(db, 'block_device_mapping_get_all_by_instance',
                       lambda context, instance: {})
        self.stubs.Set(db, 'instance_update_and_get_original',
                       lambda context, uuid, kwargs: (self.instance,
                                                      self.instance))

        self.stubs.Set(publish, 'publish_counter', self.do_nothing)
        agent_manager = manager.AgentManager()
        agent_manager.ext_manager = \
            test_manager.TestExtensionManager([
                extension.Extension('test',
                                    None,
                                    None,
                                    self.Pollster(),
                                    ),
            ])
        nova_notifier.initialize_manager(agent_manager)
Example #20
0
    def setUp(self):
        super(UsageInfoTestCase, self).setUp()
        self.flags(compute_driver='nova.virt.fake.FakeDriver',
                   host='fake',
                   notification_driver=[test_notifier.__name__])
        fake_network.set_stub_network_methods(self.stubs)

        self.volume = importutils.import_object(FLAGS.volume_manager)
        self.user_id = 'fake'
        self.project_id = 'fake'
        self.snapshot_id = 'fake'
        self.volume_size = 0
        self.context = context.RequestContext(self.user_id, self.project_id)
        test_notifier.NOTIFICATIONS = []
Example #21
0
    def setUp(self):
        super(UsageInfoTestCase, self).setUp()
        self.flags(compute_driver='nova.virt.fake.FakeDriver',
                   host='fake',
                   notification_driver=[test_notifier.__name__])
        fake_network.set_stub_network_methods(self.stubs)

        self.volume = importutils.import_object(FLAGS.volume_manager)
        self.user_id = 'fake'
        self.project_id = 'fake'
        self.snapshot_id = 'fake'
        self.volume_size = 0
        self.context = context.RequestContext(self.user_id, self.project_id)
        test_notifier.NOTIFICATIONS = []
Example #22
0
    def setUp(self):
        super(TestNovaNotifier, self).setUp()
        flags.FLAGS.compute_driver = 'nova.virt.fake.FakeDriver'
        flags.FLAGS.notification_driver = [nova_notifier.__name__]
        self.compute = importutils.import_object(flags.FLAGS.compute_manager)
        self.context = context.RequestContext('admin', 'admin', is_admin=True)
        fake_network.set_stub_network_methods(self.stubs)

        self.instance = {"name": "instance-1",
                         "id": 1,
                         "image_ref": "FAKE",
                         "user_id": "FAKE",
                         "project_id": "FAKE",
                         "display_name": "FAKE NAME",
                         "reservation_id": "FAKE RID",
                         "instance_type_id": 1,
                         "architecture": "x86",
                         "memory_mb": "1024",
                         "root_gb": "20",
                         "ephemeral_gb": "0",
                         "vcpus": 1,
                         "host": "fakehost",
                         "availability_zone":
                             "1e3ce043029547f1a61c1996d1a531a4",
                         "created_at": '2012-05-08 20:23:41',
                         "os_type": "linux",
                         "kernel_id": "kernelid",
                         "ramdisk_id": "ramdiskid",
                         "vm_state": vm_states.ACTIVE,
                         "access_ip_v4": "someip",
                         "access_ip_v6": "someip",
                         "metadata": {},
                         "uuid": "144e08f4-00cb-11e2-888e-5453ed1bbb5f"}

        self.stubs.Set(db, 'instance_get_by_uuid', self.fake_db_instance_get)
        self.stubs.Set(db, 'instance_info_cache_delete', self.do_nothing)
        self.stubs.Set(db, 'instance_destroy', self.do_nothing)
        self.stubs.Set(db, 'instance_system_metadata_get',
                       self.fake_db_instance_system_metadata_get)
        self.stubs.Set(db, 'block_device_mapping_get_all_by_instance',
                       lambda context, instance: {})
        self.stubs.Set(db, 'instance_update_and_get_original',
                       lambda context, uuid, kwargs: (self.instance,
                                                      self.instance))

        self.stubs.Set(publish, 'publish_counter', self.do_nothing)
        nova_notifier._initialize_config_options = False
        nova_notifier.initialize_manager()
        nova_notifier._agent_manager.pollsters = [('test', self.Pollster())]
Example #23
0
    def setUp(self):
        super(TestNovaNotifier, self).setUp()
        flags.FLAGS.compute_driver = 'nova.virt.fake.FakeDriver'
        flags.FLAGS.notification_driver = [nova_notifier.__name__]
        self.compute = importutils.import_object(flags.FLAGS.compute_manager)
        self.context = context.RequestContext('admin', 'admin', is_admin=True)
        fake_network.set_stub_network_methods(self.stubs)

        self.instance = {
            "name": "instance-1",
            "id": 1,
            "image_ref": "FAKE",
            "user_id": "FAKE",
            "project_id": "FAKE",
            "display_name": "FAKE NAME",
            "reservation_id": "FAKE RID",
            "instance_type_id": 1,
            "architecture": "x86",
            "memory_mb": "1024",
            "root_gb": "20",
            "ephemeral_gb": "0",
            "vcpus": 1,
            "host": "fakehost",
            "availability_zone": "1e3ce043029547f1a61c1996d1a531a4",
            "created_at": '2012-05-08 20:23:41',
            "os_type": "linux",
            "kernel_id": "kernelid",
            "ramdisk_id": "ramdiskid",
            "vm_state": vm_states.ACTIVE,
            "access_ip_v4": "someip",
            "access_ip_v6": "someip",
            "metadata": {},
            "uuid": "144e08f4-00cb-11e2-888e-5453ed1bbb5f"
        }

        self.stubs.Set(db, 'instance_get', self.fake_db_instance_get)
        self.stubs.Set(db, 'instance_info_cache_delete', self.do_nothing)
        self.stubs.Set(db, 'instance_destroy', self.do_nothing)
        self.stubs.Set(db, 'instance_system_metadata_get',
                       self.fake_db_instance_system_metadata_get)
        self.stubs.Set(db, 'block_device_mapping_get_all_by_instance',
                       lambda context, instance: {})
        self.stubs.Set(
            db, 'instance_update_and_get_original',
            lambda context, uuid, kwargs: (self.instance, self.instance))

        self.stubs.Set(publish, 'publish_counter', self.do_nothing)
        nova_notifier.notify.manager.pollsters = [('test', self.Pollster())]
Example #24
0
    def test_rename_server(self):
        # Test building and renaming a server.
        fake_network.set_stub_network_methods(self.stubs)

        # Create a server
        server = self._build_minimal_create_server_request()
        created_server = self.api.post_server({'server': server})
        LOG.debug("created_server: %s" % created_server)
        server_id = created_server['id']
        self.assertTrue(server_id)

        # Rename the server to 'new-name'
        self.api.put_server(server_id, {'server': {'name': 'new-name'}})

        # Check the name of the server
        created_server = self.api.get_server(server_id)
        self.assertEqual(created_server['name'], 'new-name')

        # Cleanup
        self._delete_server(server_id)
Example #25
0
    def test_create_server_with_error(self):
        # Create a server which will enter error state.
        fake_network.set_stub_network_methods(self.stubs)

        def throw_error(*_):
            raise Exception()

        self.stubs.Set(nova.virt.fake.FakeDriver, 'spawn', throw_error)

        server = self._build_minimal_create_server_request()
        created_server = self.api.post_server({"server": server})
        created_server_id = created_server['id']

        found_server = self.api.get_server(created_server_id)
        self.assertEqual(created_server_id, found_server['id'])

        found_server = self._wait_for_state_change(found_server, 'BUILD')

        self.assertEqual('ERROR', found_server['status'])
        self._delete_server(created_server_id)
Example #26
0
    def test_create_server_with_error(self):
        # Create a server which will enter error state.
        fake_network.set_stub_network_methods(self.stubs)

        def throw_error(*_):
            raise Exception()

        self.stubs.Set(nova.virt.fake.FakeDriver, 'spawn', throw_error)

        server = self._build_minimal_create_server_request()
        created_server = self.api.post_server({"server": server})
        created_server_id = created_server['id']

        found_server = self.api.get_server(created_server_id)
        self.assertEqual(created_server_id, found_server['id'])

        found_server = self._wait_for_state_change(found_server, 'BUILD')

        self.assertEqual('ERROR', found_server['status'])
        self._delete_server(created_server_id)
Example #27
0
    def test_rename_server(self):
        # Test building and renaming a server.
        fake_network.set_stub_network_methods(self.stubs)

        # Create a server
        server = self._build_minimal_create_server_request()
        created_server = self.api.post_server({'server': server})
        LOG.debug("created_server: %s" % created_server)
        server_id = created_server['id']
        self.assertTrue(server_id)

        # Rename the server to 'new-name'
        self.api.put_server(server_id, {'server': {'name': 'new-name'}})

        # Check the name of the server
        created_server = self.api.get_server(server_id)
        self.assertEqual(created_server['name'], 'new-name')

        # Cleanup
        self._delete_server(server_id)
Example #28
0
    def test_rename_server(self):
        """Test building and renaming a server."""
        fake_network.set_stub_network_methods(self.stubs)

        # Create a server
        server = self._build_minimal_create_server_request()
        created_server = self.api.post_server({"server": server})
        LOG.debug("created_server: %s" % created_server)
        server_id = created_server["id"]
        self.assertTrue(server_id)

        # Rename the server to 'new-name'
        self.api.put_server(server_id, {"server": {"name": "new-name"}})

        # Check the name of the server
        created_server = self.api.get_server(server_id)
        self.assertEqual(created_server["name"], "new-name")

        # Cleanup
        self._delete_server(server_id)
Example #29
0
    def test_create_server_with_metadata(self):
        # Creates a server with metadata.
        fake_network.set_stub_network_methods(self.stubs)

        # Build the server data gradually, checking errors along the way
        server = self._build_minimal_create_server_request()

        metadata = {}
        for i in range(30):
            metadata['key_%s' % i] = 'value_%s' % i

        server['metadata'] = metadata

        post = {'server': server}
        created_server = self.api.post_server(post)
        LOG.debug("created_server: %s" % created_server)
        self.assertTrue(created_server['id'])
        created_server_id = created_server['id']

        found_server = self.api.get_server(created_server_id)
        self.assertEqual(created_server_id, found_server['id'])
        self.assertEqual(metadata, found_server.get('metadata'))

        # The server should also be in the all-servers details list
        servers = self.api.get_servers(detail=True)
        server_map = dict((server['id'], server) for server in servers)
        found_server = server_map.get(created_server_id)
        self.assertTrue(found_server)
        # Details do include metadata
        self.assertEqual(metadata, found_server.get('metadata'))

        # The server should also be in the all-servers summary list
        servers = self.api.get_servers(detail=False)
        server_map = dict((server['id'], server) for server in servers)
        found_server = server_map.get(created_server_id)
        self.assertTrue(found_server)
        # Summary should not include metadata
        self.assertFalse(found_server.get('metadata'))

        # Cleanup
        self._delete_server(created_server_id)
Example #30
0
    def test_create_server_with_metadata(self):
        # Creates a server with metadata.
        fake_network.set_stub_network_methods(self.stubs)

        # Build the server data gradually, checking errors along the way
        server = self._build_minimal_create_server_request()

        metadata = {}
        for i in range(30):
            metadata['key_%s' % i] = 'value_%s' % i

        server['metadata'] = metadata

        post = {'server': server}
        created_server = self.api.post_server(post)
        LOG.debug("created_server: %s" % created_server)
        self.assertTrue(created_server['id'])
        created_server_id = created_server['id']

        found_server = self.api.get_server(created_server_id)
        self.assertEqual(created_server_id, found_server['id'])
        self.assertEqual(metadata, found_server.get('metadata'))

        # The server should also be in the all-servers details list
        servers = self.api.get_servers(detail=True)
        server_map = dict((server['id'], server) for server in servers)
        found_server = server_map.get(created_server_id)
        self.assertTrue(found_server)
        # Details do include metadata
        self.assertEqual(metadata, found_server.get('metadata'))

        # The server should also be in the all-servers summary list
        servers = self.api.get_servers(detail=False)
        server_map = dict((server['id'], server) for server in servers)
        found_server = server_map.get(created_server_id)
        self.assertTrue(found_server)
        # Summary should not include metadata
        self.assertFalse(found_server.get('metadata'))

        # Cleanup
        self._delete_server(created_server_id)
Example #31
0
    def test_create_server_with_injected_files(self):
        # Creates a server with injected_files.
        fake_network.set_stub_network_methods(self.stubs)
        personality = []

        # Inject a text file
        data = 'Hello, World!'
        personality.append({
            'path': '/helloworld.txt',
            'contents': data.encode('base64'),
        })

        # Inject a binary file
        data = zlib.compress('Hello, World!')
        personality.append({
            'path': '/helloworld.zip',
            'contents': data.encode('base64'),
        })

        # Create server
        server = self._build_minimal_create_server_request()
        server['personality'] = personality

        post = {'server': server}

        created_server = self.api.post_server(post)
        LOG.debug("created_server: %s" % created_server)
        self.assertTrue(created_server['id'])
        created_server_id = created_server['id']

        # Check it's there
        found_server = self.api.get_server(created_server_id)
        self.assertEqual(created_server_id, found_server['id'])

        found_server = self._wait_for_state_change(found_server, 'BUILD')
        self.assertEqual('ACTIVE', found_server['status'])

        # Cleanup
        self._delete_server(created_server_id)
Example #32
0
    def test_create_server_with_injected_files(self):
        # Creates a server with injected_files.
        fake_network.set_stub_network_methods(self.stubs)
        personality = []

        # Inject a text file
        data = 'Hello, World!'
        personality.append({
            'path': '/helloworld.txt',
            'contents': data.encode('base64'),
        })

        # Inject a binary file
        data = zlib.compress('Hello, World!')
        personality.append({
            'path': '/helloworld.zip',
            'contents': data.encode('base64'),
        })

        # Create server
        server = self._build_minimal_create_server_request()
        server['personality'] = personality

        post = {'server': server}

        created_server = self.api.post_server(post)
        LOG.debug("created_server: %s" % created_server)
        self.assertTrue(created_server['id'])
        created_server_id = created_server['id']

        # Check it's there
        found_server = self.api.get_server(created_server_id)
        self.assertEqual(created_server_id, found_server['id'])

        found_server = self._wait_for_state_change(found_server, 'BUILD')
        self.assertEqual('ACTIVE', found_server['status'])

        # Cleanup
        self._delete_server(created_server_id)
Example #33
0
    def test_deferred_delete(self):
        """Creates, deletes and waits for server to be reclaimed."""
        self.flags(reclaim_instance_interval=1)
        fake_network.set_stub_network_methods(self.stubs)

        # Run periodic tasks at a short interval to avoid waiting for 60s.
        self._restart_compute_service(periodic_interval=0.3, periodic_fuzzy_delay=0)

        # Create server
        server = self._build_minimal_create_server_request()

        created_server = self.api.post_server({"server": server})
        LOG.debug("created_server: %s" % created_server)
        self.assertTrue(created_server["id"])
        created_server_id = created_server["id"]

        # Wait for it to finish being created
        found_server = self._wait_for_state_change(created_server, "BUILD")

        # It should be available...
        self.assertEqual("ACTIVE", found_server["status"])

        # Cannot restore unless instance is deleted
        self.assertRaises(client.OpenStackApiException, self.api.post_server_action, created_server_id, {"restore": {}})

        # Cannot forceDelete unless instance is deleted
        self.assertRaises(
            client.OpenStackApiException, self.api.post_server_action, created_server_id, {"forceDelete": {}}
        )

        # Delete the server
        self.api.delete_server(created_server_id)

        # Wait for queued deletion
        found_server = self._wait_for_state_change(found_server, "ACTIVE")
        self.assertEqual("DELETED", found_server["status"])

        # Wait for real deletion
        self._wait_for_deletion(created_server_id)
Example #34
0
    def test_create_and_delete_server(self):
        # Creates and deletes a server.
        fake_network.set_stub_network_methods(self.stubs)

        # Create server
        # Build the server data gradually, checking errors along the way
        server = {}
        good_server = self._build_minimal_create_server_request()

        post = {'server': server}

        # Without an imageRef, this throws 500.
        # TODO(justinsb): Check whatever the spec says should be thrown here
        self.assertRaises(client.OpenStackApiException, self.api.post_server,
                          post)

        # With an invalid imageRef, this throws 500.
        server['imageRef'] = self.get_invalid_image()
        # TODO(justinsb): Check whatever the spec says should be thrown here
        self.assertRaises(client.OpenStackApiException, self.api.post_server,
                          post)

        # Add a valid imageRef
        server['imageRef'] = good_server.get('imageRef')

        # Without flavorRef, this throws 500
        # TODO(justinsb): Check whatever the spec says should be thrown here
        self.assertRaises(client.OpenStackApiException, self.api.post_server,
                          post)

        server['flavorRef'] = good_server.get('flavorRef')

        # Without a name, this throws 500
        # TODO(justinsb): Check whatever the spec says should be thrown here
        self.assertRaises(client.OpenStackApiException, self.api.post_server,
                          post)

        # Set a valid server name
        server['name'] = good_server['name']

        created_server = self.api.post_server(post)
        LOG.debug("created_server: %s" % created_server)
        self.assertTrue(created_server['id'])
        created_server_id = created_server['id']

        # Check it's there
        found_server = self.api.get_server(created_server_id)
        self.assertEqual(created_server_id, found_server['id'])

        # It should also be in the all-servers list
        servers = self.api.get_servers()
        server_ids = [s['id'] for s in servers]
        self.assertTrue(created_server_id in server_ids)

        found_server = self._wait_for_state_change(found_server, 'BUILD')
        # It should be available...
        # TODO(justinsb): Mock doesn't yet do this...
        self.assertEqual('ACTIVE', found_server['status'])
        servers = self.api.get_servers(detail=True)
        for server in servers:
            self.assertTrue("image" in server)
            self.assertTrue("flavor" in server)

        self._delete_server(created_server_id)
Example #35
0
    def setUp(self):
        super(EC2ValidateTestCase, self).setUp()
        self.flags(compute_driver='nova.virt.fake.FakeDriver')

        def dumb(*args, **kwargs):
            pass

        self.stubs.Set(compute_utils, 'notify_about_instance_usage', dumb)
        fake_network.set_stub_network_methods(self.stubs)

        # set up our cloud
        self.cloud = cloud.CloudController()

        # set up services
        self.conductor = self.start_service('conductor',
                manager=CONF.conductor.manager)
        self.compute = self.start_service('compute')
        self.scheduler = self.start_service('scheduler')
        self.network = self.start_service('network')
        self.image_service = fake.FakeImageService()

        self.user_id = 'fake'
        self.project_id = 'fake'
        self.context = context.RequestContext(self.user_id,
                                              self.project_id,
                                              is_admin=True)

        self.EC2_MALFORMED_IDS = ['foobar', '', 123]
        self.EC2_VALID__IDS = ['i-284f3a41', 'i-001', 'i-deadbeef']

        self.ec2_id_exception_map = [(x,
                exception.InvalidInstanceIDMalformed)
                for x in self.EC2_MALFORMED_IDS]
        self.ec2_id_exception_map.extend([(x, exception.InstanceNotFound)
                for x in self.EC2_VALID__IDS])
        self.volume_id_exception_map = [(x,
                exception.InvalidInstanceIDMalformed)
                for x in self.EC2_MALFORMED_IDS]
        self.volume_id_exception_map.extend([(x, exception.VolumeNotFound)
                for x in self.EC2_VALID__IDS])

        def fake_show(meh, context, id):
            return {'id': id,
                    'container_format': 'ami',
                    'properties': {
                        'kernel_id': 'cedef40a-ed67-4d10-800e-17455edce175',
                        'ramdisk_id': 'cedef40a-ed67-4d10-800e-17455edce175',
                        'type': 'machine',
                        'image_state': 'available'}}

        def fake_detail(self, context, **kwargs):
            image = fake_show(self, context, None)
            image['name'] = kwargs.get('name')
            return [image]

        fake.stub_out_image_service(self.stubs)
        self.stubs.Set(fake._FakeImageService, 'show', fake_show)
        self.stubs.Set(fake._FakeImageService, 'detail', fake_detail)

        self.useFixture(cast_as_call.CastAsCall(self.stubs))

        # make sure we can map ami-00000001/2 to a uuid in FakeImageService
        db.s3_image_create(self.context,
                               'cedef40a-ed67-4d10-800e-17455edce175')
        db.s3_image_create(self.context,
                               '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6')
Example #36
0
    def test_create_and_rebuild_server(self):
        # Rebuild a server with metadata.
        fake_network.set_stub_network_methods(self.stubs)

        # create a server which initially has no metadata
        server = self._build_minimal_create_server_request()
        server_post = {'server': server}

        metadata = {}
        for i in range(30):
            metadata['key_%s' % i] = 'value_%s' % i

        server_post['server']['metadata'] = metadata

        created_server = self.api.post_server(server_post)
        LOG.debug("created_server: %s" % created_server)
        self.assertTrue(created_server['id'])
        created_server_id = created_server['id']

        created_server = self._wait_for_state_change(created_server, 'BUILD')

        # rebuild the server with metadata and other server attributes
        post = {}
        post['rebuild'] = {
            self._image_ref_parameter: "76fa36fc-c930-4bf3-8c8a-ea2a2420deb6",
            "name": "blah",
            self._access_ipv4_parameter: "172.19.0.2",
            self._access_ipv6_parameter: "fe80::2",
            "metadata": {'some': 'thing'},
        }
        post['rebuild'].update(self._get_access_ips_params())

        self.api.post_server_action(created_server_id, post)
        LOG.debug("rebuilt server: %s" % created_server)
        self.assertTrue(created_server['id'])

        found_server = self.api.get_server(created_server_id)
        self.assertEqual(created_server_id, found_server['id'])
        self.assertEqual({'some': 'thing'}, found_server.get('metadata'))
        self.assertEqual('blah', found_server.get('name'))
        self.assertEqual(post['rebuild'][self._image_ref_parameter],
                         found_server.get('image')['id'])
        self._verify_access_ips(found_server)

        # rebuild the server with empty metadata and nothing else
        post = {}
        post['rebuild'] = {
            self._image_ref_parameter: "76fa36fc-c930-4bf3-8c8a-ea2a2420deb6",
            "metadata": {},
        }

        self.api.post_server_action(created_server_id, post)
        LOG.debug("rebuilt server: %s" % created_server)
        self.assertTrue(created_server['id'])

        found_server = self.api.get_server(created_server_id)
        self.assertEqual(created_server_id, found_server['id'])
        self.assertEqual({}, found_server.get('metadata'))
        self.assertEqual('blah', found_server.get('name'))
        self.assertEqual(post['rebuild'][self._image_ref_parameter],
                         found_server.get('image')['id'])
        self._verify_access_ips(found_server)

        # Cleanup
        self._delete_server(created_server_id)
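
The create/rebuild tests in this collection lean on a polling helper, _wait_for_state_change, that lives on the shared test base class and is not part of this excerpt. A minimal sketch of what such a method might look like; the retry count and sleep interval are illustrative assumptions, not the real implementation:

    def _wait_for_state_change(self, server, from_status, max_retries=50):
        # Re-fetch the server until its status moves off `from_status`
        # (e.g. until it leaves 'BUILD'), then return the refreshed record.
        import time  # imported here only to keep the sketch self-contained
        for _ in range(max_retries):
            server = self.api.get_server(server['id'])
            if server['status'] != from_status:
                break
            time.sleep(0.1)
        return server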
Example #37
    def setUp(self):
        super(CinderCloudTestCase, self).setUp()
        ec2utils.reset_cache()
        vol_tmpdir = self.useFixture(fixtures.TempDir()).path
        # Stub utils.spawn_n so spawned work runs inline (synchronously)
        # instead of on a background greenthread.
        fake_utils.stub_out_utils_spawn_n(self.stubs)
        self.flags(compute_driver='nova.virt.fake.FakeDriver',
                   volume_api_class='nova.tests.fake_volume.API')

        def fake_show(meh, context, id):
            return {
                'id': id,
                'name': 'fake_name',
                'container_format': 'ami',
                'status': 'active',
                'properties': {
                    'kernel_id': 'cedef40a-ed67-4d10-800e-17455edce175',
                    'ramdisk_id': 'cedef40a-ed67-4d10-800e-17455edce175',
                    'type': 'machine',
                    'image_state': 'available'
                }
            }

        def fake_detail(_self, context, **kwargs):
            image = fake_show(None, context, None)
            image['name'] = kwargs.get('filters', {}).get('name')
            return [image]

        self.stubs.Set(fake._FakeImageService, 'show', fake_show)
        self.stubs.Set(fake._FakeImageService, 'detail', fake_detail)
        fake.stub_out_image_service(self.stubs)

        def dumb(*args, **kwargs):
            pass

        self.stubs.Set(compute_utils, 'notify_about_instance_usage', dumb)
        fake_network.set_stub_network_methods(self.stubs)

        # set up our cloud
        self.cloud = cloud.CloudController()
        self.flags(scheduler_driver='nova.scheduler.chance.ChanceScheduler')

        # Short-circuit the conductor service
        self.flags(use_local=True, group='conductor')

        # set up services
        self.conductor = self.start_service('conductor',
                                            manager=CONF.conductor.manager)
        self.compute = self.start_service('compute')
        self.scheduler = self.start_service('scheduler')
        self.network = self.start_service('network')
        self.consoleauth = self.start_service('consoleauth')

        self.user_id = 'fake'
        self.project_id = 'fake'
        self.context = context.RequestContext(self.user_id,
                                              self.project_id,
                                              is_admin=True)
        self.volume_api = volume.API()
        self.volume_api.reset_fake_api(self.context)

        self.useFixture(cast_as_call.CastAsCall(self.stubs))

        # make sure we can map ami-00000001/2 to a uuid in FakeImageService
        db.s3_image_create(self.context,
                           'cedef40a-ed67-4d10-800e-17455edce175')
        db.s3_image_create(self.context,
                           '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6')
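
For reference, the two s3_image_create calls above are what let EC2-style image ids resolve inside these tests. A hedged illustration using the ec2utils helpers this module already imports; the helper names and return values are assumptions based on nova.api.ec2.ec2utils of this era, not something this setUp exercises directly:

        # 'ami-00000001' parses to internal id 1, the first s3 image row
        # created above, which points at the first registered Glance uuid.
        internal_id = ec2utils.ec2_id_to_id('ami-00000001')
        image_uuid = ec2utils.id_to_glance_id(self.context, internal_id)
        # expected: image_uuid == 'cedef40a-ed67-4d10-800e-17455edce175'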
Example #38
    def setUp(self):
        super(EC2ValidateTestCase, self).setUp()
        self.flags(compute_driver='nova.virt.fake.FakeDriver')

        def dumb(*args, **kwargs):
            pass

        self.stubs.Set(compute_utils, 'notify_about_instance_usage', dumb)
        fake_network.set_stub_network_methods(self.stubs)

        # set up our cloud
        self.cloud = cloud.CloudController()

        # Short-circuit the conductor service
        self.flags(use_local=True, group='conductor')

        # Stub out the notification service so we use the no-op serializer
        # and avoid lazy-load traces with the wrap_exception decorator in
        # the compute service.
        fake_notifier.stub_notifier(self.stubs)
        self.addCleanup(fake_notifier.reset)

        # set up services
        self.conductor = self.start_service('conductor',
                                            manager=CONF.conductor.manager)
        self.compute = self.start_service('compute')
        self.scheduler = self.start_service('scheduler')
        self.network = self.start_service('network')
        self.image_service = fake.FakeImageService()

        self.user_id = 'fake'
        self.project_id = 'fake'
        self.context = context.RequestContext(self.user_id,
                                              self.project_id,
                                              is_admin=True)

        self.EC2_MALFORMED_IDS = ['foobar', '', 123]
        self.EC2_VALID__IDS = ['i-284f3a41', 'i-001', 'i-deadbeef']

        self.ec2_id_exception_map = [(x, exception.InvalidInstanceIDMalformed)
                                     for x in self.EC2_MALFORMED_IDS]
        self.ec2_id_exception_map.extend([(x, exception.InstanceNotFound)
                                          for x in self.EC2_VALID__IDS])
        self.volume_id_exception_map = [(x,
                                         exception.InvalidInstanceIDMalformed)
                                        for x in self.EC2_MALFORMED_IDS]
        self.volume_id_exception_map.extend([(x, exception.VolumeNotFound)
                                             for x in self.EC2_VALID__IDS])

        def fake_show(meh, context, id, **kwargs):
            return {
                'id': id,
                'container_format': 'ami',
                'properties': {
                    'kernel_id': 'cedef40a-ed67-4d10-800e-17455edce175',
                    'ramdisk_id': 'cedef40a-ed67-4d10-800e-17455edce175',
                    'type': 'machine',
                    'image_state': 'available'
                }
            }

        def fake_detail(self, context, **kwargs):
            image = fake_show(self, context, None)
            image['name'] = kwargs.get('name')
            return [image]

        fake.stub_out_image_service(self.stubs)
        self.stubs.Set(fake._FakeImageService, 'show', fake_show)
        self.stubs.Set(fake._FakeImageService, 'detail', fake_detail)

        self.useFixture(cast_as_call.CastAsCall(self.stubs))

        # make sure we can map ami-00000001/2 to a uuid in FakeImageService
        db.s3_image_create(self.context,
                           'cedef40a-ed67-4d10-800e-17455edce175')
        db.s3_image_create(self.context,
                           '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6')
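
This setUp only builds the (id, expected exception) maps; the validation tests themselves are not part of this excerpt. A sketch of how such a map is typically consumed, using stop_instances purely as an example EC2 action (the real tests may exercise other calls):

    def test_stop_instances_with_bad_ids(self):
        # Every malformed or unknown id should raise the exception recorded
        # for it in the map built during setUp.
        for ec2_id, exc in self.ec2_id_exception_map:
            self.assertRaises(exc,
                              self.cloud.stop_instances,
                              self.context,
                              [ec2_id])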
Example #39
    def test_create_and_rebuild_server(self):
        """Rebuild a server with metadata."""
        fake_network.set_stub_network_methods(self.stubs)

        # create a server which initially has no metadata
        server = self._build_minimal_create_server_request()
        server_post = {"server": server}

        metadata = {}
        for i in range(30):
            metadata["key_%s" % i] = "value_%s" % i

        server_post["server"]["metadata"] = metadata

        created_server = self.api.post_server(server_post)
        LOG.debug("created_server: %s" % created_server)
        self.assertTrue(created_server["id"])
        created_server_id = created_server["id"]

        created_server = self._wait_for_state_change(created_server, "BUILD")

        # rebuild the server with metadata and other server attributes
        post = {}
        post["rebuild"] = {
            "imageRef": "76fa36fc-c930-4bf3-8c8a-ea2a2420deb6",
            "name": "blah",
            "accessIPv4": "172.19.0.2",
            "accessIPv6": "fe80::2",
            "metadata": {"some": "thing"},
        }

        self.api.post_server_action(created_server_id, post)
        LOG.debug("rebuilt server: %s" % created_server)
        self.assertTrue(created_server["id"])

        found_server = self.api.get_server(created_server_id)
        self.assertEqual(created_server_id, found_server["id"])
        self.assertEqual({"some": "thing"}, found_server.get("metadata"))
        self.assertEqual("blah", found_server.get("name"))
        self.assertEqual(post["rebuild"]["imageRef"], found_server.get("image")["id"])
        self.assertEqual("172.19.0.2", found_server["accessIPv4"])
        self.assertEqual("fe80::2", found_server["accessIPv6"])

        # rebuild the server with empty metadata and nothing else
        post = {}
        post["rebuild"] = {"imageRef": "76fa36fc-c930-4bf3-8c8a-ea2a2420deb6", "metadata": {}}

        self.api.post_server_action(created_server_id, post)
        LOG.debug("rebuilt server: %s" % created_server)
        self.assertTrue(created_server["id"])

        found_server = self.api.get_server(created_server_id)
        self.assertEqual(created_server_id, found_server["id"])
        self.assertEqual({}, found_server.get("metadata"))
        self.assertEqual("blah", found_server.get("name"))
        self.assertEqual(post["rebuild"]["imageRef"], found_server.get("image")["id"])
        self.assertEqual("172.19.0.2", found_server["accessIPv4"])
        self.assertEqual("fe80::2", found_server["accessIPv6"])

        # Cleanup
        self._delete_server(created_server_id)
Example #40
    def setUp(self):
        super(CinderCloudTestCase, self).setUp()
        ec2utils.reset_cache()
        self.useFixture(fixtures.TempDir()).path
        fake_utils.stub_out_utils_spawn_n(self.stubs)
        self.flags(compute_driver='nova.virt.fake.FakeDriver',
                   volume_api_class='nova.tests.fake_volume.API')

        def fake_show(meh, context, id):
            return {'id': id,
                    'name': 'fake_name',
                    'container_format': 'ami',
                    'status': 'active',
                    'properties': {
                        'kernel_id': 'cedef40a-ed67-4d10-800e-17455edce175',
                        'ramdisk_id': 'cedef40a-ed67-4d10-800e-17455edce175',
                        'type': 'machine',
                        'image_state': 'available'}}

        def fake_detail(_self, context, **kwargs):
            image = fake_show(None, context, None)
            image['name'] = kwargs.get('filters', {}).get('name')
            return [image]

        self.stubs.Set(fake._FakeImageService, 'show', fake_show)
        self.stubs.Set(fake._FakeImageService, 'detail', fake_detail)
        fake.stub_out_image_service(self.stubs)

        def dumb(*args, **kwargs):
            pass

        self.stubs.Set(compute_utils, 'notify_about_instance_usage', dumb)
        fake_network.set_stub_network_methods(self.stubs)

        # set up our cloud
        self.cloud = cloud.CloudController()
        self.flags(scheduler_driver='nova.scheduler.chance.ChanceScheduler')

        # Short-circuit the conductor service
        self.flags(use_local=True, group='conductor')

        # set up services
        self.conductor = self.start_service('conductor',
                manager=CONF.conductor.manager)
        self.compute = self.start_service('compute')
        self.scheduler = self.start_service('scheduler')
        self.network = self.start_service('network')
        self.consoleauth = self.start_service('consoleauth')

        self.user_id = 'fake'
        self.project_id = 'fake'
        self.context = context.RequestContext(self.user_id,
                                              self.project_id,
                                              is_admin=True)
        self.volume_api = volume.API()
        self.volume_api.reset_fake_api(self.context)

        self.useFixture(cast_as_call.CastAsCall(self.stubs))

        # make sure we can map ami-00000001/2 to a uuid in FakeImageService
        db.s3_image_create(self.context,
                               'cedef40a-ed67-4d10-800e-17455edce175')
        db.s3_image_create(self.context,
                               '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6')
Example #41
    def setUp(self):
        super(TestNovaNotifier, self).setUp()
        nova_CONF.compute_driver = 'nova.virt.fake.FakeDriver'
        nova_CONF.notification_driver = [nova_notifier.__name__]
        nova_CONF.rpc_backend = 'nova.openstack.common.rpc.impl_fake'
        nova_CONF.vnc_enabled = False
        nova_CONF.spice.enabled = False
        self.compute = importutils.import_object(nova_CONF.compute_manager)
        self.context = context.get_admin_context()
        fake_network.set_stub_network_methods(self.stubs)

        instance_data = {
            "display_name": "instance-1",
            'OS-EXT-SRV-ATTR:instance_name': 'instance-1',
            "id": 1,
            "image_ref": "FAKE",
            "user_id": "FAKE",
            "project_id": "FAKE",
            "display_name": "FAKE NAME",
            "hostname": "abcdef",
            "reservation_id": "FAKE RID",
            "instance_type_id": 1,
            "architecture": "x86",
            "memory_mb": "1024",
            "root_gb": "20",
            "ephemeral_gb": "0",
            "vcpus": 1,
            'node': "fakenode",
            "host": "fakehost",
            "availability_zone": "1e3ce043029547f1a61c1996d1a531a4",
            "created_at": '2012-05-08 20:23:41',
            "launched_at": '2012-05-08 20:25:45',
            "terminated_at": '2012-05-09 20:23:41',
            "os_type": "linux",
            "kernel_id": "kernelid",
            "ramdisk_id": "ramdiskid",
            "vm_state": vm_states.ACTIVE,
            "task_state": None,
            "access_ip_v4": "192.168.5.4",
            "access_ip_v6": "2001:DB8::0",
            "metadata": {},
            "uuid": "144e08f4-00cb-11e2-888e-5453ed1bbb5f",
            "system_metadata": {}
        }

        self.instance = nova_instance.Instance()
        for key, value in instance_data.iteritems():
            setattr(self.instance, key, value)

        self.stubs.Set(db, 'instance_info_cache_delete', self.do_nothing)
        self.stubs.Set(db, 'instance_destroy', self.do_nothing)
        self.stubs.Set(db, 'instance_system_metadata_get',
                       self.fake_db_instance_system_metadata_get)
        self.stubs.Set(db, 'block_device_mapping_get_all_by_instance',
                       lambda context, instance: {})
        self.stubs.Set(
            db, 'instance_update_and_get_original',
            lambda context, uuid, kwargs: (self.instance, self.instance))
        self.stubs.Set(flavors, 'extract_flavor', lambda ref: {})

        # Set up to capture the notification messages generated by the
        # plugin and to invoke our notifier plugin.
        self.notifications = []
        notifier_api._reset_drivers()
        notifier_api.add_driver(self)
        notifier_api.add_driver(nova_notifier)

        ext_mgr = test_manager.TestExtensionManager([
            extension.Extension(
                'test',
                None,
                None,
                self.Pollster(),
            ),
        ])
        self.ext_mgr = ext_mgr
        self.gatherer = nova_notifier.DeletedInstanceStatsGatherer(ext_mgr)
        nova_notifier.initialize_gatherer(self.gatherer)

        # Terminate the instance to trigger the notification.
        with contextlib.nested(
                # Under Grizzly, Nova has moved to no-db access on the
                # compute node. The compute manager uses RPC to talk to
                # the conductor. We need to disable communication between
                # the nova manager and the remote system since we can't
                # expect the message bus to be available, or the remote
                # controller to be there if the message bus is online.
                mock.patch.object(self.compute, 'conductor_api'),
                # The code that looks up the instance uses a global
                # reference to the API, so we also have to patch that to
                # return our fake data.
                mock.patch.object(nova_notifier.instance_info_source,
                                  'instance_get_by_uuid',
                                  self.fake_instance_ref_get),
        ):
            self.compute.terminate_instance(self.context,
                                            instance=self.instance)
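
The setUp above references several small helpers (do_nothing, fake_db_instance_system_metadata_get, fake_instance_ref_get, the Pollster extension) defined elsewhere on the test class. Plausible minimal forms of the first three, offered only as a sketch of what the stubs need to provide:

    def do_nothing(self, *args, **kwargs):
        # Swallow the DB calls the test wants to silence
        # (instance_info_cache_delete, instance_destroy).
        pass

    def fake_db_instance_system_metadata_get(self, context, uuid):
        # Return a minimal system-metadata mapping for the usage notification.
        return {}

    def fake_instance_ref_get(self, context, id_):
        # Always hand back the fixture instance, whatever uuid is requested.
        return self.instance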
Example #42
    def setUp(self):
        super(EC2ValidateTestCase, self).setUp()
        self.flags(compute_driver='nova.virt.fake.FakeDriver')

        def dumb(*args, **kwargs):
            pass

        self.stubs.Set(compute_utils, 'notify_about_instance_usage', dumb)
        fake_network.set_stub_network_methods(self.stubs)

        # set up our cloud
        self.cloud = cloud.CloudController()

        # set up services
        self.conductor = self.start_service('conductor',
                                            manager=CONF.conductor.manager)
        self.compute = self.start_service('compute')
        self.scheduler = self.start_service('scheduler')
        self.network = self.start_service('network')
        self.image_service = fake.FakeImageService()

        self.user_id = 'fake'
        self.project_id = 'fake'
        self.context = context.RequestContext(self.user_id,
                                              self.project_id,
                                              is_admin=True)

        self.EC2_MALFORMED_IDS = ['foobar', '', 123]
        self.EC2_VALID__IDS = ['i-284f3a41', 'i-001', 'i-deadbeef']

        self.ec2_id_exception_map = [(x, exception.InvalidInstanceIDMalformed)
                                     for x in self.EC2_MALFORMED_IDS]
        self.ec2_id_exception_map.extend([(x, exception.InstanceNotFound)
                                          for x in self.EC2_VALID__IDS])
        self.volume_id_exception_map = [(x,
                                         exception.InvalidInstanceIDMalformed)
                                        for x in self.EC2_MALFORMED_IDS]
        self.volume_id_exception_map.extend([(x, exception.VolumeNotFound)
                                             for x in self.EC2_VALID__IDS])

        def fake_show(meh, context, id):
            return {
                'id': id,
                'container_format': 'ami',
                'properties': {
                    'kernel_id': 'cedef40a-ed67-4d10-800e-17455edce175',
                    'ramdisk_id': 'cedef40a-ed67-4d10-800e-17455edce175',
                    'type': 'machine',
                    'image_state': 'available'
                }
            }

        def fake_detail(self, context, **kwargs):
            image = fake_show(self, context, None)
            image['name'] = kwargs.get('name')
            return [image]

        fake.stub_out_image_service(self.stubs)
        self.stubs.Set(fake._FakeImageService, 'show', fake_show)
        self.stubs.Set(fake._FakeImageService, 'detail', fake_detail)

        # NOTE(comstud): Make 'cast' behave like a 'call' which will
        # ensure that operations complete
        self.stubs.Set(rpc, 'cast', rpc.call)

        # make sure we can map ami-00000001/2 to a uuid in FakeImageService
        db.api.s3_image_create(self.context,
                               'cedef40a-ed67-4d10-800e-17455edce175')
        db.api.s3_image_create(self.context,
                               '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6')
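
The rpc.cast stub at the end of this setUp serves the same purpose as the fixture used by the other setUp variants in this collection: RPC casts are turned into blocking calls so queued operations finish before the test makes assertions. The fixture-based form seen elsewhere here is:

        self.useFixture(cast_as_call.CastAsCall(self.stubs))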
Example #43
    def setUp(self):
        super(TestNovaNotifier, self).setUp()
        nova_CONF.compute_driver = 'nova.virt.fake.FakeDriver'
        nova_CONF.notification_driver = [
            nova_notifier.__name__,
            'nova.openstack.common.notifier.rpc_notifier',
        ]
        nova_CONF.rpc_backend = 'nova.openstack.common.rpc.impl_fake'
        nova_CONF.vnc_enabled = False
        nova_CONF.spice.enabled = False
        self.compute = importutils.import_object(nova_CONF.compute_manager)
        self.context = context.get_admin_context()
        self.stubs = self.useFixture(moxstubout.MoxStubout()).stubs
        fake_network.set_stub_network_methods(self.stubs)

        self.instance_data = {"display_name": "instance-1",
                              "id": 1,
                              "image_ref": "FAKE",
                              "user_id": "FAKE",
                              "project_id": "FAKE",
                              "display_name": "FAKE NAME",
                              "hostname": "abcdef",
                              "reservation_id": "FAKE RID",
                              "instance_type_id": 1,
                              "architecture": "x86",
                              "memory_mb": "1024",
                              "root_gb": "20",
                              "ephemeral_gb": "0",
                              "vcpus": 1,
                              'node': "fakenode",
                              "host": "fakehost",
                              "availability_zone":
                              "1e3ce043029547f1a61c1996d1a531a4",
                              "created_at": '2012-05-08 20:23:41',
                              "launched_at": '2012-05-08 20:25:45',
                              "terminated_at": '2012-05-09 20:23:41',
                              "os_type": "linux",
                              "kernel_id": "kernelid",
                              "ramdisk_id": "ramdiskid",
                              "vm_state": vm_states.ACTIVE,
                              "task_state": None,
                              "access_ip_v4": "192.168.5.4",
                              "access_ip_v6": "2001:DB8::0",
                              "metadata": {},
                              "uuid": "144e08f4-00cb-11e2-888e-5453ed1bbb5f",
                              "system_metadata": {},
                              "user_data": None,
                              "cleaned": 0,
                              "deleted": None,
                              "vm_mode": None,
                              "deleted_at": None,
                              "disable_terminate": False,
                              "root_device_name": None,
                              "default_swap_device": None,
                              "launched_on": None,
                              "display_description": None,
                              "key_data": None,
                              "key_name": None,
                              "config_drive": None,
                              "power_state": None,
                              "default_ephemeral_device": None,
                              "progress": 0,
                              "scheduled_at": None,
                              "updated_at": None,
                              "shutdown_terminate": False,
                              "cell_name": 'cell',
                              "locked": False,
                              "locked_by": None,
                              "launch_index": 0,
                              "auto_disk_config": False,
                              }

        self.instance = nova_instance.Instance()
        self.instance = nova_instance.Instance._from_db_object(
                context, self.instance, self.instance_data,
                expected_attrs=['metadata', 'system_metadata'])

        self.stubs.Set(db, 'instance_info_cache_delete', self.do_nothing)
        self.stubs.Set(db, 'instance_destroy', self.do_nothing)
        self.stubs.Set(db, 'instance_system_metadata_get',
                       self.fake_db_instance_system_metadata_get)
        self.stubs.Set(db, 'block_device_mapping_get_all_by_instance',
                       lambda context, instance: {})
        self.stubs.Set(db, 'instance_update_and_get_original',
                       lambda *args, **kwargs: (self.instance, self.instance))
        self.stubs.Set(flavors, 'extract_flavor',
                       lambda ref: {})

        # Set up to capture the notification messages generated by the
        # plugin and to invoke our notifier plugin.
        self.notifications = []

        ext_mgr = test_manager.TestExtensionManager([
            extension.Extension('test',
                                None,
                                None,
                                self.Pollster(),
                                ),
        ])
        self.ext_mgr = ext_mgr
        self.gatherer = nova_notifier.DeletedInstanceStatsGatherer(ext_mgr)
        # Initialize the global _gatherer in nova_notifier to use the
        # gatherer in this test instead of the gatherer in nova_notifier.
        nova_notifier.initialize_gatherer(self.gatherer)

        # Terminate the instance to trigger the notification.
        with contextlib.nested(
            # Under Grizzly, Nova has moved to no-db access on the
            # compute node. The compute manager uses RPC to talk to
            # the conductor. We need to disable communication between
            # the nova manager and the remote system since we can't
            # expect the message bus to be available, or the remote
            # controller to be there if the message bus is online.
            mock.patch.object(self.compute, 'conductor_api'),
            # The code that looks up the instance uses a global
            # reference to the API, so we also have to patch that to
            # return our fake data.
            mock.patch.object(nova_notifier.conductor_api,
                              'instance_get_by_uuid',
                              self.fake_instance_ref_get),
            mock.patch.object(nova_notifier.conductor_api,
                              'instance_type_get',
                              self.fake_instance_type_get),
            mock.patch('nova.openstack.common.notifier.rpc_notifier.notify',
                       self.notify)
        ):
            with mock.patch.object(self.compute.conductor_api,
                                   'instance_destroy',
                                   return_value=self.instance):
                self.compute.terminate_instance(self.context,
                                                instance=self.instance,
                                                bdms=[],
                                                reservations=[])
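
The mock.patch of rpc_notifier.notify above routes every emitted notification through a self.notify callback that is not part of this excerpt. A minimal capture implementation might look like this (an assumption about the surrounding test class, not the original code):

    def notify(self, context, message):
        # Collect each notification payload so later assertions can inspect it.
        self.notifications.append(message)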
Example #44
    def test_create_and_rebuild_server(self):
        # Rebuild a server with metadata.
        fake_network.set_stub_network_methods(self.stubs)

        # create a server which initially has no metadata
        server = self._build_minimal_create_server_request()
        server_post = {'server': server}

        metadata = {}
        for i in range(30):
            metadata['key_%s' % i] = 'value_%s' % i

        server_post['server']['metadata'] = metadata

        created_server = self.api.post_server(server_post)
        LOG.debug("created_server: %s" % created_server)
        self.assertTrue(created_server['id'])
        created_server_id = created_server['id']

        created_server = self._wait_for_state_change(created_server, 'BUILD')

        # rebuild the server with metadata and other server attributes
        post = {}
        post['rebuild'] = {
            "imageRef": "76fa36fc-c930-4bf3-8c8a-ea2a2420deb6",
            "name": "blah",
            "accessIPv4": "172.19.0.2",
            "accessIPv6": "fe80::2",
            "metadata": {
                'some': 'thing'
            },
        }

        self.api.post_server_action(created_server_id, post)
        LOG.debug("rebuilt server: %s" % created_server)
        self.assertTrue(created_server['id'])

        found_server = self.api.get_server(created_server_id)
        self.assertEqual(created_server_id, found_server['id'])
        self.assertEqual({'some': 'thing'}, found_server.get('metadata'))
        self.assertEqual('blah', found_server.get('name'))
        self.assertEqual(post['rebuild']['imageRef'],
                         found_server.get('image')['id'])
        self.assertEqual('172.19.0.2', found_server['accessIPv4'])
        self.assertEqual('fe80::2', found_server['accessIPv6'])

        # rebuild the server with empty metadata and nothing else
        post = {}
        post['rebuild'] = {
            "imageRef": "76fa36fc-c930-4bf3-8c8a-ea2a2420deb6",
            "metadata": {},
        }

        self.api.post_server_action(created_server_id, post)
        LOG.debug("rebuilt server: %s" % created_server)
        self.assertTrue(created_server['id'])

        found_server = self.api.get_server(created_server_id)
        self.assertEqual(created_server_id, found_server['id'])
        self.assertEqual({}, found_server.get('metadata'))
        self.assertEqual('blah', found_server.get('name'))
        self.assertEqual(post['rebuild']['imageRef'],
                         found_server.get('image')['id'])
        self.assertEqual('172.19.0.2', found_server['accessIPv4'])
        self.assertEqual('fe80::2', found_server['accessIPv6'])

        # Cleanup
        self._delete_server(created_server_id)
Example #45
    def setUp(self):
        super(CinderCloudTestCase, self).setUp()
        vol_tmpdir = tempfile.mkdtemp()
        self.flags(compute_driver='nova.virt.fake.FakeDriver',
                   volume_api_class='nova.tests.fake_volume.API',
                   volumes_dir=vol_tmpdir)

        def fake_show(meh, context, id):
            return {'id': id,
                    'name': 'fake_name',
                    'container_format': 'ami',
                    'status': 'active',
                    'properties': {
                        'kernel_id': 'cedef40a-ed67-4d10-800e-17455edce175',
                        'ramdisk_id': 'cedef40a-ed67-4d10-800e-17455edce175',
                        'type': 'machine',
                        'image_state': 'available'}}

        def fake_detail(_self, context, **kwargs):
            image = fake_show(None, context, None)
            image['name'] = kwargs.get('filters', {}).get('name')
            return [image]

        self.stubs.Set(fake._FakeImageService, 'show', fake_show)
        self.stubs.Set(fake._FakeImageService, 'detail', fake_detail)
        fake.stub_out_image_service(self.stubs)

        def dumb(*args, **kwargs):
            pass

        self.stubs.Set(compute_utils, 'notify_about_instance_usage', dumb)
        fake_network.set_stub_network_methods(self.stubs)

        # set up our cloud
        self.cloud = cloud.CloudController()
        self.flags(compute_scheduler_driver='nova.scheduler.'
                'chance.ChanceScheduler')

        # set up services
        self.compute = self.start_service('compute')
        self.scheduler = self.start_service('scheduler')
        self.network = self.start_service('network')
        self.volume = self.start_service('volume')

        self.user_id = 'fake'
        self.project_id = 'fake'
        self.context = context.RequestContext(self.user_id,
                                              self.project_id,
                                              is_admin=True)
        self.volume_api = volume.API()
        self.volume_api.reset_fake_api(self.context)

        all_types = {'m1.tiny': 1, 'm1.small': 2, 'm1.medium': 2,
                     'm1.large': 4, 'm1.xlarge': 4}
        admin_context = context.get_admin_context()
        for name, value in all_types.iteritems():
            inst_type = db.instance_type_get_by_name(admin_context, name)
            ext_spec = {'ecus_per_vcpu:': value}
            db.instance_type_extra_specs_update_or_create(admin_context,
                                                    inst_type["flavorid"],
                                                    ext_spec)

        # NOTE(comstud): Make 'cast' behave like a 'call' which will
        # ensure that operations complete
        self.stubs.Set(rpc, 'cast', rpc.call)

        # make sure we can map ami-00000001/2 to a uuid in FakeImageService
        db.api.s3_image_create(self.context,
                               'cedef40a-ed67-4d10-800e-17455edce175')
        db.api.s3_image_create(self.context,
                               '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6')
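
The flavor loop in this setUp writes an 'ecus_per_vcpu:' extra spec onto each default instance type. A hedged read-back fragment, assuming the legacy nova DB API of this era exposes instance_type_extra_specs_get as the read-side counterpart of the update call used above:

        # Illustrative only: fetch the extra specs just written for a flavor.
        specs = db.instance_type_extra_specs_get(admin_context,
                                                 inst_type['flavorid'])
        # `specs` should contain the 'ecus_per_vcpu:' value set in the loop.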
Example #46
    def setUp(self):
        super(TestNovaNotifier, self).setUp()
        nova_CONF.compute_driver = 'nova.virt.fake.FakeDriver'
        nova_CONF.notification_driver = [nova_notifier.__name__]
        nova_CONF.rpc_backend = 'nova.openstack.common.rpc.impl_fake'
        nova_CONF.vnc_enabled = False
        nova_CONF.spice.enabled = False
        self.compute = importutils.import_object(nova_CONF.compute_manager)
        self.context = context.get_admin_context()
        fake_network.set_stub_network_methods(self.stubs)

        self.instance = {"name": "instance-1",
                         'OS-EXT-SRV-ATTR:instance_name': 'instance-1',
                         "id": 1,
                         "image_ref": "FAKE",
                         "user_id": "FAKE",
                         "project_id": "FAKE",
                         "display_name": "FAKE NAME",
                         "hostname": "abcdef",
                         "reservation_id": "FAKE RID",
                         "instance_type_id": 1,
                         "architecture": "x86",
                         "memory_mb": "1024",
                         "root_gb": "20",
                         "ephemeral_gb": "0",
                         "vcpus": 1,
                         'node': "fakenode",
                         "host": "fakehost",
                         "availability_zone":
                         "1e3ce043029547f1a61c1996d1a531a4",
                         "created_at": '2012-05-08 20:23:41',
                         "os_type": "linux",
                         "kernel_id": "kernelid",
                         "ramdisk_id": "ramdiskid",
                         "vm_state": vm_states.ACTIVE,
                         "access_ip_v4": "someip",
                         "access_ip_v6": "someip",
                         "metadata": {},
                         "uuid": "144e08f4-00cb-11e2-888e-5453ed1bbb5f",
                         "system_metadata": {}}
        self.stubs.Set(db, 'instance_info_cache_delete', self.do_nothing)
        self.stubs.Set(db, 'instance_destroy', self.do_nothing)
        self.stubs.Set(db, 'instance_system_metadata_get',
                       self.fake_db_instance_system_metadata_get)
        self.stubs.Set(db, 'block_device_mapping_get_all_by_instance',
                       lambda context, instance: {})
        self.stubs.Set(db, 'instance_update_and_get_original',
                       lambda context, uuid, kwargs: (self.instance,
                                                      self.instance))
        self.stubs.Set(flavors, 'extract_flavor',
                       lambda ref: {})

        # Set up to capture the notification messages generated by the
        # plugin and to invoke our notifier plugin.
        self.notifications = []
        notifier_api._reset_drivers()
        notifier_api.add_driver(self)
        notifier_api.add_driver(nova_notifier)

        ext_mgr = test_manager.TestExtensionManager([
            extension.Extension('test',
                                None,
                                None,
                                self.Pollster(),
                                ),
        ])
        self.ext_mgr = ext_mgr
        self.gatherer = nova_notifier.DeletedInstanceStatsGatherer(ext_mgr)
        nova_notifier.initialize_gatherer(self.gatherer)

        # Terminate the instance to trigger the notification.
        with contextlib.nested(
            # Under Grizzly, Nova has moved to no-db access on the
            # compute node. The compute manager uses RPC to talk to
            # the conductor. We need to disable communication between
            # the nova manager and the remote system since we can't
            # expect the message bus to be available, or the remote
            # controller to be there if the message bus is online.
            mock.patch.object(self.compute, 'conductor_api'),
            # The code that looks up the instance uses a global
            # reference to the API, so we also have to patch that to
            # return our fake data.
            mock.patch.object(nova_notifier.instance_info_source,
                              'instance_get_by_uuid',
                              self.fake_instance_ref_get),
        ):
            self.compute.terminate_instance(self.context,
                                            instance=self.instance)
Example #47
    def setUp(self):
        super(TestNovaNotifier, self).setUp()
        nova_CONF.compute_driver = 'nova.virt.fake.FakeDriver'
        nova_CONF.notification_driver = [
            nova_notifier.__name__,
            'nova.openstack.common.notifier.rpc_notifier',
        ]
        nova_CONF.rpc_backend = 'nova.openstack.common.rpc.impl_fake'
        nova_CONF.vnc_enabled = False
        nova_CONF.spice.enabled = False
        self.compute = importutils.import_object(nova_CONF.compute_manager)
        self.context = context.get_admin_context()
        self.stubs = self.useFixture(moxstubout.MoxStubout()).stubs
        fake_network.set_stub_network_methods(self.stubs)

        self.instance_data = {
            "display_name": "instance-1",
            "id": 1,
            "image_ref": "FAKE",
            "user_id": "FAKE",
            "project_id": "FAKE",
            "display_name": "FAKE NAME",
            "hostname": "abcdef",
            "reservation_id": "FAKE RID",
            "instance_type_id": 1,
            "architecture": "x86",
            "memory_mb": "1024",
            "root_gb": "20",
            "ephemeral_gb": "0",
            "vcpus": 1,
            'node': "fakenode",
            "host": "fakehost",
            "availability_zone": "1e3ce043029547f1a61c1996d1a531a4",
            "created_at": '2012-05-08 20:23:41',
            "launched_at": '2012-05-08 20:25:45',
            "terminated_at": '2012-05-09 20:23:41',
            "os_type": "linux",
            "kernel_id": "kernelid",
            "ramdisk_id": "ramdiskid",
            "vm_state": vm_states.ACTIVE,
            "task_state": None,
            "access_ip_v4": "192.168.5.4",
            "access_ip_v6": "2001:DB8::0",
            "metadata": {},
            "uuid": "144e08f4-00cb-11e2-888e-5453ed1bbb5f",
            "system_metadata": {},
            "user_data": None,
            "cleaned": 0,
            "deleted": None,
            "vm_mode": None,
            "deleted_at": None,
            "disable_terminate": False,
            "root_device_name": None,
            "default_swap_device": None,
            "launched_on": None,
            "display_description": None,
            "key_data": None,
            "key_name": None,
            "config_drive": None,
            "power_state": None,
            "default_ephemeral_device": None,
            "progress": 0,
            "scheduled_at": None,
            "updated_at": None,
            "shutdown_terminate": False,
            "cell_name": 'cell',
            "locked": False,
            "locked_by": None,
            "launch_index": 0,
            "auto_disk_config": False,
            "ephemeral_key_uuid": None
        }

        self.instance = nova_instance.Instance()
        self.instance = nova_instance.Instance._from_db_object(
            context,
            self.instance,
            self.instance_data,
            expected_attrs=['metadata', 'system_metadata'])

        self.stubs.Set(db, 'instance_info_cache_delete', self.do_nothing)
        self.stubs.Set(db, 'instance_destroy', self.do_nothing)
        self.stubs.Set(db, 'instance_system_metadata_get',
                       self.fake_db_instance_system_metadata_get)
        self.stubs.Set(db, 'block_device_mapping_get_all_by_instance',
                       lambda context, instance: {})
        self.stubs.Set(db, 'instance_update_and_get_original',
                       lambda *args, **kwargs: (self.instance, self.instance))
        self.stubs.Set(flavors, 'extract_flavor', self.fake_extract_flavor)

        # Set up to capture the notification messages generated by the
        # plugin and to invoke our notifier plugin.
        self.notifications = []

        ext_mgr = extension.ExtensionManager.make_test_instance(extensions=[
            extension.Extension(
                'test',
                None,
                None,
                self.Pollster(),
            ),
        ], )
        self.ext_mgr = ext_mgr
        self.gatherer = nova_notifier.DeletedInstanceStatsGatherer(ext_mgr)
        # Initialize the global _gatherer in nova_notifier to use the
        # gatherer in this test instead of the gatherer in nova_notifier.
        nova_notifier.initialize_gatherer(self.gatherer)

        # Terminate the instance to trigger the notification.
        with contextlib.nested(
                # Under Grizzly, Nova has moved to no-db access on the
                # compute node. The compute manager uses RPC to talk to
                # the conductor. We need to disable communication between
                # the nova manager and the remote system since we can't
                # expect the message bus to be available, or the remote
                # controller to be there if the message bus is online.
                mock.patch.object(self.compute, 'conductor_api'),
                # The code that looks up the instance uses a global
                # reference to the API, so we also have to patch that to
                # return our fake data.
                mock.patch.object(nova_notifier.conductor_api,
                                  'instance_get_by_uuid',
                                  self.fake_instance_ref_get),
                mock.patch(
                    'nova.openstack.common.notifier.rpc_notifier.notify',
                    self.notify)):
            with mock.patch.object(self.compute.conductor_api,
                                   'instance_destroy',
                                   return_value=self.instance):
                self.compute.terminate_instance(self.context,
                                                instance=self.instance,
                                                bdms=[],
                                                reservations=[])
Example #48
    def setUp(self):
        super(CinderCloudTestCase, self).setUp()
        ec2utils.reset_cache()
        vol_tmpdir = self.useFixture(fixtures.TempDir()).path
        fake_utils.stub_out_utils_spawn_n(self.stubs)
        self.flags(compute_driver="nova.virt.fake.FakeDriver", volume_api_class="nova.tests.fake_volume.API")

        def fake_show(meh, context, id):
            return {
                "id": id,
                "name": "fake_name",
                "container_format": "ami",
                "status": "active",
                "properties": {
                    "kernel_id": "cedef40a-ed67-4d10-800e-17455edce175",
                    "ramdisk_id": "cedef40a-ed67-4d10-800e-17455edce175",
                    "type": "machine",
                    "image_state": "available",
                },
            }

        def fake_detail(_self, context, **kwargs):
            image = fake_show(None, context, None)
            image["name"] = kwargs.get("filters", {}).get("name")
            return [image]

        self.stubs.Set(fake._FakeImageService, "show", fake_show)
        self.stubs.Set(fake._FakeImageService, "detail", fake_detail)
        fake.stub_out_image_service(self.stubs)

        def dumb(*args, **kwargs):
            pass

        self.stubs.Set(compute_utils, "notify_about_instance_usage", dumb)
        fake_network.set_stub_network_methods(self.stubs)

        # set up our cloud
        self.cloud = cloud.CloudController()
        self.flags(scheduler_driver="nova.scheduler.chance.ChanceScheduler")

        # Short-circuit the conductor service
        self.flags(use_local=True, group="conductor")

        # set up services
        self.conductor = self.start_service("conductor", manager=CONF.conductor.manager)
        self.compute = self.start_service("compute")
        self.scheduler = self.start_service("scheduler")
        self.network = self.start_service("network")
        self.consoleauth = self.start_service("consoleauth")

        self.user_id = "fake"
        self.project_id = "fake"
        self.context = context.RequestContext(self.user_id, self.project_id, is_admin=True)
        self.volume_api = volume.API()
        self.volume_api.reset_fake_api(self.context)

        self.useFixture(cast_as_call.CastAsCall(self.stubs))

        # make sure we can map ami-00000001/2 to a uuid in FakeImageService
        db.s3_image_create(self.context, "cedef40a-ed67-4d10-800e-17455edce175")
        db.s3_image_create(self.context, "76fa36fc-c930-4bf3-8c8a-ea2a2420deb6")