Example #1
    def setUp(self):
        def fake_get_nw_info(cls, ctxt, instance):
            self.assertTrue(ctxt.is_admin)
            return fake_network.fake_get_instance_nw_info(self, 1, 1)

        super(UsageInfoTestCase, self).setUp()
        self.stub_out('nova.network.api.get_instance_nw_info',
                      fake_get_nw_info)

        fake_notifier.stub_notifier(self)
        self.addCleanup(fake_notifier.reset)

        self.flags(compute_driver='fake.FakeDriver',
                   network_manager='nova.network.manager.FlatManager')
        self.compute = manager.ComputeManager()
        self.user_id = 'fake'
        self.project_id = 'fake'
        self.context = context.RequestContext(self.user_id, self.project_id)

        def fake_show(meh, context, id, **kwargs):
            return {'id': 1, 'properties': {'kernel_id': 1, 'ramdisk_id': 1}}

        self.flags(group='glance', api_servers=['http://localhost:9292'])
        self.stub_out('nova.tests.unit.image.fake._FakeImageService.show',
                      fake_show)
        fake_network.set_stub_network_methods(self)
        fake_server_actions.stub_out_action_events(self)
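This setUp relies on the nova.test.TestCase helpers (self.flags, self.stub_out). For reference, the same monkeypatching pattern can be reproduced with the fixtures library directly; this is a minimal sketch, not Nova's own helper, and the replacement lambda is purely illustrative:

    import fixtures
    import testtools

    class ExampleTestCase(testtools.TestCase):
        def setUp(self):
            super(ExampleTestCase, self).setUp()
            # Swap a dotted attribute for the duration of the test;
            # the fixture restores the original during cleanup.
            self.useFixture(fixtures.MonkeyPatch(
                'nova.network.api.get_instance_nw_info',
                lambda cls, ctxt, instance: []))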
Example #2
    def test_run_image_cache_manager_pass(self, mock_instance_list):
        def fake_instances(ctxt):
            instances = []
            for x in range(2):
                instances.append(
                    fake_instance.fake_db_instance(
                        image_ref=uuids.fake_image_ref,
                        uuid=getattr(uuids, 'instance_%s' % x),
                        name='instance-%s' % x,
                        vm_state='',
                        task_state=''))
            return objects.instance._make_instance_list(
                ctxt, objects.InstanceList(), instances, None)

        with utils.tempdir() as tmpdir:
            self.flags(instances_path=tmpdir)
            ctxt = context.get_admin_context()
            mock_instance_list.return_value = fake_instances(ctxt)
            compute = compute_manager.ComputeManager()
            compute._run_image_cache_manager_pass(ctxt)
            filters = {
                'host': ['fake-mini'],
                'deleted': False,
                'soft_deleted': True,
            }
            mock_instance_list.assert_called_once_with(ctxt,
                                                       filters,
                                                       expected_attrs=[],
                                                       use_slave=True)
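The mock_instance_list parameter implies the test method is decorated with a mock.patch that replaces the instance lookup; a decorator consistent with the assertion above would be the following (assumed, since the decorator is not part of the snippet):

    @mock.patch.object(objects.InstanceList, 'get_by_filters')
    def test_run_image_cache_manager_pass(self, mock_instance_list):
        ...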
Example #3
    def setUp(self):
        super(ComputeDriverCPUMonitorTestCase, self).setUp()

        self.flags(compute_driver='nova.virt.libvirt.LibvirtDriver')
        self.useFixture(fixtures.MonkeyPatch(
            'nova.virt.libvirt.driver.LibvirtDriver._conn',
            FakeLibvirt()))
        cm = manager.ComputeManager()
        self.monitor = virt.ComputeDriverCPUMonitor(cm)
Example #4
    def __init__(self, params):
        print('starting init')
        threading.Thread.__init__(self)
        self.manager = compute_manager.ComputeManager()
        self.running = False
        self.shuttingdown = False
        self.refresh_rate = int(params['refresh_rate'])
        self.status = {}
        self._update_hypervisor()
        print('finished init')
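The constructor initializes running/shuttingdown flags and a refresh rate, which points to a polling run() loop on the thread. That method is not shown in the snippet; this is a hypothetical sketch built only from the attributes above (the time module is assumed to be imported):

    def run(self):
        self.running = True
        while not self.shuttingdown:
            # Re-poll the hypervisor, then sleep for the configured interval.
            self._update_hypervisor()
            time.sleep(self.refresh_rate)
        self.running = False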
Example #5
    def setUp(self):
        super(ComputeXenTestCase, self).setUp()
        self.flags(compute_driver='xenapi.XenAPIDriver')
        self.flags(connection_url='http://localhost',
                   connection_password='******',
                   group='xenserver')

        stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
        self.compute = manager.ComputeManager()
        # execute power syncing synchronously for testing:
        self.compute._sync_power_pool = eventlet_utils.SyncPool()
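SyncPool swaps the manager's greenthread pool for one that runs work inline, so power-state syncs finish before the test asserts anything. The real class lives in Nova's test helpers; a plausible sketch of the idea, offered as an assumption:

    import eventlet.greenpool

    class SyncPool(eventlet.greenpool.GreenPool):
        """Run submitted work synchronously instead of spawning."""

        def spawn_n(self, func, *args, **kwargs):
            # Execute immediately rather than scheduling a greenthread.
            func(*args, **kwargs)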
Example #6
    def test_load_new_drivers(self):
        for cls, driver in self.new_drivers.items():
            self.flags(compute_driver=cls)
            # NOTE(sdague) the try block is to make it easier to debug a
            # failure by knowing which driver broke
            try:
                cm = manager.ComputeManager()
            except Exception as e:
                self.fail("Couldn't load driver %s - %s" % (cls, e))

            self.assertEqual(cm.driver.__class__.__name__, driver,
                             "Couldn't load driver %s" % cls)
Example #7
    def _build_resources(self, context, instance, block_device_mapping):
        LOG.debug('Start building block device mappings for instance.',
                  instance=instance)
        resources = {}
        instance.vm_state = vm_states.BUILDING
        instance.task_state = task_states.BLOCK_DEVICE_MAPPING
        instance.save()

        compute = compute_manager.ComputeManager()
        block_device_info = compute._prep_block_device(
            context, instance, block_device_mapping)
        resources['block_device_info'] = block_device_info
        return resources
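A caller would typically unpack the returned dict when driving the rest of the build; a hypothetical usage, with the surrounding names assumed:

        resources = self._build_resources(context, instance,
                                          block_device_mapping)
        block_device_info = resources['block_device_info']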
Example #8
    def setUp(self):
        super(MultiNodeComputeTestCase, self).setUp()
        self.flags(compute_driver='fake.FakeDriver')
        self.compute = manager.ComputeManager()

        def fake_get_compute_nodes_in_db(context, use_slave=False):
            fake_compute_nodes = [{
                'local_gb': 259,
                'uuid': uuidsentinel.fake_compute,
                'vcpus_used': 0,
                'deleted': 0,
                'hypervisor_type': 'powervm',
                'created_at': '2013-04-01T00:27:06.000000',
                'local_gb_used': 0,
                'updated_at': '2013-04-03T00:35:41.000000',
                'hypervisor_hostname': 'fake_phyp1',
                'memory_mb_used': 512,
                'memory_mb': 131072,
                'current_workload': 0,
                'vcpus': 16,
                'cpu_info': 'ppc64,powervm,3940',
                'running_vms': 0,
                'free_disk_gb': 259,
                'service_id': 7,
                'hypervisor_version': 7,
                'disk_available_least': 265856,
                'deleted_at': None,
                'free_ram_mb': 130560,
                'metrics': '',
                'numa_topology': '',
                'stats': '',
                'id': 2,
                'host': 'fake_phyp1',
                'cpu_allocation_ratio': None,
                'ram_allocation_ratio': None,
                'disk_allocation_ratio': None,
                'host_ip': '127.0.0.1'
            }]
            return [
                objects.ComputeNode._from_db_object(context,
                                                    objects.ComputeNode(), cn)
                for cn in fake_compute_nodes
            ]

        def fake_compute_node_delete(context, compute_node_id):
            self.assertEqual(2, compute_node_id)

        self.stubs.Set(self.compute, '_get_compute_nodes_in_db',
                       fake_get_compute_nodes_in_db)
        self.stub_out('nova.db.compute_node_delete', fake_compute_node_delete)
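With these stubs in place, a test can call the stubbed lookup and assert on the converted ComputeNode objects; a minimal hypothetical usage (self.context is assumed to exist on the test case):

        nodes = self.compute._get_compute_nodes_in_db(self.context)
        self.assertEqual(1, len(nodes))
        self.assertEqual('fake_phyp1', nodes[0].hypervisor_hostname)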
Example #9
    def test_load_new_drivers(self):
        def fake_do_setup(_self, compute_task_api):
            pass

        self.stubs.Set(cgcs_messaging.CGCSMessaging, '_do_setup',
                       fake_do_setup)
        for cls, driver in self.new_drivers.items():
            self.flags(compute_driver=cls)
            # NOTE(sdague) the try block is to make it easier to debug a
            # failure by knowing which driver broke
            try:
                cm = manager.ComputeManager()
            except Exception as e:
                self.fail("Couldn't load driver %s - %s" % (cls, e))

            self.assertEqual(cm.driver.__class__.__name__, driver,
                             "Couldn't load driver %s" % cls)
Example #10
    def __init__(self, *args, **kwargs):
        self.vms_conn = kwargs.pop('vmsconn', None)

        self._init_vms()
        self.network_api = network.API()
        self.gridcentric_api = API()
        self.compute_manager = compute_manager.ComputeManager()

        # Use an eventlet green thread condition lock instead of the regular
        # threading module. This is required because eventlet green threads
        # all run on a single system thread; they would otherwise share the
        # same base lock, defeating the point of using it. Since the main
        # threading module is not monkey patched we cannot use it directly.
        self.cond = gthreading.Condition()
        self.locked_instances = {}
        super(GridCentricManager, self).__init__(service_name="gridcentric",
                                                 *args,
                                                 **kwargs)
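The comment explains why a green Condition is needed; the matching import and a typical wait/notify pairing look like this (a sketch, assuming gthreading is eventlet's green threading module):

    from eventlet.green import threading as gthreading

    def wait_for_unlock(cond, locked_instances, instance_uuid):
        with cond:
            # Green threads yield cooperatively while waiting.
            while instance_uuid in locked_instances:
                cond.wait()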