def setUp(self):
    """Build compute/volume managers and an admin context with one instance."""
    super(VolumeTestCase, self).setUp()
    self.compute = utils.import_object(FLAGS.compute_manager)
    self.flags(connection_type='fake')
    self.volume = utils.import_object(FLAGS.volume_manager)
    self.context = context.get_admin_context()
    instance = db.instance_create(self.context, {})
    self.instance_id = instance['id']
def __init__(self):
    """Build the per-topic scheduler driver map (compute and volume)."""
    super(MultiScheduler, self).__init__()
    # Dict literal evaluates left-to-right, so compute loads before volume,
    # same as the original sequential assignments.
    self.drivers = {
        'compute': utils.import_object(FLAGS.compute_scheduler_driver),
        'volume': utils.import_object(FLAGS.volume_scheduler_driver),
    }
def __init__(self, dns_driver=None, dns_instance_entry_factory=None,
             *args, **kwargs):
    """Load the DNS driver and entry factory, falling back to flags."""
    driver_name = dns_driver or FLAGS.dns_driver
    self.driver = utils.import_object(driver_name)
    factory_name = dns_instance_entry_factory or \
        FLAGS.dns_instance_entry_factory
    self.entry_factory = utils.import_object(factory_name)
    super(DnsManager, self).__init__(*args, **kwargs)
def __init__(self, compute_driver=None, *args, **kwargs):
    """Load configuration options and connect to the hypervisor."""
    # TODO(vish): sync driver creation logic with the rest of the system
    # and redocument the module docstring
    driver_name = compute_driver or FLAGS.compute_driver
    self.driver = utils.import_object(driver_name)
    self.network_manager = utils.import_object(FLAGS.network_manager)
    self.volume_manager = utils.import_object(FLAGS.volume_manager)
    super(ComputeManager, self).__init__(*args, **kwargs)
def __init__(self, vsa_driver=None, *args, **kwargs):
    """Load the VSA driver plus the compute/volume/vsa API helpers."""
    self.driver = utils.import_object(vsa_driver or FLAGS.vsa_driver)
    self.compute_manager = utils.import_object(FLAGS.compute_manager)
    self.compute_api = compute.API()
    self.volume_api = volume.API()
    self.vsa_api = vsa_api.API()
    # Both EC2 credentials are required to reach the VSA instances.
    if FLAGS.vsa_ec2_user_id is None or FLAGS.vsa_ec2_access_key is None:
        raise exception.VSANovaAccessParamNotFound()
    super(VsaManager, self).__init__(*args, **kwargs)
def setUp(self):
    """Stub out the VMware API, db and glance, then open a fake connection."""
    super(VMWareAPIVMTestCase, self).setUp()
    self.context = context.RequestContext('fake', 'fake', False)
    self.flags(vmwareapi_host_ip='test_url',
               vmwareapi_host_username='******',
               vmwareapi_host_password='******')
    self.user_id = 'fake'
    self.project_id = 'fake'
    # NOTE(review): overwrites the context created above -- presumably the
    # second form is the one intended; confirm before removing either.
    self.context = context.RequestContext(self.user_id, self.project_id)
    self.network = utils.import_object(FLAGS.network_manager)
    # Fake vmwareapi state is reset before any stubs build on it.
    vmwareapi_fake.reset()
    db_fakes.stub_out_db_instance_api(self.stubs)
    stubs.set_stubs(self.stubs)
    glance_stubs.stubout_glance_client(self.stubs)
    # Connection is created after all stubs so it sees the fakes.
    self.conn = vmwareapi_conn.get_connection(False)
    # NOTE(vish): none of the network plugging code is actually
    # being tested
    self.network_info = [({'bridge': 'fa0', 'id': 0, 'vlan': None,
                           'bridge_interface': None, 'injected': True},
                          {'broadcast': '192.168.0.255',
                           'dns': ['192.168.0.1'],
                           'gateway': '192.168.0.1',
                           'gateway6': 'dead:beef::1',
                           'ip6s': [{'enabled': '1',
                                     'ip': 'dead:beef::dcad:beff:feef:0',
                                     'netmask': '64'}],
                           'ips': [{'enabled': '1',
                                    'ip': '192.168.0.100',
                                    'netmask': '255.255.255.0'}],
                           'label': 'fake',
                           'mac': 'DE:AD:BE:EF:00:00',
                           'rxtx_cap': 3})]
def setUp(self):
    """Stub out XenAPI, glance, db and utils, then open a fake connection."""
    super(XenAPIVMTestCase, self).setUp()
    self.network = utils.import_object(FLAGS.network_manager)
    self.stubs = stubout.StubOutForTesting()
    self.flags(xenapi_connection_url='test_url',
               xenapi_connection_password='******',
               instance_name_template='%d')
    # Fake XenAPI state must be reset before SRs/PIFs are created on it.
    xenapi_fake.reset()
    xenapi_fake.create_local_srs()
    xenapi_fake.create_local_pifs()
    db_fakes.stub_out_db_instance_api(self.stubs)
    xenapi_fake.create_network('fake', FLAGS.flat_network_bridge)
    stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
    stubs.stubout_get_this_vm_uuid(self.stubs)
    stubs.stubout_stream_disk(self.stubs)
    stubs.stubout_is_vdi_pv(self.stubs)
    self.stubs.Set(vmops.VMOps, 'reset_network', reset_network)
    self.stubs.Set(vmops.VMOps, '_find_rescue_vbd_ref',
                   _find_rescue_vbd_ref)
    stubs.stub_out_vm_methods(self.stubs)
    glance_stubs.stubout_glance_client(self.stubs)
    fake_utils.stub_out_utils_execute(self.stubs)
    self.user_id = 'fake'
    self.project_id = 'fake'
    self.context = context.RequestContext(self.user_id, self.project_id)
    # Connection is created last so it sees all the stubs above.
    self.conn = xenapi_conn.get_connection(False)
def test_spawn_vlanmanager(self):
    """Spawn an instance under VlanManager using the xenapi_net driver."""
    self.flags(xenapi_image_service='glance',
               network_manager='nova.network.manager.VlanManager',
               network_driver='nova.network.xenapi_net',
               vlan_interface='fake0')

    def dummy(*args, **kwargs):
        # VIF creation is out of scope for this test.
        pass

    self.stubs.Set(VMOps, 'create_vifs', dummy)
    # Reset network table
    xenapi_fake.reset_table('network')
    # Instance id = 2 will use vlan network (see db/fakes.py)
    ctxt = self.context.elevated()
    instance_ref = self._create_instance(2)
    network_bk = self.network
    # Ensure we use xenapi_net driver
    self.network = utils.import_object(FLAGS.network_manager)
    networks = self.network.db.network_get_all(ctxt)
    # Every network needs a host assigned before allocation can succeed.
    for network in networks:
        self.network.set_network_host(ctxt, network['id'])
    self.network.allocate_for_instance(ctxt,
                                       instance_id=instance_ref.id,
                                       instance_type_id=1,
                                       project_id=self.project.id)
    self.network.setup_compute_network(ctxt, instance_ref.id)
    self._test_spawn(glance_stubs.FakeGlance.IMAGE_MACHINE,
                     glance_stubs.FakeGlance.IMAGE_KERNEL,
                     glance_stubs.FakeGlance.IMAGE_RAMDISK,
                     instance_id=instance_ref.id,
                     create_record=False)
    # TODO(salvatore-orlando): a complete test here would require
    # a check for making sure the bridge for the VM's VIF is
    # consistent with bridge specified in nova db
    self.network = network_bk
def setUp(self):
    """Create the configured Linux network driver with a real db handle."""
    super(LinuxNetworkTestCase, self).setUp()
    self.driver = utils.import_object(FLAGS.network_driver)
    self.driver.db = db
    self.context = context.RequestContext('testuser', 'testproject',
                                          is_admin=True)
def __init__(self, application, db_driver=None):
    """Wire up db access and the auth manager, then chain to the WSGI base."""
    # pylint: disable=C0103
    self.db = utils.import_object(db_driver or FLAGS.db_driver)
    self.auth = auth.manager.AuthManager()
    super(KeystoneAuthShim, self).__init__(application)
def setUp(self):
    """Stub auth/db/glance/network layers for v1.1 server-action tests."""
    self.maxDiff = None
    super(ServerActionsControllerTest, self).setUp()
    self.stubs = stubout.StubOutForTesting()
    fakes.stub_out_auth(self.stubs)
    self.stubs.Set(nova.db, 'instance_get', return_server_by_id)
    self.stubs.Set(nova.db, 'instance_get_by_uuid', return_server_by_uuid)
    self.stubs.Set(nova.db, 'instance_update', instance_update)
    fakes.stub_out_glance(self.stubs)
    fakes.stub_out_nw_api(self.stubs)
    fakes.stub_out_rate_limiting(self.stubs)
    self.snapshot = fakes.stub_out_compute_api_snapshot(self.stubs)
    self.backup = fakes.stub_out_compute_api_backup(self.stubs)
    service_class = 'nova.image.glance.GlanceImageService'
    self.service = utils.import_object(service_class)
    self.context = context.RequestContext(1, None)
    # Start each test from an empty image store.
    self.service.delete_all()
    self.sent_to_glance = {}
    fakes.stub_out_glance_add_image(self.stubs, self.sent_to_glance)
    self.flags(allow_instance_snapshots=True)
    self.uuid = FAKE_UUID
    self.url = '/v1.1/fake/servers/%s/action' % self.uuid
    self.controller = servers.Controller()
def test_spawn_vlanmanager(self): self.flags( xenapi_image_service="glance", network_manager="nova.network.manager.VlanManager", network_driver="nova.network.xenapi_net", vlan_interface="fake0", ) # Reset network table xenapi_fake.reset_table("network") # Instance id = 2 will use vlan network (see db/fakes.py) fake_instance_id = 2 network_bk = self.network # Ensure we use xenapi_net driver self.network = utils.import_object(FLAGS.network_manager) self.network.setup_compute_network(None, fake_instance_id) self._test_spawn( glance_stubs.FakeGlance.IMAGE_MACHINE, glance_stubs.FakeGlance.IMAGE_KERNEL, glance_stubs.FakeGlance.IMAGE_RAMDISK, instance_id=fake_instance_id, ) # TODO(salvatore-orlando): a complete test here would require # a check for making sure the bridge for the VM's VIF is # consistent with bridge specified in nova db self.network = network_bk
def __init__(self, network_driver=None, *args, **kwargs):
    """Load the L3 driver plus the network/compute API helpers."""
    self.driver = utils.import_object(network_driver or
                                      FLAGS.network_driver)
    self.network_api = network_api.API()
    self.compute_api = compute_api.API()
    super(NetworkManager, self).__init__(service_name="network",
                                         *args, **kwargs)
def setUp(self):
    """Stub auth/db/glance/network layers for v2 server-action tests."""
    super(ServerActionsControllerTest, self).setUp()
    fakes.stub_out_auth(self.stubs)
    self.stubs.Set(nova.db, 'instance_get_by_uuid',
                   fakes.fake_instance_get(vm_state=vm_states.ACTIVE,
                                           host='fake_host'))
    self.stubs.Set(nova.db, 'instance_update', instance_update)
    fakes.stub_out_glance(self.stubs)
    fakes.stub_out_nw_api(self.stubs)
    fakes.stub_out_rate_limiting(self.stubs)
    fakes.stub_out_compute_api_snapshot(self.stubs)
    fakes.stub_out_image_service(self.stubs)
    service_class = 'nova.image.glance.GlanceImageService'
    self.service = utils.import_object(service_class)
    # Start each test from an empty image store.
    self.service.delete_all()
    self.sent_to_glance = {}
    fakes.stub_out_glance_add_image(self.stubs, self.sent_to_glance)
    self.flags(allow_instance_snapshots=True,
               enable_instance_password=True)
    self.uuid = FAKE_UUID
    self.url = '/v2/fake/servers/%s/action' % self.uuid
    self._image_href = '155d900f-4e14-4e4c-a73d-069cbf4541e6'
    self.controller = servers.Controller()
def __init__(self, compute_driver=None, *args, **kwargs):
    """Load configuration options and connect to the hypervisor."""
    # TODO(vish): sync driver creation logic with the rest of the system
    # and redocument the module docstring
    if not compute_driver:
        compute_driver = FLAGS.compute_driver
    try:
        self.driver = utils.import_object(compute_driver)
    except ImportError as e:
        # Name the failing driver and the underlying error so operators
        # can tell a bad flag value from a broken installation before
        # the process exits.
        LOG.error("Unable to load the virtualization driver %s: %s",
                  compute_driver, e)
        sys.exit(1)
    self.network_manager = utils.import_object(FLAGS.network_manager)
    self.volume_manager = utils.import_object(FLAGS.volume_manager)
    super(ComputeManager, self).__init__(*args, **kwargs)
def setUp(self):
    """Start fake services and stub images/rpc for admin API tests."""
    super(AdminApiTestCase, self).setUp()
    self.flags(connection_type="fake")
    # set up our cloud
    self.api = admin.AdminController()
    # set up services
    self.compute = self.start_service("compute")
    # NOTE(review): 'scheduter' looks like a typo for 'scheduler'; kept
    # as-is because other code may reference this attribute name.
    self.scheduter = self.start_service("scheduler")
    self.network = self.start_service("network")
    self.volume = self.start_service("volume")
    self.image_service = utils.import_object(FLAGS.image_service)
    self.user_id = "admin"
    self.project_id = "admin"
    self.context = context.RequestContext(self.user_id,
                                          self.project_id,
                                          True)

    def fake_show(meh, context, id):
        # Minimal record: a bootable machine image with kernel/ramdisk.
        return {
            "id": 1,
            "properties": {"kernel_id": 1,
                           "ramdisk_id": 1,
                           "type": "machine",
                           "image_state": "available"},
        }

    self.stubs.Set(fake._FakeImageService, "show", fake_show)
    self.stubs.Set(fake._FakeImageService, "show_by_name", fake_show)

    # NOTE(vish): set up a manual wait so rpc.cast has a chance to finish
    rpc_cast = rpc.cast

    def finish_cast(*args, **kwargs):
        rpc_cast(*args, **kwargs)
        greenthread.sleep(0.2)

    self.stubs.Set(rpc, "cast", finish_cast)
def __init__(self, scheduler_driver=None, *args, **kwargs):
    """Create the zone manager and attach it to the scheduler driver."""
    self.zone_manager = zone_manager.ZoneManager()
    self.driver = utils.import_object(scheduler_driver or
                                      FLAGS.scheduler_driver)
    self.driver.set_zone_manager(self.zone_manager)
    super(SchedulerManager, self).__init__(*args, **kwargs)
def setUp(self):
    """Create a fake auth user/project and stub out the XenAPI stack."""
    super(XenAPIVMTestCase, self).setUp()
    self.manager = manager.AuthManager()
    self.user = self.manager.create_user("fake", "fake", "fake",
                                         admin=True)
    self.project = self.manager.create_project("fake", "fake", "fake")
    self.network = utils.import_object(FLAGS.network_manager)
    self.stubs = stubout.StubOutForTesting()
    self.flags(
        xenapi_connection_url="test_url",
        xenapi_connection_password="******",
        instance_name_template="%d"
    )
    # Fake XenAPI state must be reset before SRs/PIFs are created on it.
    xenapi_fake.reset()
    xenapi_fake.create_local_srs()
    xenapi_fake.create_local_pifs()
    db_fakes.stub_out_db_instance_api(self.stubs)
    xenapi_fake.create_network("fake", FLAGS.flat_network_bridge)
    stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
    stubs.stubout_get_this_vm_uuid(self.stubs)
    stubs.stubout_stream_disk(self.stubs)
    stubs.stubout_is_vdi_pv(self.stubs)
    self.stubs.Set(VMOps, "reset_network", reset_network)
    stubs.stub_out_vm_methods(self.stubs)
    glance_stubs.stubout_glance_client(self.stubs,
                                       glance_stubs.FakeGlance)
    fake_utils.stub_out_utils_execute(self.stubs)
    self.context = context.RequestContext("fake", "fake", False)
    # Connection is created last so it sees all the stubs above.
    self.conn = xenapi_conn.get_connection(False)
def setUp(self):
    """Start fake services and stub images/rpc for admin API tests."""
    super(AdminApiTestCase, self).setUp()
    self.flags(connection_type='fake')

    # set up our cloud
    self.api = admin.AdminController()

    # set up services
    self.compute = self.start_service('compute')
    # NOTE(review): 'scheduter' looks like a typo for 'scheduler'; kept
    # as-is because other code may reference this attribute name.
    self.scheduter = self.start_service('scheduler')
    self.network = self.start_service('network')
    self.volume = self.start_service('volume')
    self.image_service = utils.import_object(FLAGS.image_service)

    self.user_id = 'admin'
    self.project_id = 'admin'
    self.context = context.RequestContext(self.user_id,
                                          self.project_id,
                                          is_admin=True)

    def fake_show(meh, context, id):
        # Minimal record: a bootable machine image with kernel/ramdisk.
        return {'id': 1,
                'properties': {'kernel_id': 1,
                               'ramdisk_id': 1,
                               'type': 'machine',
                               'image_state': 'available'}}

    self.stubs.Set(fake._FakeImageService, 'show', fake_show)
    self.stubs.Set(fake._FakeImageService, 'show_by_name', fake_show)

    # NOTE(comstud): Make 'cast' behave like a 'call' which will
    # ensure that operations complete
    self.stubs.Set(rpc, 'cast', rpc.call)
def __init__(self, q_conn=None, ipam_lib=None, *args, **kwargs):
    """Initialize two key libraries, the connection to a Quantum
    service, and the library for implementing IPAM.

    Calls inherited FlatManager constructor.
    """
    if not q_conn:
        q_conn = quantum_connection.QuantumClientConnection()
    self.q_conn = q_conn

    if not ipam_lib:
        ipam_lib = FLAGS.quantum_ipam_lib
    # The IPAM module exposes a factory that binds back to this manager.
    self.ipam = utils.import_object(ipam_lib).get_ipam_lib(self)

    # Base constructor runs before driver calls so self.driver exists.
    super(QuantumManager, self).__init__(*args, **kwargs)

    # Initialize forwarding rules for anything specified in
    # FLAGS.fixed_range()
    self.driver.init_host()
    # Set up all the forwarding rules for any network that has a
    # gateway set.
    networks = self.get_all_networks()
    for net in networks:
        LOG.debug("Initializing network: %s (cidr: %s, gw: %s)" % (
            net['label'], net['cidr'], net['gateway']))
        if net['gateway']:
            self.driver.init_host(net['cidr'])
    self.driver.ensure_metadata_ip()
    self.driver.metadata_forward()
def setUp(self):
    """Start the cloud controller and fake services for EC2 cloud tests."""
    super(CloudTestCase, self).setUp()
    self.flags(connection_type='fake')
    self.conn = rpc.Connection.instance()

    # set up our cloud
    self.cloud = cloud.CloudController()

    # set up services
    self.compute = self.start_service('compute')
    # NOTE(review): 'scheduter' looks like a typo for 'scheduler'; kept
    # as-is because other code may reference this attribute name.
    self.scheduter = self.start_service('scheduler')
    self.network = self.start_service('network')
    self.image_service = utils.import_object(FLAGS.image_service)

    self.manager = manager.AuthManager()
    self.user = self.manager.create_user('admin', 'admin', 'admin', True)
    self.project = self.manager.create_project('proj', 'admin', 'proj')
    self.context = context.RequestContext(user=self.user,
                                          project=self.project)
    # Called for its side effect of binding a network host; the
    # return value is unused.
    host = self.network.get_network_host(self.context.elevated())

    def fake_show(meh, context, id):
        # Minimal machine image record with kernel/ramdisk ids.
        return {'id': 1,
                'properties': {'kernel_id': 1,
                               'ramdisk_id': 1,
                               'type': 'machine'}}

    self.stubs.Set(local.LocalImageService, 'show', fake_show)
    self.stubs.Set(local.LocalImageService, 'show_by_name', fake_show)
def get_dns_entry_factory():
    """Returns a DNS entry factory."""
    global _dns_entry_factory
    if _dns_entry_factory:
        return _dns_entry_factory
    # Lazily create the module-level singleton from the configured class.
    _dns_entry_factory = utils.import_object(
        FLAGS.dns_instance_entry_factory)
    return _dns_entry_factory
def setUp(self): super(NetworkTestCase, self).setUp() # NOTE(vish): if you change these flags, make sure to change the # flags in the corresponding section in nova-dhcpbridge self.flags(connection_type='fake', fake_call=True, fake_network=True, network_size=16, num_networks=5) self.manager = manager.AuthManager() self.user = self.manager.create_user('netuser', 'netuser', 'netuser') self.projects = [] self.network = utils.import_object(FLAGS.network_manager) self.context = context.RequestContext(project=None, user=self.user) for i in range(5): name = 'project%s' % i project = self.manager.create_project(name, 'netuser', name) self.projects.append(project) # create the necessary network data for the project user_context = context.RequestContext(project=self.projects[i], user=self.user) host = self.network.get_network_host(user_context.elevated()) instance_ref = self._create_instance(0) self.instance_id = instance_ref['id'] instance_ref = self._create_instance(1) self.instance2_id = instance_ref['id']
def setUp(self):
    """Start fake services and build EC2 id-validation fixtures."""
    super(EC2ValidateTestCase, self).setUp()
    self.flags(connection_type='fake',
               stub_network=True)

    def dumb(*args, **kwargs):
        # Swallow usage notifications during the tests.
        pass

    self.stubs.Set(utils, 'usage_from_instance', dumb)
    # set up our cloud
    self.cloud = cloud.CloudController()

    # set up services
    self.compute = self.start_service('compute')
    # NOTE(review): 'scheduter' looks like a typo for 'scheduler'; kept
    # as-is because other code may reference this attribute name.
    self.scheduter = self.start_service('scheduler')
    self.network = self.start_service('network')
    self.volume = self.start_service('volume')
    self.image_service = utils.import_object(FLAGS.image_service)

    self.user_id = 'fake'
    self.project_id = 'fake'
    self.context = context.RequestContext(self.user_id,
                                          self.project_id,
                                          is_admin=True)

    # Malformed ids must raise InvalidInstanceIDMalformed; well-formed
    # but unknown ids must raise the corresponding *NotFound exception.
    self.EC2_MALFORMED_IDS = ['foobar', '', 123]
    self.EC2_VALID__IDS = ['i-284f3a41', 'i-001', 'i-deadbeef']

    self.ec2_id_exception_map = [(x, exception.InvalidInstanceIDMalformed)
                                 for x in self.EC2_MALFORMED_IDS]
    self.ec2_id_exception_map.extend([(x, exception.InstanceNotFound)
                                      for x in self.EC2_VALID__IDS])
    self.volume_id_exception_map = [
        (x, exception.InvalidInstanceIDMalformed)
        for x in self.EC2_MALFORMED_IDS]
    self.volume_id_exception_map.extend([(x, exception.VolumeNotFound)
                                         for x in self.EC2_VALID__IDS])

    def fake_show(meh, context, id):
        # Minimal bootable machine image with kernel/ramdisk uuids.
        return {'id': id,
                'container_format': 'ami',
                'properties': {
                    'kernel_id': 'cedef40a-ed67-4d10-800e-17455edce175',
                    'ramdisk_id': 'cedef40a-ed67-4d10-800e-17455edce175',
                    'type': 'machine',
                    'image_state': 'available'}}

    self.stubs.Set(fake._FakeImageService, 'show', fake_show)
    self.stubs.Set(fake._FakeImageService, 'show_by_name', fake_show)

    # NOTE(comstud): Make 'cast' behave like a 'call' which will
    # ensure that operations complete
    self.stubs.Set(rpc, 'cast', rpc.call)

    # make sure we can map ami-00000001/2 to a uuid in FakeImageService
    db.api.s3_image_create(self.context,
                           'cedef40a-ed67-4d10-800e-17455edce175')
    db.api.s3_image_create(self.context,
                           '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6')
def setUp(self):
    """Build a FlatManager wired to MiniDNS and the real db for tests."""
    super(FlatNetworkTestCase, self).setUp()
    self.network = network_manager.FlatManager(host=HOST)
    self.network.instance_dns_manager = utils.import_object(
        'nova.network.minidns.MiniDNS')
    self.network.db = db
    self.context = context.RequestContext('testuser', 'testproject',
                                          is_admin=False)
def __init__(self, application, db_driver=None):
    """Log startup, wire up db/auth access, then chain to the WSGI base."""
    logger.info("Starting the %s component", PROTOCOL_NAME)
    # pylint: disable=C0103
    self.db = utils.import_object(db_driver or FLAGS.db_driver)
    self.auth = auth.manager.AuthManager()
    super(KeystoneAuthShim, self).__init__(application)
def setUp(self):
    """Point the local image service at a throwaway temp directory."""
    super(LocalImageServiceTest, self).setUp()
    self.tempdir = tempfile.mkdtemp()
    self.flags(images_path=self.tempdir)
    self.stubs = stubout.StubOutForTesting()
    self.service = utils.import_object("nova.image.local.LocalImageService")
    self.context = context.RequestContext(None, None)
def get_dns_entry_factory():
    """Returns a DNS entry factory."""
    global _dns_entry_factory
    if not _dns_entry_factory:
        # Import the configured factory class, then instantiate it so the
        # cached singleton is the factory object, not the class.
        factory_class = utils.import_object(
            test_config.values["dns_instance_entry_factory"])
        _dns_entry_factory = factory_class()
    return _dns_entry_factory
def setUp(self):
    """Stub glance/compute and start from an empty image service."""
    self.stubs = stubout.StubOutForTesting()
    fakes.stub_out_glance(self.stubs)
    fakes.stub_out_compute_api_snapshot(self.stubs)
    self.service = utils.import_object('nova.image.glance.GlanceImageService')
    self.context = context.RequestContext(None, None)
    self.service.delete_all()
def __init__(self, driver=None, *args, **kwargs):
    """Inits the driver from parameter or flag

    __init__ is run every time AuthManager() is called, so we only
    reset the driver if it is not set or a new driver is specified.
    """
    # NOTE(review): unlike the driver below, network_manager is
    # re-imported on every call; confirm that is intentional.
    self.network_manager = utils.import_object(FLAGS.network_manager)
    if driver or not getattr(self, 'driver', None):
        self.driver = utils.import_class(driver or FLAGS.auth_driver)
def __init__(self):
    """Load the image service named by FLAGS.image_service."""
    self._service = utils.import_object(FLAGS.image_service)
def __init__(self, db_driver=None):
    """Expose a db handle, defaulting to the driver named in FLAGS."""
    # pylint: disable=C0103
    self.db = utils.import_object(db_driver or FLAGS.db_driver)
def __init__(self, session):
    """Initializer."""
    # Session used for vSphere API calls; VIF driver comes from config.
    self._session = session
    self._vif_driver = utils.import_object(FLAGS.vmware_vif_driver)
def __init__(self, application, db_driver=None):
    """Wire up db access and the auth manager, then chain to the WSGI base."""
    self.db = utils.import_object(db_driver or FLAGS.db_driver)
    self.auth = auth.manager.AuthManager()
    super(AuthMiddleware, self).__init__(application)
def __init__(self, *args, **kwargs):
    """Run base Manager setup, then load the configured network driver."""
    super(MetadataManager, self).__init__(*args, **kwargs)
    self.network_driver = utils.import_object(FLAGS.network_driver)
# NOTE(review): this span begins mid-statement -- it is the tail of a
# plug() implementation whose opening lines are outside this view
# (OVS external-ids setup followed by device bring-up).
            'Interface', dev, "external-ids:iface-status=active",
            '--', 'set', 'Interface', dev,
            "external-ids:attached-mac=%s" % mac_address,
            run_as_root=True)
        _execute('ip', 'link', 'set', dev, "address", mac_address,
                 run_as_root=True)
        _execute('ip', 'link', 'set', dev, 'up', run_as_root=True)
        return dev

    def unplug(self, network):
        # Nothing to tear down for this driver; just report the device.
        return self.get_dev(network)

    def get_dev(self, network):
        # Gateway interfaces are named after the network id.
        dev = "gw-" + str(network['id'])
        return dev


# Module-level singletons shared by the linux_net helpers.
iptables_manager = IptablesManager()
interface_driver = utils.import_object(FLAGS.linuxnet_interface_driver)
def hosts_up(topic):
    """Returns list of hosts running for a topic."""
    driver = utils.import_object(FLAGS.scheduler_driver)
    return driver.hosts_up(FakeContext(), topic)
def __init__(self, scheduler_driver=None, *args, **kwargs):
    """Load the scheduler driver (flag default) and init the base Manager."""
    self.driver = utils.import_object(scheduler_driver or
                                      FLAGS.scheduler_driver)
    super(SchedulerManager, self).__init__(*args, **kwargs)
def _get_impl():
    """Delay import of rpc_backend until FLAGS are loaded."""
    global _RPCIMPL
    if _RPCIMPL is not None:
        return _RPCIMPL
    _RPCIMPL = import_object(FLAGS.rpc_backend)
    return _RPCIMPL
def __init__(self, console_driver=None, *args, **kwargs):
    """Load the console driver and hand it this service's host."""
    self.driver = utils.import_object(console_driver or
                                      FLAGS.console_driver)
    super(ConsoleProxyManager, self).__init__(*args, **kwargs)
    # self.host is only available after the base Manager __init__ runs.
    self.driver.host = self.host
def __init__(self, service=None, *args, **kwargs):
    """Wrap an image service, defaulting to the flag-configured one."""
    if service is None:
        service = utils.import_object(FLAGS.image_service)
    self.service = service
    # NOTE(review): re-invokes __init__ on an already-constructed object
    # so this wrapper's args reach the service; confirm import_object
    # returns an instance here rather than a class.
    self.service.__init__(*args, **kwargs)
def __init__(self):
    """Set up the compute API and the configured host manager."""
    self.compute_api = compute_api.API()
    self.host_manager = utils.import_object(FLAGS.scheduler_host_manager)
def __init__(self):
    """Create the compute API and security group handler helpers."""
    self.sgh = utils.import_object(FLAGS.security_group_handler)
    self.compute_api = compute.API()
def _get_interface_driver():
    """Lazily create and cache the Linux net interface driver singleton."""
    global interface_driver
    if interface_driver:
        return interface_driver
    interface_driver = utils.import_object(FLAGS.linuxnet_interface_driver)
    return interface_driver
def __init__(self, controller):
    """We need the image service to create an instance."""
    self._image_service = utils.import_object(FLAGS.image_service)
    self.controller = controller
    super(CreateInstanceHelper, self).__init__()
def setUp(self):
    """Create the consoleauth manager under an admin context."""
    super(ConsoleauthTestCase, self).setUp()
    self.context = context.get_admin_context()
    self.manager = utils.import_object(FLAGS.consoleauth_manager)
def setUp(self):
    """Instantiate the configured Linux network driver with db access."""
    super(LinuxNetworkTestCase, self).setUp()
    self.driver = utils.import_object(FLAGS.network_driver)
    self.driver.db = db
def __init__(self, *args, **kwargs):
    """Run base init, then wire the compute API and sg handler."""
    super(SecurityGroupActionController, self).__init__(*args, **kwargs)
    self.sgh = utils.import_object(FLAGS.security_group_handler)
    self.compute_api = compute.API()
def create_driver(self):
    """Creates the DNS Driver used in subsequent tests."""
    self.driver = utils.import_object(FLAGS.dns_driver)
    self.entry_factory = RsDnsInstanceEntryFactory()
    self.new_records = {}
    # Unique run id so the test's records don't collide across runs.
    self.test_uuid = uuid.uuid4().hex
def _setup_other_managers(self):
    """Instantiate the peer volume/network managers and compute driver."""
    self.volume_manager = utils.import_object(FLAGS.volume_manager)
    self.network_manager = utils.import_object(FLAGS.network_manager)
    self.compute_driver = utils.import_object(FLAGS.compute_driver)
def __init__(self):
    """Set up compute and image service helpers for the controller."""
    self._image_service = utils.import_object(FLAGS.image_service)
    self.compute_api = compute.API()
    super(Controller, self).__init__()