def test_get_dhcp_opts_for_nw01(self):
    """Verify dhcp-opts output for network 1 against stubbed DB calls.

    Fix: removed a Python 2 ``print instance_id`` debug leftover from the
    ``get_instance`` stub — it is a syntax error under Python 3 and only
    polluted test output.
    """
    def get_instance(_context, instance_id):
        # Stub for db.instance_get, backed by the module-level
        # ``instances`` fixture.
        return instances[instance_id]

    self.stubs.Set(db, 'instance_get', get_instance)
    self.mox.StubOutWithMock(db, 'network_get_associated_fixed_ips')
    self.mox.StubOutWithMock(db, 'virtual_interface_get_by_instance')

    # mox replays these expectations in this exact order
    db.network_get_associated_fixed_ips(
        mox.IgnoreArg(),
        mox.IgnoreArg()).AndReturn([fixed_ips[1],
                                    fixed_ips[2],
                                    fixed_ips[5]])
    db.virtual_interface_get_by_instance(
        mox.IgnoreArg(),
        mox.IgnoreArg()).AndReturn([vifs[0], vifs[1], vifs[4]])
    db.virtual_interface_get_by_instance(
        mox.IgnoreArg(),
        mox.IgnoreArg()).AndReturn([vifs[2], vifs[3], vifs[5]])
    self.mox.ReplayAll()

    expected_opts = "NW-i00000000-1,3"
    actual_opts = self.driver.get_dhcp_opts(self.context, networks[1])

    self.assertEquals(actual_opts, expected_opts)
def test_get_dhcp_opts_for_nw01(self):
    # Verify dhcp-opts output for network 1: per the expected string,
    # only non-default-gateway vifs produce opt lines.
    self.mox.StubOutWithMock(db, 'network_get_associated_fixed_ips')
    self.mox.StubOutWithMock(db, 'virtual_interface_get_by_instance')
    # NOTE: mox verifies this exact expectation order on replay.
    db.network_get_associated_fixed_ips(mox.IgnoreArg(), mox.IgnoreArg())\
        .AndReturn([fixed_ips[1], fixed_ips[2], fixed_ips[5]])
    db.virtual_interface_get_by_instance(mox.IgnoreArg(), mox.IgnoreArg())\
        .AndReturn([vifs[0], vifs[1], vifs[4]])
    db.virtual_interface_get_by_instance(mox.IgnoreArg(), mox.IgnoreArg())\
        .AndReturn([vifs[2], vifs[3], vifs[5]])
    self.mox.ReplayAll()

    expected_opts = "NW-i00000000-1,3"
    actual_opts = self.driver.get_dhcp_opts(None, networks[1])

    self.assertEquals(actual_opts, expected_opts)
def test_get_dhcp_opts_for_nw01(self):
    """Verify dhcp-opts output for network 1 against stubbed DB calls.

    Fix: removed a Python 2 ``print instance_id`` debug leftover from the
    ``get_instance`` stub — invalid syntax under Python 3 and noisy in
    test output.
    """
    def get_instance(_context, instance_id):
        # Stub for db.instance_get, backed by the module-level
        # ``instances`` fixture.
        return instances[instance_id]

    self.stubs.Set(db, 'instance_get', get_instance)
    self.mox.StubOutWithMock(db, 'network_get_associated_fixed_ips')
    self.mox.StubOutWithMock(db, 'virtual_interface_get_by_instance')
    # mox replays these expectations in this exact order
    db.network_get_associated_fixed_ips(mox.IgnoreArg(),
                                        mox.IgnoreArg()).AndReturn([
        fixed_ips[1], fixed_ips[2], fixed_ips[5]
    ])
    db.virtual_interface_get_by_instance(mox.IgnoreArg(),
                                         mox.IgnoreArg()).AndReturn(
        [vifs[0], vifs[1], vifs[4]])
    db.virtual_interface_get_by_instance(mox.IgnoreArg(),
                                         mox.IgnoreArg()).AndReturn(
        [vifs[2], vifs[3], vifs[5]])
    self.mox.ReplayAll()

    expected_opts = "NW-i00000000-1,3"
    actual_opts = self.driver.get_dhcp_opts(self.context, networks[1])

    self.assertEquals(actual_opts, expected_opts)
def test_get_instance_nw_info(self):
    # Stub the three DB lookups feeding get_instance_nw_info and check
    # the (network, mapping) pairs it builds from the fixtures.
    self.mox.StubOutWithMock(db, 'fixed_ip_get_by_instance')
    self.mox.StubOutWithMock(db, 'virtual_interface_get_by_instance')
    self.mox.StubOutWithMock(db, 'instance_type_get')
    db.fixed_ip_get_by_instance(mox.IgnoreArg(),
                                mox.IgnoreArg()).AndReturn(fixed_ips)
    db.virtual_interface_get_by_instance(mox.IgnoreArg(),
                                         mox.IgnoreArg()).AndReturn(vifs)
    db.instance_type_get(mox.IgnoreArg(),
                         mox.IgnoreArg()).AndReturn(flavor)
    self.mox.ReplayAll()

    nw_info = self.network.get_instance_nw_info(None, 0, 0, None)

    self.assertTrue(nw_info)

    # nw is a (network, mapping) pair; index i also drives the expected
    # addressing scheme (192.168.i.0/24, 2001:db(i+8)::/64).
    for i, nw in enumerate(nw_info):
        i8 = i + 8
        # network half of the pair
        check = {'bridge': 'fa%s' % i,
                 'cidr': '192.168.%s.0/24' % i,
                 'cidr_v6': '2001:db%s::/64' % i8,
                 'id': i,
                 'multi_host': False,
                 'injected': 'DONTCARE',
                 'bridge_interface': 'fake_fa%s' % i,
                 'vlan': None}
        self.assertDictMatch(nw[0], check)

        # mapping half of the pair
        check = {'broadcast': '192.168.%s.255' % i,
                 'dhcp_server': '192.168.%s.1' % i,
                 'dns': 'DONTCARE',
                 'gateway': '192.168.%s.1' % i,
                 'gateway6': '2001:db%s::1' % i8,
                 'ip6s': 'DONTCARE',
                 'ips': 'DONTCARE',
                 'label': 'test%s' % i,
                 'mac': 'DE:AD:BE:EF:00:0%s' % i,
                 'vif_uuid': ('00000000-0000-0000-0000-000000000000000%s'
                              % i),
                 'rxtx_cap': 'DONTCARE',
                 'should_create_vlan': False,
                 'should_create_bridge': False}
        self.assertDictMatch(nw[1], check)

        # ip6s / ips lists checked exactly
        check = [{'enabled': 'DONTCARE',
                  'ip': '2001:db%s::dcad:beff:feef:%s' % (i8, i),
                  'netmask': '64'}]
        self.assertDictListMatch(nw[1]['ip6s'], check)

        check = [{'enabled': '1',
                  'ip': '192.168.%s.100' % i,
                  'netmask': '255.255.255.0'}]
        self.assertDictListMatch(nw[1]['ips'], check)
def get_by_instance_uuid(cls, context, instance_uuid, use_subordinate=False):
    """Return the VirtualInterface object list for an instance.

    Fetches the raw vif rows from the DB layer and wraps them in the
    object list type.
    """
    rows = db.virtual_interface_get_by_instance(
        context, instance_uuid, use_subordinate=use_subordinate)
    vif_list = base.obj_make_list(context, cls(), VirtualInterface, rows)
    return vif_list
def _from_bm_node(instance_id, tenant_id):
    """Collect (vifinfo_uuid, network_uuid, mac, addrs) tuples for each
    usable virtual interface of a bare-metal instance.

    Interfaces without a network, a network uuid, a vifinfo uuid or any
    fixed IPs are skipped (logged, not raised).
    """
    LOG.debug('_from_bm_node(instance_id=%s,tenant_id=%s)',
              instance_id, tenant_id)
    ctx = context.get_admin_context()
    info = []
    for vif in db.virtual_interface_get_by_instance(ctx, instance_id):
        LOG.debug('vif=%s', vif.__dict__)
        mac = vif.address
        network_ref = None
        if vif['network_id']:
            network_ref = db.network_get(ctx, vif['network_id'])
        if not network_ref:
            LOG.warn('vif.network is None')
            continue
        LOG.debug('vif.network=%s', network_ref.__dict__)
        network_uuid = network_ref.uuid
        if not network_uuid:
            LOG.warn('network_uuid is None')
            continue
        vifinfo_uuid = _get_vifinfo_uuid(tenant_id, network_uuid, vif.uuid)
        LOG.debug('vifinfo_uuid=%s', vifinfo_uuid)
        if not vifinfo_uuid:
            continue
        fixed_ips = db.fixed_ips_by_virtual_interface(ctx, vif.id)
        if not fixed_ips:
            LOG.warn('fixed_ip is None')
            continue
        addrs = [fip.address for fip in fixed_ips]
        info.append((vifinfo_uuid, network_uuid, mac, addrs))
    LOG.debug('_from_bm_node(instance_id=%s,tenant_id=%s) end: info=%s',
              instance_id, tenant_id, info)
    return info
def get_dhcp_opts(context, network_ref):
    """Get network's hosts config in dhcp-opts format.

    Fix: the second loop previously tested ``instance_id`` left over
    from the first ``for instance_id in instance_set`` loop, so every
    datum was compared against the wrong (last-iterated) instance.  It
    now derives the instance id from each datum, matching the sibling
    get_dhcp_opts implementations in this file.
    """
    hosts = []
    host = None
    if network_ref['multi_host']:
        host = FLAGS.host
    data = db.network_get_associated_fixed_ips(context,
                                               network_ref['id'],
                                               host=host)
    if data:
        #set of instance ids
        instance_set = set([datum['instance_id'] for datum in data])
        default_gw_vif = {}
        for instance_id in instance_set:
            vifs = db.virtual_interface_get_by_instance(context,
                                                        instance_id)
            if vifs:
                #offer a default gateway to the first virtual interface
                default_gw_vif[instance_id] = vifs[0]['id']

        for datum in data:
            instance_id = datum['instance_id']
            if instance_id in default_gw_vif:
                # we don't want default gateway for this fixed ip
                if default_gw_vif[instance_id] != datum['vif_id']:
                    hosts.append(_host_dhcp_opts(datum))
    return '\n'.join(hosts)
def get_dhcp_opts(context, network_ref):
    """Get network's hosts config in dhcp-opts format.

    A fixed ip produces an opt line only when its instance's default
    gateway belongs to a different network; instances that vanished from
    the DB are skipped.
    """
    hosts = []
    ips_ref = db.network_get_associated_fixed_ips(context, network_ref['id'])

    if ips_ref:
        #set of instance ids
        instance_set = set([fixed_ip_ref['instance_id']
                            for fixed_ip_ref in ips_ref])
        default_gw_network_node = {}
        for instance_id in instance_set:
            vifs = db.virtual_interface_get_by_instance(context,
                                                        instance_id)
            if vifs:
                #offer a default gateway to the first virtual interface
                default_gw_network_node[instance_id] = vifs[0]['network_id']

        for fixed_ip_ref in ips_ref:
            instance_id = fixed_ip_ref['instance_id']
            try:
                instance_ref = db.instance_get(context, instance_id)
            except exception.InstanceNotFound:
                # stale fixed ip referencing a deleted instance: log and
                # skip rather than fail the whole opts build
                msg = _("Instance %(instance_id)s not found")
                LOG.debug(msg % {'instance_id': instance_id})
                continue
            if instance_id in default_gw_network_node:
                target_network_id = default_gw_network_node[instance_id]
                # we don't want default gateway for this fixed ip
                if target_network_id != fixed_ip_ref['network_id']:
                    hosts.append(_host_dhcp_opts(fixed_ip_ref,
                                                 instance_ref))
    return '\n'.join(hosts)
def deallocate_for_instance(self, context, **kwargs):
    """Called when a VM is terminated.  Loop through each virtual
       interface in the Nova DB and remove the Quantum port and clear
       the IP allocation using the IPAM.  Finally, remove the virtual
       interfaces from the Nova DB.
    """
    instance_id = kwargs.get('instance_id')
    project_id = kwargs.pop('project_id', None)

    admin_context = context.elevated()
    vifs = db.virtual_interface_get_by_instance(admin_context,
                                                instance_id)
    for vif in vifs:
        network = db.network_get(admin_context, vif['network_id'])
        # 1. release the Quantum port attached to this vif
        self.deallocate_port(vif['uuid'], network['uuid'], project_id,
                             instance_id)
        # 2. release the IPAM address; returns the owning tenant, which
        #    the DHCP update below needs
        ipam_tenant_id = self.deallocate_ip_address(context,
                network['uuid'], project_id, vif, instance_id)
        # 3. refresh DHCP leases if the deployment uses quantum DHCP
        if FLAGS.quantum_use_dhcp:
            self.update_dhcp(context, ipam_tenant_id, network,
                             vif, project_id)
        # 4. finally drop the vif row itself
        db.virtual_interface_delete(admin_context, vif['id'])
def get_network_info(instance):
    """Build legacy (network, mapping) network_info pairs for *instance*
    from the Nova DB (fixed ips, vifs and flavor).
    """
    # TODO(tr3buchet): this function needs to go away! network info
    # MUST be passed down from compute
    # TODO(adiantum) If we will keep this function
    # we should cache network_info
    admin_context = context.get_admin_context()
    try:
        fixed_ips = db.fixed_ip_get_by_instance(admin_context,
                                                instance['id'])
    except exception.FixedIpNotFoundForInstance:
        # no allocated fixed IPs -> each network simply gets an empty
        # 'ips' list below
        fixed_ips = []
    vifs = db.virtual_interface_get_by_instance(admin_context,
                                                instance['id'])
    flavor = db.instance_type_get(admin_context,
                                  instance['instance_type_id'])
    network_info = []
    for vif in vifs:
        network = vif['network']

        # determine which of the instance's IPs belong to this network
        network_ips = [
            fixed_ip['address'] for fixed_ip in fixed_ips
            if fixed_ip['network_id'] == network['id']
        ]

        def ip_dict(ip):
            # per-address IPv4 entry in the legacy mapping format
            return {'ip': ip,
                    'netmask': network['netmask'],
                    'enabled': '1'}

        def ip6_dict():
            # derive the global IPv6 address from the network prefix,
            # the vif MAC and the project id
            prefix = network['cidr_v6']
            mac = vif['address']
            project_id = instance['project_id']
            return {
                'ip': ipv6.to_global(prefix, mac, project_id),
                'netmask': network['netmask_v6'],
                'enabled': '1'
            }

        mapping = {
            'label': network['label'],
            'gateway': network['gateway'],
            'broadcast': network['broadcast'],
            # the gateway doubles as the DHCP server address
            'dhcp_server': network['gateway'],
            'mac': vif['address'],
            'rxtx_cap': flavor['rxtx_cap'],
            'dns': [],
            'ips': [ip_dict(ip) for ip in network_ips]
        }

        if network['dns1']:
            mapping['dns'].append(network['dns1'])
        if network['dns2']:
            mapping['dns'].append(network['dns2'])

        if FLAGS.use_ipv6:
            mapping['ip6s'] = [ip6_dict()]
            mapping['gateway6'] = network['gateway_v6']

        network_info.append((network, mapping))
    return network_info
def get_network_info(instance):
    """Build legacy (network, mapping) network_info pairs for *instance*
    from the Nova DB (fixed ips, vifs and flavor).
    """
    # TODO(tr3buchet): this function needs to go away! network info
    # MUST be passed down from compute
    # TODO(adiantum) If we will keep this function
    # we should cache network_info
    admin_context = context.get_admin_context()
    try:
        fixed_ips = db.fixed_ip_get_by_instance(admin_context,
                                                instance['id'])
    except exception.FixedIpNotFoundForInstance:
        # no allocated fixed IPs -> each network simply gets an empty
        # 'ips' list below
        fixed_ips = []
    vifs = db.virtual_interface_get_by_instance(admin_context,
                                                instance['id'])
    flavor = db.instance_type_get(admin_context,
                                  instance['instance_type_id'])
    network_info = []
    for vif in vifs:
        network = vif['network']

        # determine which of the instance's IPs belong to this network
        network_ips = [fixed_ip['address'] for fixed_ip in fixed_ips
                       if fixed_ip['network_id'] == network['id']]

        def ip_dict(ip):
            # per-address IPv4 entry in the legacy mapping format
            return {
                'ip': ip,
                'netmask': network['netmask'],
                'enabled': '1'}

        def ip6_dict():
            # derive the global IPv6 address from the network prefix,
            # the vif MAC and the project id
            prefix = network['cidr_v6']
            mac = vif['address']
            project_id = instance['project_id']
            return {
                'ip': ipv6.to_global(prefix, mac, project_id),
                'netmask': network['netmask_v6'],
                'enabled': '1'}

        mapping = {
            'label': network['label'],
            'gateway': network['gateway'],
            'broadcast': network['broadcast'],
            # the gateway doubles as the DHCP server address
            'dhcp_server': network['gateway'],
            'mac': vif['address'],
            'rxtx_cap': flavor['rxtx_cap'],
            'dns': [],
            'ips': [ip_dict(ip) for ip in network_ips]}

        if network['dns1']:
            mapping['dns'].append(network['dns1'])
        if network['dns2']:
            mapping['dns'].append(network['dns2'])

        if FLAGS.use_ipv6:
            mapping['ip6s'] = [ip6_dict()]
            mapping['gateway6'] = network['gateway_v6']

        network_info.append((network, mapping))
    return network_info
def get_dhcp_opts(context, network_ref):
    """Get network's hosts config in dhcp-opts format.

    A fixed ip produces an opt line only when its instance's default
    gateway belongs to a different network; instances missing from the
    DB are skipped.
    """
    hosts = []
    ips_ref = db.network_get_associated_fixed_ips(context, network_ref['id'])

    if ips_ref:
        #set of instance ids
        instance_set = set(
            [fixed_ip_ref['instance_id'] for fixed_ip_ref in ips_ref])
        default_gw_network_node = {}
        for instance_id in instance_set:
            vifs = db.virtual_interface_get_by_instance(context,
                                                        instance_id)
            if vifs:
                #offer a default gateway to the first virtual interface
                default_gw_network_node[instance_id] = vifs[0]['network_id']

        for fixed_ip_ref in ips_ref:
            instance_id = fixed_ip_ref['instance_id']
            try:
                instance_ref = db.instance_get(context, instance_id)
            except exception.InstanceNotFound:
                # stale fixed ip referencing a deleted instance: skip
                msg = _("Instance %(instance_id)s not found")
                LOG.debug(msg % {'instance_id': instance_id})
                continue
            if instance_id in default_gw_network_node:
                target_network_id = default_gw_network_node[instance_id]
                # we don't want default gateway for this fixed ip
                if target_network_id != fixed_ip_ref['network_id']:
                    hosts.append(_host_dhcp_opts(fixed_ip_ref,
                                                 instance_ref))
    return '\n'.join(hosts)
def _from_phy_host(instance_id, tenant_id):
    """Collect (vifinfo_uuid, network_uuid, mac, addrs) tuples for each
    usable virtual interface of a physical-host instance.

    Interfaces without a network, a network uuid, a vifinfo uuid or a
    fixed IP are skipped (logged, not raised).
    """
    LOG.debug('_from_phy_host(instance_id=%s,tenant_id=%s)',
              instance_id, tenant_id)
    ctx = context.get_admin_context()
    info = []
    for vif in db.virtual_interface_get_by_instance(ctx, instance_id):
        LOG.debug('vif=%s', vif.__dict__)
        mac = vif.address
        network_ref = vif.network
        if not network_ref:
            LOG.warn('vif.network is None')
            continue
        LOG.debug('vif.network=%s', network_ref.__dict__)
        network_uuid = network_ref.uuid
        if not network_uuid:
            LOG.warn('network_uuid is None')
            continue
        vifinfo_uuid = _get_vifinfo_uuid(tenant_id, vif.uuid)
        LOG.debug('vifinfo_uuid=%s', vifinfo_uuid)
        if not vifinfo_uuid:
            continue
        fixed_ip = db.fixed_ip_get_by_virtual_interface(ctx, vif.id)
        if not fixed_ip:
            LOG.warn('fixed_ip is None')
            continue
        addrs = [fip.address for fip in fixed_ip]
        info.append((vifinfo_uuid, network_uuid, mac, addrs))
    LOG.debug('_from_phy_host(instance_id=%s,tenant_id=%s) end: info=%s',
              instance_id, tenant_id, info)
    return info
def test_update_dhcp_for_nw01(self):
    # update_dhcp with single-default-gateway: the driver reads the
    # network's fixed ips twice (hosts file and opts file) and looks up
    # each instance's vifs to choose the default-gateway interface.
    self.flags(use_single_default_gateway=True)

    def get_vif(_context, vif_id):
        # stub for db.virtual_interface_get backed by the vifs fixture
        return vifs[vif_id]

    def get_instance(_context, instance_id):
        # stub for db.instance_get backed by the instances fixture
        return instances[instance_id]

    self.stubs.Set(db, 'virtual_interface_get', get_vif)
    self.stubs.Set(db, 'instance_get', get_instance)
    self.mox.StubOutWithMock(db, 'network_get_associated_fixed_ips')
    self.mox.StubOutWithMock(db, 'virtual_interface_get_by_instance')
    self.mox.StubOutWithMock(self.driver, 'write_to_file')
    self.mox.StubOutWithMock(self.driver, 'ensure_path')
    self.mox.StubOutWithMock(os, 'chmod')

    # NOTE: mox verifies this exact expectation order on replay.
    db.network_get_associated_fixed_ips(mox.IgnoreArg(), mox.IgnoreArg())\
        .AndReturn([fixed_ips[1], fixed_ips[2]])
    db.network_get_associated_fixed_ips(mox.IgnoreArg(), mox.IgnoreArg())\
        .AndReturn([fixed_ips[1], fixed_ips[2]])
    db.virtual_interface_get_by_instance(mox.IgnoreArg(), mox.IgnoreArg())\
        .AndReturn([vifs[0], vifs[1]])
    db.virtual_interface_get_by_instance(mox.IgnoreArg(), mox.IgnoreArg())\
        .AndReturn([vifs[2], vifs[3]])
    self.driver.write_to_file(mox.IgnoreArg(), mox.IgnoreArg())
    self.driver.write_to_file(mox.IgnoreArg(), mox.IgnoreArg())
    self.driver.ensure_path(mox.IgnoreArg())
    self.driver.ensure_path(mox.IgnoreArg())
    self.driver.ensure_path(mox.IgnoreArg())
    self.driver.ensure_path(mox.IgnoreArg())
    self.driver.ensure_path(mox.IgnoreArg())
    self.driver.ensure_path(mox.IgnoreArg())
    self.driver.ensure_path(mox.IgnoreArg())
    os.chmod(mox.IgnoreArg(), mox.IgnoreArg())
    os.chmod(mox.IgnoreArg(), mox.IgnoreArg())

    self.mox.ReplayAll()

    self.driver.update_dhcp(self.context, "eth0", networks[0])
def test_get_instance_nw_info(self):
    # Stub the three DB lookups feeding get_instance_nw_info and check
    # the (network, mapping) pairs it builds from the fixtures.
    self.mox.StubOutWithMock(db, "fixed_ip_get_by_instance")
    self.mox.StubOutWithMock(db, "virtual_interface_get_by_instance")
    self.mox.StubOutWithMock(db, "instance_type_get_by_id")

    db.fixed_ip_get_by_instance(mox.IgnoreArg(),
                                mox.IgnoreArg()).AndReturn(fixed_ips)
    db.virtual_interface_get_by_instance(mox.IgnoreArg(),
                                         mox.IgnoreArg()).AndReturn(vifs)
    db.instance_type_get_by_id(mox.IgnoreArg(),
                               mox.IgnoreArg()).AndReturn(flavor)
    self.mox.ReplayAll()

    nw_info = self.network.get_instance_nw_info(None, 0, 0)

    self.assertTrue(nw_info)

    # nw is a (network, mapping) pair; index i drives the expected
    # addressing scheme (192.168.i.0/24, 2001:db(i+8)::/64).
    for i, nw in enumerate(nw_info):
        i8 = i + 8
        check = {
            "bridge": "fa%s" % i,
            "cidr": "192.168.%s.0/24" % i,
            "cidr_v6": "2001:db%s::/64" % i8,
            "id": i,
            "injected": "DONTCARE",
        }
        self.assertDictMatch(nw[0], check)

        check = {
            "broadcast": "192.168.%s.255" % i,
            "dns": "DONTCARE",
            "gateway": "192.168.%s.1" % i,
            "gateway6": "2001:db%s::1" % i8,
            "ip6s": "DONTCARE",
            "ips": "DONTCARE",
            "label": "test%s" % i,
            "mac": "DE:AD:BE:EF:00:0%s" % i,
            "rxtx_cap": "DONTCARE",
        }
        self.assertDictMatch(nw[1], check)

        check = [{"enabled": "DONTCARE",
                  "ip": "2001:db%s::dcad:beff:feef:%s" % (i8, i),
                  "netmask": "64"}]
        self.assertDictListMatch(nw[1]["ip6s"], check)

        check = [{"enabled": "1",
                  "ip": "192.168.%s.100" % i,
                  "netmask": "255.255.255.0"}]
        self.assertDictListMatch(nw[1]["ips"], check)
def test_update_dhcp_for_nw00(self):
    # update_dhcp for network 0 with single-default-gateway: two fixed-ip
    # reads (hosts + opts files), one vif lookup per instance, then the
    # file writes / path / chmod side effects.
    self.flags(use_single_default_gateway=True)
    self.mox.StubOutWithMock(db, 'network_get_associated_fixed_ips')
    self.mox.StubOutWithMock(db, 'virtual_interface_get_by_instance')
    self.mox.StubOutWithMock(self.driver, 'write_to_file')
    self.mox.StubOutWithMock(self.driver, 'ensure_path')
    self.mox.StubOutWithMock(os, 'chmod')

    # NOTE: mox verifies this exact expectation order on replay.
    db.network_get_associated_fixed_ips(mox.IgnoreArg(), mox.IgnoreArg())\
        .AndReturn([fixed_ips[0], fixed_ips[3]])
    db.network_get_associated_fixed_ips(mox.IgnoreArg(), mox.IgnoreArg())\
        .AndReturn([fixed_ips[0], fixed_ips[3]])
    db.virtual_interface_get_by_instance(mox.IgnoreArg(), mox.IgnoreArg())\
        .AndReturn([vifs[0], vifs[1]])
    db.virtual_interface_get_by_instance(mox.IgnoreArg(), mox.IgnoreArg())\
        .AndReturn([vifs[2], vifs[3]])
    self.driver.write_to_file(mox.IgnoreArg(), mox.IgnoreArg())
    self.driver.write_to_file(mox.IgnoreArg(), mox.IgnoreArg())
    self.driver.ensure_path(mox.IgnoreArg())
    self.driver.ensure_path(mox.IgnoreArg())
    self.driver.ensure_path(mox.IgnoreArg())
    self.driver.ensure_path(mox.IgnoreArg())
    self.driver.ensure_path(mox.IgnoreArg())
    self.driver.ensure_path(mox.IgnoreArg())
    self.driver.ensure_path(mox.IgnoreArg())
    os.chmod(mox.IgnoreArg(), mox.IgnoreArg())
    os.chmod(mox.IgnoreArg(), mox.IgnoreArg())

    self.mox.ReplayAll()

    self.driver.update_dhcp(None, "eth0", networks[0])
def test_multi_nic(self):
    """
    Multinic - Verify that nics as specified in the database are
    created in the guest
    """
    all_vifs = db.virtual_interface_get_by_instance(
        context.get_admin_context(), instance_info.local_id)
    for interface in all_vifs:
        ip_refs = db.fixed_ip_get_by_virtual_interface(
            context.get_admin_context(), interface['id'])
        # the address the container actually got for this bridge device
        device_ip = get_vz_ip_for_device(
            instance_info.local_id,
            interface['network']['bridge_interface'])
        assert_equal(device_ip, ip_refs[0]['address'])
def deallocate_for_instance(self, context, **kwargs):
    """Called when a VM is terminated.  Loop through each virtual
       interface in the Nova DB and remove the Quantum port and clear
       the IP allocation using the IPAM.  Finally, remove the virtual
       interfaces from the Nova DB.

       Fix: the two best-effort blocks used bare ``except:``, which also
       swallows SystemExit/KeyboardInterrupt; narrowed to
       ``except Exception`` while keeping the log-and-continue intent.
    """
    instance_id = kwargs.get("instance_id")
    project_id = kwargs.pop("project_id", None)

    admin_context = context.elevated()
    vifs = db.virtual_interface_get_by_instance(admin_context, instance_id)
    for vif_ref in vifs:
        interface_id = vif_ref["uuid"]
        q_tenant_id = project_id

        network_ref = db.network_get(admin_context, vif_ref["network_id"])
        net_id = network_ref["uuid"]

        # port deallocation block
        try:
            port_id = None
            port_id = self.q_conn.get_port_by_attachment(q_tenant_id,
                                                         net_id,
                                                         interface_id)
            if not port_id:
                # retry under the default tenant for shared networks
                q_tenant_id = FLAGS.quantum_default_tenant_id
                port_id = self.q_conn.get_port_by_attachment(
                    q_tenant_id, net_id, interface_id)

            if not port_id:
                LOG.error("Unable to find port with attachment: %s" %
                          (interface_id))
            else:
                self.q_conn.detach_and_delete_port(q_tenant_id,
                                                   net_id, port_id)
        except Exception:
            # except anything so the rest of deallocate can succeed
            msg = _("port deallocation failed for instance: "
                    "|%(instance_id)s|, port_id: |%(port_id)s|")
            LOG.critical(msg % locals())

        # ipam deallocation block
        try:
            ipam_tenant_id = self.ipam.get_tenant_id_by_net_id(
                context, net_id, vif_ref["uuid"], project_id)

            self.ipam.deallocate_ips_by_vif(context, ipam_tenant_id,
                                            net_id, vif_ref)
            db.virtual_interface_delete(admin_context, vif_ref["id"])

            # If DHCP is enabled on this network then we need to update the
            # leases and restart the server.
            if FLAGS.quantum_use_dhcp:
                self.update_dhcp(context, ipam_tenant_id, network_ref,
                                 vif_ref, project_id)
        except Exception:
            # except anything so the rest of deallocate can succeed
            vif_uuid = vif_ref["uuid"]
            msg = _("ipam deallocation failed for instance: "
                    "|%(instance_id)s|, vif_uuid: |%(vif_uuid)s|")
            LOG.critical(msg % locals())
def deallocate_for_instance(self, context, **kwargs):
    """Called when a VM is terminated.  Loop through each virtual
       interface in the Nova DB and remove the Quantum port and clear
       the IP allocation using the IPAM.  Finally, remove the virtual
       interfaces from the Nova DB.
    """
    instance_id = kwargs.get('instance_id')
    project_id = kwargs.pop('project_id', None)

    admin_context = context.elevated()
    vifs = db.virtual_interface_get_by_instance(admin_context,
                                                instance_id)
    for vif_ref in vifs:
        interface_id = vif_ref['uuid']
        q_tenant_id = project_id

        network_ref = db.network_get(admin_context, vif_ref['network_id'])
        net_id = network_ref['uuid']

        # look up the port first under the project tenant, then under the
        # default tenant (shared networks)
        port_id = self.q_conn.get_port_by_attachment(q_tenant_id,
                                                     net_id, interface_id)
        if not port_id:
            q_tenant_id = FLAGS.quantum_default_tenant_id
            port_id = self.q_conn.get_port_by_attachment(
                q_tenant_id, net_id, interface_id)

        if not port_id:
            LOG.error("Unable to find port with attachment: %s" %
                      (interface_id))
        else:
            self.q_conn.detach_and_delete_port(q_tenant_id,
                                               net_id, port_id)

        ipam_tenant_id = self.ipam.get_tenant_id_by_net_id(context,
            net_id, vif_ref['uuid'], project_id)

        self.ipam.deallocate_ips_by_vif(context, ipam_tenant_id,
                                        net_id, vif_ref)

        # If DHCP is enabled on this network then we need to update the
        # leases and restart the server.
        if FLAGS.quantum_use_dhcp:
            self.update_dhcp(context, ipam_tenant_id, network_ref,
                             vif_ref, project_id)
    try:
        # remove all vif rows for the instance in one shot
        db.virtual_interface_delete_by_instance(admin_context,
                                                instance_id)
    except exception.InstanceNotFound:
        LOG.error(_("Attempted to deallocate non-existent instance: %s" %
                    (instance_id)))
def test_multi_nic(self):
    """
    Multinic - Verify that nics as specified in the database are
    created in the guest

    Fix: ``admin_context`` holds the context object returned by
    ``context.get_admin_context()``, but it was being *called*
    (``admin_context()``) in both DB lookups, which raises TypeError.
    Pass the object itself instead.
    """
    admin_context = context.get_admin_context()
    vifs = db.virtual_interface_get_by_instance(admin_context,
                                                instance_info.local_id)
    for vif in vifs:
        fixed_ip = db.fixed_ip_get_by_virtual_interface(
            admin_context, vif['id'])
        vz_ip = get_vz_ip_for_device(instance_info.local_id,
                                     vif['network']['bridge_interface'])
        assert_equal(vz_ip, fixed_ip[0]['address'])
def test_update_dhcp_for_nw01(self):
    # update_dhcp for network 0 with single-default-gateway: two fixed-ip
    # reads plus one vif lookup per instance; no file-side-effect stubs
    # in this variant.
    self.flags(use_single_default_gateway=True)
    self.mox.StubOutWithMock(db, 'network_get_associated_fixed_ips')
    self.mox.StubOutWithMock(db, 'virtual_interface_get_by_instance')

    # NOTE: mox verifies this exact expectation order on replay.
    db.network_get_associated_fixed_ips(mox.IgnoreArg(), mox.IgnoreArg())\
        .AndReturn([fixed_ips[1], fixed_ips[2]])
    db.network_get_associated_fixed_ips(mox.IgnoreArg(), mox.IgnoreArg())\
        .AndReturn([fixed_ips[1], fixed_ips[2]])
    db.virtual_interface_get_by_instance(mox.IgnoreArg(), mox.IgnoreArg())\
        .AndReturn([vifs[0], vifs[1]])
    db.virtual_interface_get_by_instance(mox.IgnoreArg(), mox.IgnoreArg())\
        .AndReturn([vifs[2], vifs[3]])

    self.mox.ReplayAll()

    self.driver.update_dhcp(None, "eth0", networks[0])
def get_instance_nw_info(self, context, instance_id, instance_uuid,
                         instance_type_id, host):
    """This method is used by compute to fetch all network data
       that should be used when creating the VM.

       The method simply loops through all virtual interfaces
       stored in the nova DB and queries the IPAM lib to get
       the associated IP data.

       The format of returned data is 'defined' by the initial
       set of NetworkManagers found in nova/network/manager.py .
       Ideally this 'interface' will be more formally defined
       in the future.
    """
    admin_context = context.elevated()
    project_id = context.project_id
    vifs = db.virtual_interface_get_by_instance(context, instance_id)

    instance_type = instance_types.get_instance_type(instance_type_id)

    # map network uuid -> owning tenant (project nets plus global nets)
    net_tenant_dict = dict((net_id, tenant_id)
                           for (net_id, tenant_id)
                           in self.ipam.get_project_and_global_net_ids(
                               context, project_id))
    networks = {}
    for vif in vifs:
        if vif.get('network_id') is not None:
            network = db.network_get(admin_context, vif['network_id'])
            net_tenant_id = net_tenant_dict[network['uuid']]
            if net_tenant_id is None:
                # globally shared net: fall back to the default tenant
                net_tenant_id = FLAGS.quantum_default_tenant_id
            network = {'id': network['id'],
                       'uuid': network['uuid'],
                       'bridge': 'ovs_flag',
                       'label': self.q_conn.get_network_name(
                           net_tenant_id, network['uuid']),
                       'project_id': net_tenant_id}
            networks[vif['uuid']] = network

    # update instance network cache and return network_info
    nw_info = self.build_network_info_model(context, vifs, networks,
                                            instance_type, host)
    db.instance_info_cache_update(context, instance_uuid,
                                  {'network_info': nw_info.as_cache()})
    return nw_info
def get_instance_nw_info(self, context, instance_id, instance_uuid,
                         rxtx_factor, host, **kwargs):
    """This method is used by compute to fetch all network data
       that should be used when creating the VM.

       The method simply loops through all virtual interfaces
       stored in the nova DB and queries the IPAM lib to get
       the associated IP data.

       The format of returned data is 'defined' by the initial
       set of NetworkManagers found in nova/network/manager.py .
       Ideally this 'interface' will be more formally defined
       in the future.
    """
    project_id = kwargs['project_id']
    vifs = db.virtual_interface_get_by_instance(context, instance_id)

    # map network uuid -> owning tenant (project nets plus global nets)
    net_tenant_dict = dict(
        (net_id, tenant_id)
        for (net_id, tenant_id)
        in self.ipam.get_project_and_global_net_ids(context, project_id))
    networks = {}
    for vif in vifs:
        if vif.get('network_id') is not None:
            network = db.network_get(context.elevated(),
                                     vif['network_id'])
            net_tenant_id = net_tenant_dict[network['uuid']]
            if net_tenant_id is None:
                # globally shared net: fall back to the default tenant
                net_tenant_id = FLAGS.quantum_default_tenant_id
            network = {
                'id': network['id'],
                'uuid': network['uuid'],
                'bridge': '',  # Quantum ignores this field
                'label': network['label'],
                'injected': FLAGS.flat_injected,
                'project_id': net_tenant_id
            }
            networks[vif['uuid']] = network

    # update instance network cache and return network_info
    nw_info = self.build_network_info_model(context, vifs, networks,
                                            rxtx_factor, host)
    db.instance_info_cache_update(context, instance_uuid,
                                  {'network_info': nw_info.as_cache()})
    return nw_info
def get_instance_nw_info(self, context, instance_id, instance_uuid,
                         rxtx_factor, host):
    """This method is used by compute to fetch all network data
       that should be used when creating the VM.

       The method simply loops through all virtual interfaces
       stored in the nova DB and queries the IPAM lib to get
       the associated IP data.

       The format of returned data is 'defined' by the initial
       set of NetworkManagers found in nova/network/manager.py .
       Ideally this 'interface' will be more formally defined
       in the future.
    """
    admin_context = context.elevated()
    project_id = context.project_id
    vifs = db.virtual_interface_get_by_instance(context, instance_id)

    # map network uuid -> owning tenant (project nets plus global nets)
    net_tenant_dict = dict(
        (net_id, tenant_id)
        for (net_id, tenant_id)
        in self.ipam.get_project_and_global_net_ids(context, project_id)
    )
    networks = {}
    for vif in vifs:
        if vif.get("network_id") is not None:
            network = db.network_get(admin_context, vif["network_id"])
            net_tenant_id = net_tenant_dict[network["uuid"]]
            if net_tenant_id is None:
                # globally shared net: fall back to the default tenant
                net_tenant_id = FLAGS.quantum_default_tenant_id
            network = {
                "id": network["id"],
                "uuid": network["uuid"],
                "bridge": "",  # Quantum ignores this field
                "label": network["label"],
                "project_id": net_tenant_id,
            }
            networks[vif["uuid"]] = network

    # update instance network cache and return network_info
    nw_info = self.build_network_info_model(context, vifs, networks,
                                            rxtx_factor, host)
    db.instance_info_cache_update(context, instance_uuid,
                                  {"network_info": nw_info.as_cache()})
    return nw_info
def deallocate_for_instance(self, context, **kwargs):
    """Called when a VM is terminated.  Loop through each virtual
       interface in the Nova DB and remove the Quantum port and clear
       the IP allocation using the IPAM.  Finally, remove the virtual
       interfaces from the Nova DB.
    """
    instance_id = kwargs.get('instance_id')
    project_id = kwargs.pop('project_id', None)

    admin_context = context.elevated()
    vifs = db.virtual_interface_get_by_instance(admin_context,
                                                instance_id)
    for vif in vifs:
        network = db.network_get(admin_context, vif['network_id'])
        # release the Quantum port, then the IPAM address
        self.deallocate_port(vif['uuid'], network['uuid'], project_id,
                             instance_id)
        ipam_tenant_id = self.deallocate_ip_address(
            context, network['uuid'], project_id, vif, instance_id)
        if FLAGS.quantum_use_dhcp:
            if network['host'] == self.host:
                # this host runs the network's DHCP server: update locally
                self.update_dhcp(context, ipam_tenant_id, network,
                                 vif, project_id)
            else:
                # DHCP is served by another network host: RPC to it
                topic = self.db.queue_get_for(context,
                                              FLAGS.network_topic,
                                              network['host'])
                rpc.call(
                    context, topic,
                    {
                        'method': 'update_dhcp',
                        'args': {
                            'ipam_tenant_id': ipam_tenant_id,
                            'network_ref': network,
                            'vif_ref': vif,
                            'project_id': network['project_id']
                        }
                    })
        # finally drop the vif row itself
        db.virtual_interface_delete(admin_context, vif['id'])
def deallocate_for_instance(self, context, **kwargs):
    """Called when a VM is terminated.  Loop through each virtual
       interface in the Nova DB and remove the Quantum port and clear
       the IP allocation using the IPAM.  Finally, remove the virtual
       interfaces from the Nova DB.
    """
    instance_id = kwargs.get("instance_id")
    project_id = kwargs.pop("project_id", None)

    admin_context = context.elevated()
    vifs = db.virtual_interface_get_by_instance(admin_context,
                                                instance_id)
    for vif in vifs:
        network = db.network_get(admin_context, vif["network_id"])
        # release the Quantum port, then the IPAM address
        self.deallocate_port(vif["uuid"], network["uuid"], project_id,
                             instance_id)
        ipam_tenant_id = self.deallocate_ip_address(context,
                                                    network["uuid"],
                                                    project_id, vif,
                                                    instance_id)
        if FLAGS.quantum_use_dhcp:
            if network["host"] == self.host:
                # this host runs the network's DHCP server: update locally
                self.update_dhcp(context, ipam_tenant_id, network,
                                 vif, project_id)
            else:
                # DHCP is served by another network host: RPC to it
                topic = self.db.queue_get_for(context,
                                              FLAGS.network_topic,
                                              network["host"])
                rpc.call(
                    context,
                    topic,
                    {
                        "method": "update_dhcp",
                        "args": {
                            "ipam_tenant_id": ipam_tenant_id,
                            "network_ref": network,
                            "vif_ref": vif,
                            "project_id": network["project_id"],
                        },
                    },
                )
        # finally drop the vif row itself
        db.virtual_interface_delete(admin_context, vif["id"])
def deallocate_for_instance(self, context, **kwargs):
    """Called when a VM is terminated.  Loop through each virtual
       interface in the Nova DB and remove the Quantum port and clear
       the IP allocation using the IPAM.  Finally, remove the virtual
       interfaces from the Nova DB.
    """
    instance_id = kwargs.get('instance_id')
    project_id = kwargs.pop('project_id', None)

    admin_context = context.elevated()
    vifs = db.virtual_interface_get_by_instance(admin_context,
                                                instance_id)
    for vif_ref in vifs:
        interface_id = vif_ref['uuid']
        q_tenant_id = project_id
        ipam_tenant_id = project_id
        # look up the port under the project tenant first, then under
        # the default tenant (shared networks)
        (net_id, port_id) = self.q_conn.get_port_by_attachment(
            q_tenant_id, interface_id)
        if not net_id:
            q_tenant_id = FLAGS.quantum_default_tenant_id
            ipam_tenant_id = None
            (net_id, port_id) = self.q_conn.get_port_by_attachment(
                q_tenant_id, interface_id)
        if not net_id:
            # port already gone (or never created): skip this vif
            LOG.error("Unable to find port with attachment: %s" %
                      (interface_id))
            continue
        self.q_conn.detach_and_delete_port(q_tenant_id,
                                           net_id, port_id)

        self.ipam.deallocate_ips_by_vif(context, ipam_tenant_id,
                                        net_id, vif_ref)

    try:
        # remove all vif rows for the instance in one shot
        db.virtual_interface_delete_by_instance(admin_context,
                                                instance_id)
    except exception.InstanceNotFound:
        LOG.error(_("Attempted to deallocate non-existent instance: %s" %
                    (instance_id)))
def get_dhcp_opts(context, network_ref):
    """Get network's hosts config in dhcp-opts format."""
    fixed_ip_rows = db.network_get_associated_fixed_ips(context,
                                                        network_ref["id"])
    if not fixed_ip_rows:
        return ""

    # map each instance to the network of its first vif — that vif is
    # the one offered the default gateway
    gateway_net_by_instance = {}
    for inst_id in set(row["instance_id"] for row in fixed_ip_rows):
        inst_vifs = db.virtual_interface_get_by_instance(context, inst_id)
        if inst_vifs:
            gateway_net_by_instance[inst_id] = inst_vifs[0]["network_id"]

    opt_lines = []
    for row in fixed_ip_rows:
        inst_id = row["instance_id"]
        if inst_id in gateway_net_by_instance:
            # suppress the default gateway for every non-first network
            if gateway_net_by_instance[inst_id] != row["network_id"]:
                opt_lines.append(_host_dhcp_opts(row))
    return "\n".join(opt_lines)
def deallocate_for_instance(self, context, **kwargs):
    """Called when a VM is terminated.  Loop through each virtual
       interface in the Nova DB and remove the Quantum port and clear
       the IP allocation using the IPAM.  Finally, remove the virtual
       interfaces from the Nova DB.
    """
    instance_id = kwargs.get('instance_id')
    project_id = kwargs.pop('project_id', None)

    admin_context = context.elevated()
    # vif lookup is keyed by instance uuid here, so resolve it first
    instance = db.instance_get(context, instance_id)
    vifs = db.virtual_interface_get_by_instance(admin_context,
                                                instance['uuid'])
    for vif in vifs:
        network = db.network_get(admin_context, vif['network_id'])
        # release the Quantum port, then the IPAM address
        self.deallocate_port(vif['uuid'], network['uuid'], project_id,
                             instance_id)
        ipam_tenant_id = self.deallocate_ip_address(context,
                                                    network['uuid'],
                                                    project_id, vif,
                                                    instance_id)
        if FLAGS.quantum_use_dhcp:
            if network['host'] == self.host:
                # this host runs the network's DHCP server: update locally
                self.update_dhcp(context, ipam_tenant_id, network,
                                 vif, project_id)
            else:
                # DHCP is served by another network host: RPC to it
                topic = rpc.queue_get_for(context,
                                          FLAGS.network_topic,
                                          network['host'])
                rpc.call(context, topic,
                         {'method': 'update_dhcp',
                          'args': {'ipam_tenant_id': ipam_tenant_id,
                                   'network_ref': network,
                                   'vif_ref': vif,
                                   'project_id': network['project_id']}})
        # finally drop the vif row itself
        db.virtual_interface_delete(admin_context, vif['id'])
def deallocate_for_instance(self, context, **kwargs):
    """Called when a VM is terminated.

    Loop through each virtual interface in the Nova DB and remove the
    Quantum port and clear the IP allocation using the IPAM.  Finally,
    remove the virtual interfaces from the Nova DB.
    """
    instance_id = kwargs.get('instance_id')
    # pop (not get): project_id must not leak onward inside kwargs
    project_id = kwargs.pop('project_id', None)
    admin_context = context.elevated()
    vifs = db.virtual_interface_get_by_instance(admin_context,
                                                instance_id)
    for vif_ref in vifs:
        interface_id = vif_ref['uuid']
        q_tenant_id = project_id
        ipam_tenant_id = project_id
        (net_id, port_id) = self.q_conn.get_port_by_attachment(
            q_tenant_id, interface_id)
        if not net_id:
            # Retry under the default tenant: the port may have been
            # created on a shared/default network.
            q_tenant_id = FLAGS.quantum_default_tenant_id
            ipam_tenant_id = None
            (net_id, port_id) = self.q_conn.get_port_by_attachment(
                q_tenant_id, interface_id)
        if not net_id:
            # BUG FIX: mark the message for translation and use lazy
            # %-style logging arguments instead of eager interpolation.
            LOG.error(_("Unable to find port with attachment: %s"),
                      interface_id)
            continue
        # Detach the Quantum port and release its IPAM allocation.
        self.q_conn.detach_and_delete_port(q_tenant_id, net_id,
                                           port_id)
        self.ipam.deallocate_ips_by_vif(context, ipam_tenant_id,
                                        net_id, vif_ref)
    try:
        db.virtual_interface_delete_by_instance(admin_context,
                                                instance_id)
    except exception.InstanceNotFound:
        # BUG FIX: interpolate AFTER the translation lookup — the
        # original _("... %s" % instance_id) formatted the string first,
        # so the gettext catalog lookup could never match.
        LOG.error(_("Attempted to deallocate non-existent instance: %s"),
                  instance_id)
def get_instance_nw_info(self, context, instance_id, instance_type_id, host):
    """This method is used by compute to fetch all network data
    that should be used when creating the VM.

    The method simply loops through all virtual interfaces
    stored in the nova DB and queries the IPAM lib to get
    the associated IP data.

    The format of returned data is 'defined' by the initial
    set of NetworkManagers found in nova/network/manager.py .
    Ideally this 'interface' will be more formally defined
    in the future.
    """
    network_info = []
    instance = db.instance_get(context, instance_id)
    project_id = instance.project_id
    admin_context = context.elevated()
    vifs = db.virtual_interface_get_by_instance(admin_context,
                                                instance_id)
    for vif in vifs:
        net = db.network_get(admin_context, vif["network_id"])
        net_id = net["uuid"]
        if not net_id:
            # TODO(bgh): We need to figure out a way to tell if we
            # should actually be raising this exception or not.
            # In the case that a VM spawn failed it may not have
            # attached the vif and raising the exception here
            # prevents deletion of the VM. In that case we should
            # probably just log, continue, and move on.
            # BUG FIX: removed duplicated word in the message
            # ("for for" -> "for").
            raise Exception(_("No network for virtual interface %s") %
                            vif["uuid"])
        ipam_tenant_id = self.ipam.get_tenant_id_by_net_id(context,
            net_id, vif["uuid"], project_id)
        v4_subnets, v6_subnets = self.ipam.get_subnets_by_net_id(context,
            ipam_tenant_id, net_id, vif["uuid"])
        v4_ips = self.ipam.get_v4_ips_by_interface(context,
            net_id, vif["uuid"], project_id=ipam_tenant_id)
        v6_ips = self.ipam.get_v6_ips_by_interface(context,
            net_id, vif["uuid"], project_id=ipam_tenant_id)

        def _make_network_dict_and_info(subnet, ips, net, vif, v6=False):
            # Build one (network_dict, info) pair for a single subnet;
            # v6 selects the IPv6 key layout the manager API expects.
            def ip_dict(ip, subnet):
                return {"ip": ip,
                        "netmask": subnet["netmask"],
                        "enabled": "1"}
            network_dict = {"injected": True,
                            "bridge": net["bridge"],
                            "multi_host": False}
            q_tenant_id = project_id or FLAGS.quantum_default_tenant_id
            info = {
                "label": self.q_conn.get_network_name(q_tenant_id,
                                                      net["uuid"]),
                "gateway": subnet["gateway"],
                "dhcp_server": subnet["gateway"],
                "broadcast": subnet["broadcast"],
                "mac": vif["address"],
                "vif_uuid": vif["uuid"],
                "dns": [],
            }
            if not v6:
                network_dict["cidr"] = subnet["cidr"]
                info["ips"] = [ip_dict(ip, subnet) for ip in ips]
            else:
                if subnet["cidr"]:
                    network_dict["cidr_v6"] = subnet["cidr"]
                    info["ip6s"] = [ip_dict(ip, subnet) for ip in ips]
                if subnet["gateway"]:
                    info["gateway6"] = subnet["gateway"]
            # Collect unique DNS servers, preserving insertion order.
            dns_dict = {}
            for k in ["dns1", "dns2"]:
                if k in subnet and subnet[k]:
                    dns_dict[subnet[k]] = None
            info["dns"] = [d for d in dns_dict.keys()]
            return network_dict, info

        # One entry per subnet, v4 first then v6, as the callers expect.
        for subnet in v4_subnets:
            network_dict, info = _make_network_dict_and_info(subnet,
                v4_ips, net, vif, False)
            network_info.append((network_dict, info))
        for subnet in v6_subnets:
            network_dict, info = _make_network_dict_and_info(subnet,
                v6_ips, net, vif, True)
            network_info.append((network_dict, info))
    return network_info
def _db_virtual_interface_get_by_instance(context, instance_uuid,
                                          use_slave=False):
    """Fetch the virtual interfaces for *instance_uuid* from the DB.

    NOTE(review): ``use_slave`` is accepted here but not forwarded to
    the DB API call — confirm whether that is intentional.
    """
    vifs = db.virtual_interface_get_by_instance(context, instance_uuid)
    return vifs
def get_instance_nw_info(self, context, instance_id, instance_type_id, host):
    """This method is used by compute to fetch all network data
    that should be used when creating the VM.

    The method simply loops through all virtual interfaces
    stored in the nova DB and queries the IPAM lib to get
    the associated IP data.

    The format of returned data is 'defined' by the initial
    set of NetworkManagers found in nova/network/manager.py .
    Ideally this 'interface' will be more formally defined
    in the future.
    """
    network_info = []
    instance = db.instance_get(context, instance_id)
    project_id = instance.project_id
    admin_context = context.elevated()
    vifs = db.virtual_interface_get_by_instance(admin_context,
                                                instance_id)
    for vif in vifs:
        q_tenant_id = project_id
        ipam_tenant_id = project_id
        net_id, port_id = self.q_conn.get_port_by_attachment(q_tenant_id,
                                                             vif['uuid'])
        if not net_id:
            # Retry under the default tenant: the port may live on a
            # shared/default network.
            q_tenant_id = FLAGS.quantum_default_tenant_id
            ipam_tenant_id = None
            net_id, port_id = self.q_conn.get_port_by_attachment(
                q_tenant_id, vif['uuid'])
        if not net_id:
            # TODO(bgh): We need to figure out a way to tell if we
            # should actually be raising this exception or not.
            # In the case that a VM spawn failed it may not have
            # attached the vif and raising the exception here
            # prevents deletion of the VM. In that case we should
            # probably just log, continue, and move on.
            # BUG FIX: removed duplicated word in the message
            # ("for for" -> "for").
            raise Exception(_("No network for virtual interface %s") %
                            vif['uuid'])
        (v4_subnet, v6_subnet) = self.ipam.get_subnets_by_net_id(context,
            ipam_tenant_id, net_id)
        v4_ips = self.ipam.get_v4_ips_by_interface(context,
            net_id, vif['uuid'], project_id=ipam_tenant_id)
        v6_ips = self.ipam.get_v6_ips_by_interface(context,
            net_id, vif['uuid'], project_id=ipam_tenant_id)
        # NOTE(review): removed unused local quantum_net_id (was
        # v4_subnet['network_id'] or v6_subnet['network_id']).

        def ip_dict(ip, subnet):
            return {
                "ip": ip,
                "netmask": subnet["netmask"],
                "enabled": "1"}

        network_dict = {
            'cidr': v4_subnet['cidr'],
            'injected': True,
            'multi_host': False}

        info = {
            'gateway': v4_subnet['gateway'],
            'dhcp_server': v4_subnet['gateway'],
            'broadcast': v4_subnet['broadcast'],
            'mac': vif['address'],
            'vif_uuid': vif['uuid'],
            'dns': [],
            'ips': [ip_dict(ip, v4_subnet) for ip in v4_ips]}
        if v6_subnet:
            if v6_subnet['cidr']:
                network_dict['cidr_v6'] = v6_subnet['cidr']
                info['ip6s'] = [ip_dict(ip, v6_subnet) for ip in v6_ips]
            if v6_subnet['gateway']:
                info['gateway6'] = v6_subnet['gateway']
        # Collect unique DNS servers from both subnets, keeping order.
        dns_dict = {}
        for s in [v4_subnet, v6_subnet]:
            for k in ['dns1', 'dns2']:
                if s and s[k]:
                    dns_dict[s[k]] = None
        info['dns'] = [d for d in dns_dict.keys()]

        network_info.append((network_dict, info))
    return network_info
def get_by_instance_uuid(cls, context, instance_uuid, use_slave=False):
    """Build a list object of the VIFs belonging to *instance_uuid*."""
    rows = db.virtual_interface_get_by_instance(context,
                                                instance_uuid,
                                                use_slave=use_slave)
    vif_list = cls(context)
    return base.obj_make_list(context, vif_list,
                              objects.VirtualInterface, rows)
def get_instance_nw_info(self, context, instance_id, instance_type_id, host):
    """This method is used by compute to fetch all network data
    that should be used when creating the VM.

    The method simply loops through all virtual interfaces
    stored in the nova DB and queries the IPAM lib to get
    the associated IP data.

    The format of returned data is 'defined' by the initial
    set of NetworkManagers found in nova/network/manager.py .
    Ideally this 'interface' will be more formally defined
    in the future.
    """
    network_info = []
    instance = db.instance_get(context, instance_id)
    project_id = instance.project_id
    admin_context = context.elevated()
    vifs = db.virtual_interface_get_by_instance(admin_context,
                                                instance_id)
    for vif in vifs:
        q_tenant_id = project_id
        ipam_tenant_id = project_id
        net_id, port_id = self.q_conn.get_port_by_attachment(
            q_tenant_id, vif['uuid'])
        if not net_id:
            # Retry under the default tenant: the port may live on a
            # shared/default network.
            q_tenant_id = FLAGS.quantum_default_tenant_id
            ipam_tenant_id = None
            net_id, port_id = self.q_conn.get_port_by_attachment(
                q_tenant_id, vif['uuid'])
        if not net_id:
            # TODO(bgh): We need to figure out a way to tell if we
            # should actually be raising this exception or not.
            # In the case that a VM spawn failed it may not have
            # attached the vif and raising the exception here
            # prevents deletion of the VM. In that case we should
            # probably just log, continue, and move on.
            # BUG FIX: removed duplicated word in the message
            # ("for for" -> "for").
            raise Exception(
                _("No network for virtual interface %s") % vif['uuid'])
        (v4_subnet, v6_subnet) = self.ipam.get_subnets_by_net_id(
            context, ipam_tenant_id, net_id)
        v4_ips = self.ipam.get_v4_ips_by_interface(
            context, net_id, vif['uuid'], project_id=ipam_tenant_id)
        v6_ips = self.ipam.get_v6_ips_by_interface(
            context, net_id, vif['uuid'], project_id=ipam_tenant_id)
        # NOTE(review): removed unused local quantum_net_id (was
        # v4_subnet['network_id'] or v6_subnet['network_id']).

        def ip_dict(ip, subnet):
            return {"ip": ip,
                    "netmask": subnet["netmask"],
                    "enabled": "1"}

        network_dict = {
            'cidr': v4_subnet['cidr'],
            'injected': True,
            'multi_host': False
        }
        q_tenant_id = project_id or FLAGS.quantum_default_tenant_id
        info = {
            'label': self.q_conn.get_network_name(q_tenant_id, net_id),
            'gateway': v4_subnet['gateway'],
            'dhcp_server': v4_subnet['gateway'],
            'broadcast': v4_subnet['broadcast'],
            'mac': vif['address'],
            'vif_uuid': vif['uuid'],
            'dns': [],
            'ips': [ip_dict(ip, v4_subnet) for ip in v4_ips]
        }
        if v6_subnet:
            if v6_subnet['cidr']:
                network_dict['cidr_v6'] = v6_subnet['cidr']
                info['ip6s'] = [ip_dict(ip, v6_subnet) for ip in v6_ips]
            if v6_subnet['gateway']:
                info['gateway6'] = v6_subnet['gateway']
        # Collect unique DNS servers from both subnets, keeping order.
        dns_dict = {}
        for s in [v4_subnet, v6_subnet]:
            for k in ['dns1', 'dns2']:
                if s and s[k]:
                    dns_dict[s[k]] = None
        info['dns'] = [d for d in dns_dict.keys()]

        network_info.append((network_dict, info))
    return network_info