def test_add_fixed_ip_instance_without_vpn_requested_networks(self):
    """add_fixed_ip_to_instance succeeds with all db calls stubbed out."""
    # Stub every db API the manager touches so no real database is hit.
    for method in ('network_get',
                   'fixed_ip_associate_pool',
                   'instance_get',
                   'virtual_interface_get_by_instance_and_network',
                   'fixed_ip_update'):
        self.mox.StubOutWithMock(db, method)
    # Record the expected call sequence with canned return values.
    db.fixed_ip_update(mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg())
    db.virtual_interface_get_by_instance_and_network(
        mox.IgnoreArg(), mox.IgnoreArg(),
        mox.IgnoreArg()).AndReturn({'id': 0})
    db.instance_get(
        mox.IgnoreArg(),
        mox.IgnoreArg()).AndReturn({'security_groups': [{'id': 0}]})
    db.fixed_ip_associate_pool(
        mox.IgnoreArg(), mox.IgnoreArg(),
        mox.IgnoreArg()).AndReturn('192.168.0.101')
    db.network_get(mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(networks[0])
    self.mox.ReplayAll()
    self.network.add_fixed_ip_to_instance(self.context, 1, HOST,
                                          networks[0]['id'])
def test_instance_dns(self):
    """A DNS entry is created for the instance's fixed IP."""
    fixedip = '192.168.0.101'
    # Stub every db API the manager touches so no real database is hit.
    for method in ('network_get',
                   'network_update',
                   'fixed_ip_associate_pool',
                   'instance_get',
                   'virtual_interface_get_by_instance_and_network',
                   'fixed_ip_update'):
        self.mox.StubOutWithMock(db, method)
    db.fixed_ip_update(mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg())
    db.virtual_interface_get_by_instance_and_network(
        mox.IgnoreArg(), mox.IgnoreArg(),
        mox.IgnoreArg()).AndReturn({'id': 0})
    db.instance_get(
        mox.IgnoreArg(),
        mox.IgnoreArg()).AndReturn({'security_groups': [{'id': 0}]})
    db.instance_get(self.context, 1).AndReturn({'display_name': HOST})
    db.fixed_ip_associate_pool(mox.IgnoreArg(), mox.IgnoreArg(),
                               mox.IgnoreArg()).AndReturn(fixedip)
    db.network_get(mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(networks[0])
    db.network_update(mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg())
    self.mox.ReplayAll()
    self.network.add_fixed_ip_to_instance(self.context, 1, HOST,
                                          networks[0]['id'])
    # The DNS manager should now resolve HOST to exactly the new fixed IP.
    addresses = self.network.instance_dns_manager.get_entries_by_name(HOST)
    self.assertEqual(len(addresses), 1)
    self.assertEqual(addresses[0], fixedip)
def test_add_fixed_ip_instance_without_vpn_requested_networks(self):
    """add_fixed_ip_to_instance succeeds with all db calls stubbed out."""
    # Stub every db API the manager touches so no real database is hit.
    for method in ('network_get',
                   'network_update',
                   'fixed_ip_associate_pool',
                   'instance_get',
                   'virtual_interface_get_by_instance_and_network',
                   'fixed_ip_update'):
        self.mox.StubOutWithMock(db, method)
    db.fixed_ip_update(mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg())
    db.virtual_interface_get_by_instance_and_network(
        mox.IgnoreArg(), mox.IgnoreArg(),
        mox.IgnoreArg()).AndReturn({'id': 0})
    db.instance_get(
        mox.IgnoreArg(),
        mox.IgnoreArg()).AndReturn({'security_groups': [{'id': 0}]})
    db.instance_get(self.context, 1).AndReturn({'display_name': HOST})
    db.fixed_ip_associate_pool(
        mox.IgnoreArg(), mox.IgnoreArg(),
        mox.IgnoreArg()).AndReturn('192.168.0.101')
    db.network_get(mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(networks[0])
    db.network_update(mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg())
    self.mox.ReplayAll()
    self.network.add_fixed_ip_to_instance(self.context, 1, HOST,
                                          networks[0]['id'])
def test_instance_dns(self):
    """A DNS entry is created for the instance's fixed IP."""
    fixedip = '192.168.0.101'
    # Stub every db API the manager touches so no real database is hit.
    for method in ('network_get',
                   'network_update',
                   'fixed_ip_associate_pool',
                   'instance_get',
                   'virtual_interface_get_by_instance_and_network',
                   'fixed_ip_update'):
        self.mox.StubOutWithMock(db, method)
    db.fixed_ip_update(mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg())
    db.virtual_interface_get_by_instance_and_network(
        mox.IgnoreArg(), mox.IgnoreArg(),
        mox.IgnoreArg()).AndReturn({'id': 0})
    db.instance_get(
        mox.IgnoreArg(),
        mox.IgnoreArg()).AndReturn({'security_groups': [{'id': 0}]})
    db.instance_get(self.context, 1).AndReturn({'display_name': HOST})
    db.fixed_ip_associate_pool(mox.IgnoreArg(), mox.IgnoreArg(),
                               mox.IgnoreArg()).AndReturn(fixedip)
    db.network_get(mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(networks[0])
    db.network_update(mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg())
    self.mox.ReplayAll()
    self.network.add_fixed_ip_to_instance(self.context, 1, HOST,
                                          networks[0]['id'])
    # The DNS manager should now resolve HOST to exactly the new fixed IP.
    addresses = self.network.instance_dns_manager.get_entries_by_name(HOST)
    self.assertEqual(len(addresses), 1)
    self.assertEqual(addresses[0], fixedip)
def _from_bm_node(instance_id, tenant_id):
    """Build (vifinfo_uuid, network_uuid, mac, addrs) tuples for a node.

    Walks every virtual interface of the instance and skips any interface
    that lacks a network, a network uuid, a vifinfo uuid, or fixed IPs.
    """
    LOG.debug('_from_bm_node(instance_id=%s,tenant_id=%s)',
              instance_id, tenant_id)
    ctx = context.get_admin_context()
    info = []
    for vif in db.virtual_interface_get_by_instance(ctx, instance_id):
        LOG.debug('vif=%s', vif.__dict__)
        mac = vif.address
        # Resolve the network the interface is plugged into, if any.
        network_ref = None
        if vif['network_id']:
            network_ref = db.network_get(ctx, vif['network_id'])
        if not network_ref:
            LOG.warn('vif.network is None')
            continue
        LOG.debug('vif.network=%s', network_ref.__dict__)
        network_uuid = network_ref.uuid
        if not network_uuid:
            LOG.warn('network_uuid is None')
            continue
        vifinfo_uuid = _get_vifinfo_uuid(tenant_id, network_uuid, vif.uuid)
        LOG.debug('vifinfo_uuid=%s', vifinfo_uuid)
        if not vifinfo_uuid:
            continue
        fixed_ips = db.fixed_ips_by_virtual_interface(ctx, vif.id)
        if not fixed_ips:
            LOG.warn('fixed_ip is None')
            continue
        addrs = [fip.address for fip in fixed_ips]
        info.append((vifinfo_uuid, network_uuid, mac, addrs))
    LOG.debug('_from_bm_node(instance_id=%s,tenant_id=%s) end: info=%s',
              instance_id, tenant_id, info)
    return info
def update_dhcp(context, network_id): """(Re)starts a dnsmasq server for a given network if a dnsmasq instance is already running then send a HUP signal causing it to reload, otherwise spawn a new instance """ network_ref = db.network_get(context, network_id) conffile = _dhcp_file(network_ref["bridge"], "conf") with open(conffile, "w") as f: f.write(get_dhcp_hosts(context, network_id)) # Make sure dnsmasq can actually read it (it setuid()s to "nobody") os.chmod(conffile, 0644) pid = _dnsmasq_pid_for(network_ref["bridge"]) # if dnsmasq is already running, then tell it to reload if pid: out, _err = _execute("cat /proc/%d/cmdline" % pid, check_exit_code=False) if conffile in out: try: _execute("sudo kill -HUP %d" % pid) return except Exception as exc: # pylint: disable-msg=W0703 LOG.debug(_("Hupping dnsmasq threw %s"), exc) else: LOG.debug(_("Pid %d is stale, relaunching dnsmasq"), pid) # FLAGFILE and DNSMASQ_INTERFACE in env env = {"FLAGFILE": FLAGS.dhcpbridge_flagfile, "DNSMASQ_INTERFACE": network_ref["bridge"]} command = _dnsmasq_cmd(network_ref) _execute(command, addl_env=env)
def deallocate_for_instance(self, context, **kwargs):
    """Called when a VM is terminated.  Loop through each virtual
    interface in the Nova DB and remove the Quantum port and clear
    the IP allocation using the IPAM.  Finally, remove the virtual
    interfaces from the Nova DB.
    """
    instance_id = kwargs.get('instance_id')
    project_id = kwargs.pop('project_id', None)
    admin_context = context.elevated()

    vifs = db.virtual_interface_get_by_instance(admin_context, instance_id)
    for vif in vifs:
        network = db.network_get(admin_context, vif['network_id'])
        # Tear down the Quantum port, then release the IPAM allocation.
        self.deallocate_port(vif['uuid'], network['uuid'], project_id,
                             instance_id)
        ipam_tenant_id = self.deallocate_ip_address(context,
                                                    network['uuid'],
                                                    project_id, vif,
                                                    instance_id)
        # DHCP leases must be refreshed once the IP is gone.
        if FLAGS.quantum_use_dhcp:
            self.update_dhcp(context, ipam_tenant_id, network, vif,
                             project_id)
        db.virtual_interface_delete(admin_context, vif['id'])
def update_dhcp(context, network_id): """(Re)starts a dnsmasq server for a given network if a dnsmasq instance is already running then send a HUP signal causing it to reload, otherwise spawn a new instance """ network_ref = db.network_get(context, network_id) conffile = _dhcp_file(network_ref['bridge'], 'conf') with open(conffile, 'w') as f: f.write(get_dhcp_hosts(context, network_id)) # Make sure dnsmasq can actually read it (it setuid()s to "nobody") os.chmod(conffile, 0644) pid = _dnsmasq_pid_for(network_ref['bridge']) # if dnsmasq is already running, then tell it to reload if pid: out, _err = _execute('cat', "/proc/%d/cmdline" % pid, check_exit_code=False) if conffile in out: try: _execute('sudo', 'kill', '-HUP', pid) return except Exception as exc: # pylint: disable=W0703 LOG.debug(_("Hupping dnsmasq threw %s"), exc) else: LOG.debug(_("Pid %d is stale, relaunching dnsmasq"), pid) # FLAGFILE and DNSMASQ_INTERFACE in env env = {'FLAGFILE': FLAGS.dhcpbridge_flagfile, 'DNSMASQ_INTERFACE': network_ref['bridge']} command = _dnsmasq_cmd(network_ref) _execute(*command, addl_env=env)
def test_add_fixed_ip_instance_without_vpn_requested_networks(self):
    """add_fixed_ip_to_instance succeeds with all db calls stubbed out."""
    # Stub every db API the manager touches so no real database is hit.
    for method in ("network_get",
                   "fixed_ip_associate_pool",
                   "instance_get",
                   "virtual_interface_get_by_instance_and_network",
                   "fixed_ip_update"):
        self.mox.StubOutWithMock(db, method)
    db.fixed_ip_update(mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg())
    db.virtual_interface_get_by_instance_and_network(
        mox.IgnoreArg(), mox.IgnoreArg(),
        mox.IgnoreArg()).AndReturn({"id": 0})
    db.instance_get(
        mox.IgnoreArg(),
        mox.IgnoreArg()).AndReturn({"security_groups": [{"id": 0}]})
    db.fixed_ip_associate_pool(
        mox.IgnoreArg(), mox.IgnoreArg(),
        mox.IgnoreArg()).AndReturn("192.168.0.101")
    db.network_get(mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(networks[0])
    self.mox.ReplayAll()
    self.network.add_fixed_ip_to_instance(self.context, 1, HOST,
                                          networks[0]["id"])
def test_network_create_safe(self):
    """network_create_safe assigns a 36-char uuid that round-trips."""
    ctxt = context.get_admin_context()
    values = {'host': 'localhost', 'project_id': 'project1'}
    network = db.network_create_safe(ctxt, values)
    # A uuid must have been generated (canonical form is 36 chars).
    self.assertNotEqual(None, network.uuid)
    self.assertEqual(36, len(network.uuid))
    # Re-reading the row returns the same uuid.
    db_network = db.network_get(ctxt, network.id)
    self.assertEqual(network.uuid, db_network.uuid)
def _on_set_network_host(self, context, network_id):
    """Called when this host becomes the host for a project."""
    # Record the DHCP start address before bringing up the bridge.
    net = {'dhcp_start': FLAGS.flat_network_dhcp_start}
    self.db.network_update(context, network_id, net)
    network_ref = db.network_get(context, network_id)
    self.driver.ensure_bridge(network_ref['bridge'],
                              FLAGS.flat_interface,
                              network_ref)
    # Skip dnsmasq/radvd management entirely when faking the network.
    if not FLAGS.fake_network:
        self.driver.update_dhcp(context, network_id)
        if (FLAGS.use_ipv6):
            self.driver.update_ra(context, network_id)
def _on_set_network_host(self, context, network_id):
    """Called when this host becomes the host for a project."""
    # Record the DHCP start address before bringing up the bridge.
    net = {'dhcp_start': FLAGS.flat_network_dhcp_start}
    self.db.network_update(context, network_id, net)
    network_ref = db.network_get(context, network_id)
    self.driver.ensure_bridge(network_ref['bridge'],
                              FLAGS.flat_interface,
                              network_ref)
    # Skip dnsmasq/radvd management entirely when faking the network.
    if not FLAGS.fake_network:
        self.driver.update_dhcp(context, network_id)
        if FLAGS.use_ipv6:
            self.driver.update_ra(context, network_id)
def deallocate_for_instance(self, context, **kwargs):
    """Called when a VM is terminated.  Loop through each virtual
    interface in the Nova DB and remove the Quantum port and clear
    the IP allocation using the IPAM.  Finally, remove the virtual
    interfaces from the Nova DB.
    """
    instance_id = kwargs.get("instance_id")
    project_id = kwargs.pop("project_id", None)
    admin_context = context.elevated()
    vifs = db.virtual_interface_get_by_instance(admin_context, instance_id)
    for vif_ref in vifs:
        interface_id = vif_ref["uuid"]
        q_tenant_id = project_id
        network_ref = db.network_get(admin_context, vif_ref["network_id"])
        net_id = network_ref["uuid"]

        # port deallocation block
        try:
            port_id = None
            port_id = self.q_conn.get_port_by_attachment(q_tenant_id,
                                                         net_id,
                                                         interface_id)
            if not port_id:
                # Fall back to the default tenant for shared networks.
                q_tenant_id = FLAGS.quantum_default_tenant_id
                port_id = self.q_conn.get_port_by_attachment(q_tenant_id,
                                                             net_id,
                                                             interface_id)
            if not port_id:
                LOG.error("Unable to find port with attachment: %s" %
                          (interface_id))
            else:
                self.q_conn.detach_and_delete_port(q_tenant_id, net_id,
                                                   port_id)
        # Catch Exception (not a bare except, which would also swallow
        # SystemExit/KeyboardInterrupt) so the rest of deallocate can
        # still succeed for the remaining interfaces.
        except Exception:
            msg = _("port deallocation failed for instance: "
                    "|%(instance_id)s|, port_id: |%(port_id)s|")
            LOG.critical(msg % locals())

        # ipam deallocation block
        try:
            ipam_tenant_id = self.ipam.get_tenant_id_by_net_id(
                context, net_id, vif_ref["uuid"], project_id)
            self.ipam.deallocate_ips_by_vif(context, ipam_tenant_id,
                                            net_id, vif_ref)
            db.virtual_interface_delete(admin_context, vif_ref["id"])

            # If DHCP is enabled on this network then we need to update the
            # leases and restart the server.
            if FLAGS.quantum_use_dhcp:
                self.update_dhcp(context, ipam_tenant_id, network_ref,
                                 vif_ref, project_id)
        # Same narrowing here: never mask interpreter-exit exceptions.
        except Exception:
            vif_uuid = vif_ref["uuid"]
            msg = _("ipam deallocation failed for instance: "
                    "|%(instance_id)s|, vif_uuid: |%(vif_uuid)s|")
            LOG.critical(msg % locals())
def deallocate_for_instance(self, context, **kwargs):
    """Called when a VM is terminated.  Loop through each virtual
    interface in the Nova DB and remove the Quantum port and clear
    the IP allocation using the IPAM.  Finally, remove the virtual
    interfaces from the Nova DB.
    """
    instance_id = kwargs.get('instance_id')
    project_id = kwargs.pop('project_id', None)
    admin_context = context.elevated()

    vifs = db.virtual_interface_get_by_instance(admin_context, instance_id)
    for vif_ref in vifs:
        interface_id = vif_ref['uuid']
        q_tenant_id = project_id
        network_ref = db.network_get(admin_context, vif_ref['network_id'])
        net_id = network_ref['uuid']

        # Look the port up under the project tenant first, then fall back
        # to the default tenant (shared networks live there).
        port_id = self.q_conn.get_port_by_attachment(q_tenant_id, net_id,
                                                     interface_id)
        if not port_id:
            q_tenant_id = FLAGS.quantum_default_tenant_id
            port_id = self.q_conn.get_port_by_attachment(
                q_tenant_id, net_id, interface_id)

        if not port_id:
            LOG.error("Unable to find port with attachment: %s" %
                      (interface_id))
        else:
            self.q_conn.detach_and_delete_port(q_tenant_id, net_id,
                                               port_id)

        ipam_tenant_id = self.ipam.get_tenant_id_by_net_id(context, net_id,
                                                           vif_ref['uuid'],
                                                           project_id)
        self.ipam.deallocate_ips_by_vif(context, ipam_tenant_id, net_id,
                                        vif_ref)

        # If DHCP is enabled on this network then we need to update the
        # leases and restart the server.
        if FLAGS.quantum_use_dhcp:
            self.update_dhcp(context, ipam_tenant_id, network_ref,
                             vif_ref, project_id)

    try:
        db.virtual_interface_delete_by_instance(admin_context, instance_id)
    except exception.InstanceNotFound:
        LOG.error(_("Attempted to deallocate non-existent instance: %s" %
                    (instance_id)))
def test_network_delete_safe(self):
    """network_delete_safe refuses while IPs are allocated, then deletes."""
    ctxt = context.get_admin_context()
    values = {"host": "localhost", "project_id": "project1"}
    network = db.network_create_safe(ctxt, values)
    db_network = db.network_get(ctxt, network.id)
    # One free and one allocated fixed IP on the network.
    values = {"network_id": network["id"], "address": "fake1"}
    address1 = db.fixed_ip_create(ctxt, values)
    values = {"network_id": network["id"],
              "address": "fake2",
              "allocated": True}
    address2 = db.fixed_ip_create(ctxt, values)
    # Deletion must fail while an IP is still allocated.
    self.assertRaises(exception.NetworkInUse,
                      db.network_delete_safe, ctxt, network["id"])
    # Releasing the allocation makes the delete succeed.
    db.fixed_ip_update(ctxt, address2, {"allocated": False})
    network = db.network_delete_safe(ctxt, network["id"])
    # The fixed IPs are soft-deleted along with the network.
    ctxt = ctxt.elevated(read_deleted="yes")
    fixed_ip = db.fixed_ip_get_by_address(ctxt, address1)
    self.assertTrue(fixed_ip["deleted"])
def get_instance_nw_info(self, context, instance_id, instance_uuid,
                         instance_type_id, host):
    """This method is used by compute to fetch all network data
    that should be used when creating the VM.

    The method simply loops through all virtual interfaces
    stored in the nova DB and queries the IPAM lib to get
    the associated IP data.

    The format of returned data is 'defined' by the initial
    set of NetworkManagers found in nova/network/manager.py .
    Ideally this 'interface' will be more formally defined
    in the future.
    """
    admin_context = context.elevated()
    project_id = context.project_id
    vifs = db.virtual_interface_get_by_instance(context, instance_id)
    instance_type = instance_types.get_instance_type(instance_type_id)

    # Map network uuid -> owning tenant (None means "global").
    net_tenant_dict = dict((net_id, tenant_id)
                           for (net_id, tenant_id)
                           in self.ipam.get_project_and_global_net_ids(
                               context, project_id))

    networks = {}
    for vif in vifs:
        if vif.get('network_id') is not None:
            network = db.network_get(admin_context, vif['network_id'])
            net_tenant_id = net_tenant_dict[network['uuid']]
            if net_tenant_id is None:
                net_tenant_id = FLAGS.quantum_default_tenant_id
            network = {'id': network['id'],
                       'uuid': network['uuid'],
                       'bridge': 'ovs_flag',
                       'label': self.q_conn.get_network_name(
                           net_tenant_id, network['uuid']),
                       'project_id': net_tenant_id}
            networks[vif['uuid']] = network

    # update instance network cache and return network_info
    nw_info = self.build_network_info_model(context, vifs, networks,
                                            instance_type, host)
    db.instance_info_cache_update(context, instance_uuid,
                                  {'network_info': nw_info.as_cache()})
    return nw_info
def test_network_delete_safe(self):
    """network_delete_safe refuses while IPs are allocated, then deletes."""
    ctxt = context.get_admin_context()
    values = {'host': 'localhost', 'project_id': 'project1'}
    network = db.network_create_safe(ctxt, values)
    db_network = db.network_get(ctxt, network.id)
    # One free and one allocated fixed IP on the network.
    values = {'network_id': network['id'], 'address': 'fake1'}
    address1 = db.fixed_ip_create(ctxt, values)
    values = {'network_id': network['id'],
              'address': 'fake2',
              'allocated': True}
    address2 = db.fixed_ip_create(ctxt, values)
    # Deletion must fail while an IP is still allocated.
    self.assertRaises(exception.NetworkInUse,
                      db.network_delete_safe, ctxt, network['id'])
    # Releasing the allocation makes the delete succeed.
    db.fixed_ip_update(ctxt, address2, {'allocated': False})
    network = db.network_delete_safe(ctxt, network['id'])
    # The fixed IPs are soft-deleted along with the network.
    ctxt = ctxt.elevated(read_deleted='yes')
    fixed_ip = db.fixed_ip_get_by_address(ctxt, address1)
    self.assertTrue(fixed_ip['deleted'])
def get_instance_nw_info(self, context, instance_id, instance_uuid,
                         rxtx_factor, host, **kwargs):
    """This method is used by compute to fetch all network data
    that should be used when creating the VM.

    The method simply loops through all virtual interfaces
    stored in the nova DB and queries the IPAM lib to get
    the associated IP data.

    The format of returned data is 'defined' by the initial
    set of NetworkManagers found in nova/network/manager.py .
    Ideally this 'interface' will be more formally defined
    in the future.
    """
    project_id = kwargs['project_id']
    vifs = db.virtual_interface_get_by_instance(context, instance_id)

    # Map network uuid -> owning tenant (None means "global").
    net_tenant_dict = dict(
        (net_id, tenant_id)
        for (net_id, tenant_id)
        in self.ipam.get_project_and_global_net_ids(context, project_id))

    networks = {}
    for vif in vifs:
        if vif.get('network_id') is not None:
            network = db.network_get(context.elevated(),
                                     vif['network_id'])
            net_tenant_id = net_tenant_dict[network['uuid']]
            if net_tenant_id is None:
                net_tenant_id = FLAGS.quantum_default_tenant_id
            network = {'id': network['id'],
                       'uuid': network['uuid'],
                       'bridge': '',  # Quantum ignores this field
                       'label': network['label'],
                       'injected': FLAGS.flat_injected,
                       'project_id': net_tenant_id}
            networks[vif['uuid']] = network

    # update instance network cache and return network_info
    nw_info = self.build_network_info_model(context, vifs, networks,
                                            rxtx_factor, host)
    db.instance_info_cache_update(context, instance_uuid,
                                  {'network_info': nw_info.as_cache()})
    return nw_info
def save(self, context):
    """Persist changed fields, routing 'host' through network_set_host."""
    updates = self._get_primitive_changes()
    if 'netmask_v6' in updates:
        # NOTE(danms): For some reason, historical code stores the
        # IPv6 netmask as just the CIDR mask length, so convert that
        # back here before saving for now.
        updates['netmask_v6'] = netaddr.IPNetwork(
            updates['netmask_v6']).netmask

    # 'host' has a dedicated db API and must not go through network_update.
    set_host = 'host' in updates
    if set_host:
        db.network_set_host(context, self.id, updates.pop('host'))

    if updates:
        db_network = db.network_update(context, self.id, updates)
    elif set_host:
        # Only the host changed; re-read the row to refresh this object.
        db_network = db.network_get(context, self.id)
    else:
        db_network = None

    if db_network is not None:
        self._from_db_object(context, self, db_network)
def update_ra(context, network_id): network_ref = db.network_get(context, network_id) conffile = _ra_file(network_ref['bridge'], 'conf') with open(conffile, 'w') as f: conf_str = """ interface %s { AdvSendAdvert on; MinRtrAdvInterval 3; MaxRtrAdvInterval 10; prefix %s { AdvOnLink on; AdvAutonomous on; }; }; """ % (network_ref['bridge'], network_ref['cidr_v6']) f.write(conf_str) # Make sure radvd can actually read it (it setuid()s to "nobody") os.chmod(conffile, 0644) pid = _ra_pid_for(network_ref['bridge']) # if radvd is already running, then tell it to reload if pid: out, _err = _execute('cat', '/proc/%d/cmdline' % pid, check_exit_code=False) if conffile in out: try: _execute('sudo', 'kill', pid) except Exception as exc: # pylint: disable=W0703 LOG.debug(_("killing radvd threw %s"), exc) else: LOG.debug(_("Pid %d is stale, relaunching radvd"), pid) command = _ra_cmd(network_ref) _execute(*command) db.network_update( context, network_id, {"gateway_v6": utils.get_my_linklocal(network_ref['bridge'])})
def test_network_delete_safe(self):
    """network_delete_safe refuses while IPs are allocated, then deletes."""
    ctxt = context.get_admin_context()
    values = {'host': 'localhost', 'project_id': 'project1'}
    network = db.network_create_safe(ctxt, values)
    db_network = db.network_get(ctxt, network.id)
    # One free and one allocated fixed IP on the network.
    values = {'network_id': network['id'], 'address': 'fake1'}
    address1 = db.fixed_ip_create(ctxt, values)
    values = {'network_id': network['id'],
              'address': 'fake2',
              'allocated': True}
    address2 = db.fixed_ip_create(ctxt, values)
    # Deletion must fail while an IP is still allocated.
    self.assertRaises(exception.NetworkInUse,
                      db.network_delete_safe, ctxt, network['id'])
    # Releasing the allocation makes the delete succeed.
    db.fixed_ip_update(ctxt, address2, {'allocated': False})
    network = db.network_delete_safe(ctxt, network['id'])
    # The fixed IPs are soft-deleted along with the network.
    ctxt = ctxt.elevated(read_deleted='yes')
    fixed_ip = db.fixed_ip_get_by_address(ctxt, address1)
    self.assertTrue(fixed_ip['deleted'])
def update_ra(context, network_id): network_ref = db.network_get(context, network_id) conffile = _ra_file(network_ref["bridge"], "conf") with open(conffile, "w") as f: conf_str = """ interface %s { AdvSendAdvert on; MinRtrAdvInterval 3; MaxRtrAdvInterval 10; prefix %s { AdvOnLink on; AdvAutonomous on; }; }; """ % ( network_ref["bridge"], network_ref["cidr_v6"], ) f.write(conf_str) # Make sure radvd can actually read it (it setuid()s to "nobody") os.chmod(conffile, 0644) pid = _ra_pid_for(network_ref["bridge"]) # if radvd is already running, then tell it to reload if pid: out, _err = _execute("cat /proc/%d/cmdline" % pid, check_exit_code=False) if conffile in out: try: _execute("sudo kill %d" % pid) except Exception as exc: # pylint: disable-msg=W0703 LOG.debug(_("killing radvd threw %s"), exc) else: LOG.debug(_("Pid %d is stale, relaunching radvd"), pid) command = _ra_cmd(network_ref) _execute(command) db.network_update(context, network_id, {"ra_server": utils.get_my_linklocal(network_ref["bridge"])})
def update_ra(context, network_id): network_ref = db.network_get(context, network_id) conffile = _ra_file(network_ref['bridge'], 'conf') with open(conffile, 'w') as f: conf_str = """ interface %s { AdvSendAdvert on; MinRtrAdvInterval 3; MaxRtrAdvInterval 10; prefix %s { AdvOnLink on; AdvAutonomous on; }; }; """ % (network_ref['bridge'], network_ref['cidr_v6']) f.write(conf_str) # Make sure radvd can actually read it (it setuid()s to "nobody") os.chmod(conffile, 0644) pid = _ra_pid_for(network_ref['bridge']) # if radvd is already running, then tell it to reload if pid: out, _err = _execute('cat', '/proc/%d/cmdline' % pid, check_exit_code=False) if conffile in out: try: _execute('sudo', 'kill', pid) except Exception as exc: # pylint: disable=W0703 LOG.debug(_('killing radvd threw %s'), exc) else: LOG.debug(_('Pid %d is stale, relaunching radvd'), pid) command = _ra_cmd(network_ref) _execute(*command) db.network_update(context, network_id, {'gateway_v6': utils.get_my_linklocal(network_ref['bridge'])})
def get_instance_nw_info(self, context, instance_id, instance_uuid,
                         rxtx_factor, host):
    """This method is used by compute to fetch all network data
    that should be used when creating the VM.

    The method simply loops through all virtual interfaces
    stored in the nova DB and queries the IPAM lib to get
    the associated IP data.

    The format of returned data is 'defined' by the initial
    set of NetworkManagers found in nova/network/manager.py .
    Ideally this 'interface' will be more formally defined
    in the future.
    """
    admin_context = context.elevated()
    project_id = context.project_id
    vifs = db.virtual_interface_get_by_instance(context, instance_id)

    # Map network uuid -> owning tenant (None means "global").
    net_tenant_dict = dict(
        (net_id, tenant_id)
        for (net_id, tenant_id)
        in self.ipam.get_project_and_global_net_ids(context, project_id))

    networks = {}
    for vif in vifs:
        if vif.get("network_id") is not None:
            network = db.network_get(admin_context, vif["network_id"])
            net_tenant_id = net_tenant_dict[network["uuid"]]
            if net_tenant_id is None:
                net_tenant_id = FLAGS.quantum_default_tenant_id
            network = {"id": network["id"],
                       "uuid": network["uuid"],
                       "bridge": "",  # Quantum ignores this field
                       "label": network["label"],
                       "project_id": net_tenant_id}
            networks[vif["uuid"]] = network

    # update instance network cache and return network_info
    nw_info = self.build_network_info_model(context, vifs, networks,
                                            rxtx_factor, host)
    db.instance_info_cache_update(context, instance_uuid,
                                  {"network_info": nw_info.as_cache()})
    return nw_info
def deallocate_for_instance(self, context, **kwargs):
    """Called when a VM is terminated.  Loop through each virtual
    interface in the Nova DB and remove the Quantum port and clear
    the IP allocation using the IPAM.  Finally, remove the virtual
    interfaces from the Nova DB.
    """
    instance_id = kwargs.get('instance_id')
    project_id = kwargs.pop('project_id', None)
    admin_context = context.elevated()

    vifs = db.virtual_interface_get_by_instance(admin_context, instance_id)
    for vif in vifs:
        network = db.network_get(admin_context, vif['network_id'])
        self.deallocate_port(vif['uuid'], network['uuid'], project_id,
                             instance_id)
        ipam_tenant_id = self.deallocate_ip_address(
            context, network['uuid'], project_id, vif, instance_id)
        if FLAGS.quantum_use_dhcp:
            if network['host'] == self.host:
                # This host owns the network's dnsmasq — update locally.
                self.update_dhcp(context, ipam_tenant_id, network,
                                 vif, project_id)
            else:
                # Another host runs dnsmasq for this network — RPC to it.
                topic = self.db.queue_get_for(context,
                                              FLAGS.network_topic,
                                              network['host'])
                rpc.call(context, topic,
                         {'method': 'update_dhcp',
                          'args': {'ipam_tenant_id': ipam_tenant_id,
                                   'network_ref': network,
                                   'vif_ref': vif,
                                   'project_id': network['project_id']}})
        db.virtual_interface_delete(admin_context, vif['id'])
def deallocate_for_instance(self, context, **kwargs):
    """Called when a VM is terminated.  Loop through each virtual
    interface in the Nova DB and remove the Quantum port and clear
    the IP allocation using the IPAM.  Finally, remove the virtual
    interfaces from the Nova DB.
    """
    instance_id = kwargs.get("instance_id")
    project_id = kwargs.pop("project_id", None)
    admin_context = context.elevated()

    vifs = db.virtual_interface_get_by_instance(admin_context, instance_id)
    for vif in vifs:
        network = db.network_get(admin_context, vif["network_id"])
        self.deallocate_port(vif["uuid"], network["uuid"], project_id,
                             instance_id)
        ipam_tenant_id = self.deallocate_ip_address(
            context, network["uuid"], project_id, vif, instance_id)
        if FLAGS.quantum_use_dhcp:
            if network["host"] == self.host:
                # This host owns the network's dnsmasq — update locally.
                self.update_dhcp(context, ipam_tenant_id, network,
                                 vif, project_id)
            else:
                # Another host runs dnsmasq for this network — RPC to it.
                topic = self.db.queue_get_for(context,
                                              FLAGS.network_topic,
                                              network["host"])
                rpc.call(context, topic,
                         {"method": "update_dhcp",
                          "args": {"ipam_tenant_id": ipam_tenant_id,
                                   "network_ref": network,
                                   "vif_ref": vif,
                                   "project_id": network["project_id"]}})
        db.virtual_interface_delete(admin_context, vif["id"])
def deallocate_for_instance(self, context, **kwargs):
    """Called when a VM is terminated.  Loop through each virtual
    interface in the Nova DB and remove the Quantum port and clear
    the IP allocation using the IPAM.  Finally, remove the virtual
    interfaces from the Nova DB.
    """
    instance_id = kwargs.get('instance_id')
    project_id = kwargs.pop('project_id', None)
    admin_context = context.elevated()

    # VIFs are keyed by instance uuid here, so resolve the instance first.
    instance = db.instance_get(context, instance_id)
    vifs = db.virtual_interface_get_by_instance(admin_context,
                                                instance['uuid'])
    for vif in vifs:
        network = db.network_get(admin_context, vif['network_id'])
        self.deallocate_port(vif['uuid'], network['uuid'], project_id,
                             instance_id)
        ipam_tenant_id = self.deallocate_ip_address(context,
                                                    network['uuid'],
                                                    project_id, vif,
                                                    instance_id)
        if FLAGS.quantum_use_dhcp:
            if network['host'] == self.host:
                # This host owns the network's dnsmasq — update locally.
                self.update_dhcp(context, ipam_tenant_id, network,
                                 vif, project_id)
            else:
                # Another host runs dnsmasq for this network — RPC to it.
                topic = rpc.queue_get_for(context,
                                          FLAGS.network_topic,
                                          network['host'])
                rpc.call(context, topic,
                         {'method': 'update_dhcp',
                          'args': {'ipam_tenant_id': ipam_tenant_id,
                                   'network_ref': network,
                                   'vif_ref': vif,
                                   'project_id': network['project_id']}})
        db.virtual_interface_delete(admin_context, vif['id'])
def update_dhcp(context, network_id): """(Re)starts a dnsmasq server for a given network. If a dnsmasq instance is already running then send a HUP signal causing it to reload, otherwise spawn a new instance. """ network_ref = db.network_get(context, network_id) conffile = _dhcp_file(network_ref['bridge'], 'conf') with open(conffile, 'w') as f: f.write(get_dhcp_hosts(context, network_id)) # Make sure dnsmasq can actually read it (it setuid()s to "nobody") os.chmod(conffile, 0644) pid = _dnsmasq_pid_for(network_ref['bridge']) # if dnsmasq is already running, then tell it to reload if pid: out, _err = _execute('cat', '/proc/%d/cmdline' % pid, check_exit_code=False) if conffile in out: try: _execute('sudo', 'kill', '-HUP', pid) return except Exception as exc: # pylint: disable=W0703 LOG.debug(_('Hupping dnsmasq threw %s'), exc) else: LOG.debug(_('Pid %d is stale, relaunching dnsmasq'), pid) # FLAGFILE and DNSMASQ_INTERFACE in env env = {'FLAGFILE': FLAGS.dhcpbridge_flagfile, 'DNSMASQ_INTERFACE': network_ref['bridge']} command = _dnsmasq_cmd(network_ref) _execute(*command, addl_env=env)
def init_leases(network_id):
    """Get the list of hosts for a network."""
    admin_ctxt = context.get_admin_context()
    network_ref = db.network_get(admin_ctxt, network_id)
    # Delegate to whichever network manager this deployment configured.
    manager = importutils.import_object(CONF.network_manager)
    return manager.get_dhcp_leases(admin_ctxt, network_ref)
def get_instance_nw_info(self, context, instance_id, instance_type_id,
                         host):
    """This method is used by compute to fetch all network data
    that should be used when creating the VM.

    The method simply loops through all virtual interfaces
    stored in the nova DB and queries the IPAM lib to get
    the associated IP data.

    The format of returned data is 'defined' by the initial
    set of NetworkManagers found in nova/network/manager.py .
    Ideally this 'interface' will be more formally defined
    in the future.
    """
    network_info = []
    instance = db.instance_get(context, instance_id)
    project_id = instance.project_id

    admin_context = context.elevated()
    vifs = db.virtual_interface_get_by_instance(admin_context,
                                                instance_id)
    for vif in vifs:
        net = db.network_get(admin_context, vif['network_id'])
        net_id = net['uuid']

        if not net_id:
            # TODO(bgh): We need to figure out a way to tell if we
            # should actually be raising this exception or not.
            # In the case that a VM spawn failed it may not have
            # attached the vif and raising the exception here
            # prevents deletion of the VM.  In that case we should
            # probably just log, continue, and move on.
            # Fixed the doubled word ("for for") in the message.
            raise Exception(_("No network for virtual interface %s") %
                            vif['uuid'])

        ipam_tenant_id = self.ipam.get_tenant_id_by_net_id(context,
                                                           net_id,
                                                           vif['uuid'],
                                                           project_id)
        v4_subnet, v6_subnet = \
            self.ipam.get_subnets_by_net_id(context, ipam_tenant_id,
                                            net_id, vif['uuid'])
        v4_ips = self.ipam.get_v4_ips_by_interface(
            context, net_id, vif['uuid'], project_id=ipam_tenant_id)
        v6_ips = self.ipam.get_v6_ips_by_interface(
            context, net_id, vif['uuid'], project_id=ipam_tenant_id)

        def ip_dict(ip, subnet):
            return {
                "ip": ip,
                "netmask": subnet["netmask"],
                "enabled": "1"}

        network_dict = {
            'cidr': v4_subnet['cidr'],
            'injected': True,
            'multi_host': False}

        q_tenant_id = project_id or FLAGS.quantum_default_tenant_id
        info = {
            'label': self.q_conn.get_network_name(q_tenant_id, net_id),
            'gateway': v4_subnet['gateway'],
            'dhcp_server': v4_subnet['gateway'],
            'broadcast': v4_subnet['broadcast'],
            'mac': vif['address'],
            'vif_uuid': vif['uuid'],
            'dns': [],
            'ips': [ip_dict(ip, v4_subnet) for ip in v4_ips]}

        if v6_subnet:
            if v6_subnet['cidr']:
                network_dict['cidr_v6'] = v6_subnet['cidr']
                info['ip6s'] = [ip_dict(ip, v6_subnet) for ip in v6_ips]

            if v6_subnet['gateway']:
                info['gateway6'] = v6_subnet['gateway']

        # Merge dns1/dns2 from both subnets, deduplicated.
        dns_dict = {}
        for s in [v4_subnet, v6_subnet]:
            for k in ['dns1', 'dns2']:
                if s and s[k]:
                    dns_dict[s[k]] = None
        info['dns'] = [d for d in dns_dict.keys()]

        network_info.append((network_dict, info))
    return network_info
def get(self, req, id): context = req.environ["nova.context"] net = db.network_get(context, id) return {"network": network_dict(net)}
def get_by_id(cls, context, network_id, project_only='allow_none'): db_network = db.network_get(context, network_id, project_only=project_only) return cls._from_db_object(context, cls(), db_network)
def get_instance_nw_info(self, context, instance_id, instance_type_id,
                         host):
    """This method is used by compute to fetch all network data
       that should be used when creating the VM.

       The method simply loops through all virtual interfaces
       stored in the nova DB and queries the IPAM lib to get
       the associated IP data.

       The format of returned data is 'defined' by the initial
       set of NetworkManagers found in nova/network/manager.py .
       Ideally this 'interface' will be more formally defined
       in the future.
    """
    network_info = []
    instance = db.instance_get(context, instance_id)
    project_id = instance.project_id
    admin_context = context.elevated()
    vifs = db.virtual_interface_get_by_instance(admin_context,
                                                instance_id)
    for vif in vifs:
        net = db.network_get(admin_context, vif['network_id'])
        net_id = net['uuid']
        if not net_id:
            # TODO(bgh): We need to figure out a way to tell if we
            # should actually be raising this exception or not.
            # In the case that a VM spawn failed it may not have
            # attached the vif and raising the exception here
            # prevents deletion of the VM. In that case we should
            # probably just log, continue, and move on.
            # FIX: message previously read "No network for for ..."
            raise Exception(
                _("No network for virtual interface %s") % vif['uuid'])
        ipam_tenant_id = self.ipam.get_tenant_id_by_net_id(
            context, net_id, vif['uuid'], project_id)
        v4_subnet, v6_subnet = \
            self.ipam.get_subnets_by_net_id(context, ipam_tenant_id,
                                            net_id, vif['uuid'])
        v4_ips = self.ipam.get_v4_ips_by_interface(
            context, net_id, vif['uuid'], project_id=ipam_tenant_id)
        v6_ips = self.ipam.get_v6_ips_by_interface(
            context, net_id, vif['uuid'], project_id=ipam_tenant_id)

        def ip_dict(ip, subnet):
            # One per-IP entry in the NetworkManager-style format.
            return {"ip": ip,
                    "netmask": subnet["netmask"],
                    "enabled": "1"}

        network_dict = {
            'cidr': v4_subnet['cidr'],
            'injected': True,
            'bridge': net['bridge'],
            'multi_host': False}

        q_tenant_id = project_id or FLAGS.quantum_default_tenant_id
        info = {
            'label': self.q_conn.get_network_name(q_tenant_id, net_id),
            'gateway': v4_subnet['gateway'],
            # DHCP is served from the gateway address here.
            'dhcp_server': v4_subnet['gateway'],
            'broadcast': v4_subnet['broadcast'],
            'mac': vif['address'],
            'vif_uuid': vif['uuid'],
            'dns': [],
            'ips': [ip_dict(ip, v4_subnet) for ip in v4_ips]}

        if v6_subnet:
            if v6_subnet['cidr']:
                network_dict['cidr_v6'] = v6_subnet['cidr']
                info['ip6s'] = [ip_dict(ip, v6_subnet) for ip in v6_ips]

            if v6_subnet['gateway']:
                info['gateway_v6'] = v6_subnet['gateway']

        # Collect unique DNS servers from both subnets (dict as set).
        dns_dict = {}
        for s in [v4_subnet, v6_subnet]:
            for k in ['dns1', 'dns2']:
                if s and s[k]:
                    dns_dict[s[k]] = None
        info['dns'] = [d for d in dns_dict.keys()]

        network_info.append((network_dict, info))
    return network_info
def get_instance_nw_info(self, context, instance_id, instance_type_id,
                         host):
    """This method is used by compute to fetch all network data
       that should be used when creating the VM.

       The method simply loops through all virtual interfaces
       stored in the nova DB and queries the IPAM lib to get
       the associated IP data.

       The format of returned data is 'defined' by the initial
       set of NetworkManagers found in nova/network/manager.py .
       Ideally this 'interface' will be more formally defined
       in the future.
    """
    network_info = []
    instance = db.instance_get(context, instance_id)
    project_id = instance.project_id
    admin_context = context.elevated()
    vifs = db.virtual_interface_get_by_instance(admin_context,
                                                instance_id)
    for vif in vifs:
        net = db.network_get(admin_context, vif["network_id"])
        net_id = net["uuid"]
        if not net_id:
            # TODO(bgh): We need to figure out a way to tell if we
            # should actually be raising this exception or not.
            # In the case that a VM spawn failed it may not have
            # attached the vif and raising the exception here
            # prevents deletion of the VM. In that case we should
            # probably just log, continue, and move on.
            # FIX: message previously read "No network for for ..."
            raise Exception(_("No network for virtual interface %s") %
                            vif["uuid"])
        ipam_tenant_id = self.ipam.get_tenant_id_by_net_id(
            context, net_id, vif["uuid"], project_id)
        v4_subnets, v6_subnets = self.ipam.get_subnets_by_net_id(
            context, ipam_tenant_id, net_id, vif["uuid"])
        v4_ips = self.ipam.get_v4_ips_by_interface(
            context, net_id, vif["uuid"], project_id=ipam_tenant_id)
        v6_ips = self.ipam.get_v6_ips_by_interface(
            context, net_id, vif["uuid"], project_id=ipam_tenant_id)

        def _make_network_dict_and_info(subnet, ips, net, vif, v6=False):
            # Build one (network_dict, info) pair for a single subnet,
            # choosing the v4 or v6 field layout.
            def ip_dict(ip, subnet):
                return {"ip": ip,
                        "netmask": subnet["netmask"],
                        "enabled": "1"}

            network_dict = {"injected": True,
                            "bridge": net["bridge"],
                            "multi_host": False}
            q_tenant_id = project_id or FLAGS.quantum_default_tenant_id
            info = {
                "label": self.q_conn.get_network_name(q_tenant_id,
                                                      net["uuid"]),
                "gateway": subnet["gateway"],
                # DHCP is served from the gateway address here.
                "dhcp_server": subnet["gateway"],
                "broadcast": subnet["broadcast"],
                "mac": vif["address"],
                "vif_uuid": vif["uuid"],
                "dns": [],
            }
            if not v6:
                network_dict["cidr"] = subnet["cidr"]
                info["ips"] = [ip_dict(ip, subnet) for ip in ips]
            else:
                if subnet["cidr"]:
                    network_dict["cidr_v6"] = subnet["cidr"]
                    info["ip6s"] = [ip_dict(ip, subnet) for ip in ips]
                if subnet["gateway"]:
                    info["gateway6"] = subnet["gateway"]
            # Collect unique DNS servers (dict used as a set).
            dns_dict = {}
            for k in ["dns1", "dns2"]:
                if k in subnet and subnet[k]:
                    dns_dict[subnet[k]] = None
            info["dns"] = [d for d in dns_dict.keys()]
            return network_dict, info

        for subnet in v4_subnets:
            network_dict, info = _make_network_dict_and_info(
                subnet, v4_ips, net, vif, False)
            network_info.append((network_dict, info))
        for subnet in v6_subnets:
            network_dict, info = _make_network_dict_and_info(
                subnet, v6_ips, net, vif, True)
            network_info.append((network_dict, info))
    return network_info
def get(self, req, id): context = req.environ['nova.context'] net = db.network_get(context, id) return {'network': network_dict(net)}
def index(self, req):
    """List networks visible to the requesting tenant with VM details.

    Builds a network-id -> instance-ids map from the virtual interface
    table, then, for networks that are shared (no project) or owned by
    the requesting project, returns each network's id/name/cidr plus,
    for every VM of this project on that network, its name, uuid,
    vm_state, fixed IPs and floating IPs.
    """
    project_id = str(req.environ['HTTP_X_TENANT_ID'])
    context = req.environ['nova.context']
    context = context.elevated()
    networks = db.network_get_all(context)
    nets = [dict(network.iteritems()) for network in networks]
    virtual_interfaces = db.virtual_interface_get_all(context)
    vifs = [dict(vif.iteritems()) for vif in virtual_interfaces]

    # Map network id -> list of attached instance ids, e.g. {1: [1, 2]}
    net_vm_dict = {}
    for vif in vifs:
        net_id = int(vif['network_id'])
        vm_id = int(vif['instance_id'])
        net_vm_dict.setdefault(net_id, []).append(vm_id)

    net_list = []
    for net_id in net_vm_dict:
        network_ref = db.network_get(context, net_id)
        net = dict(network_ref.iteritems())
        # Include shared (project-less) networks and this project's own.
        if net['project_id'] is None or net['project_id'] == project_id:
            net_info = {}
            net_info['id'] = str(net['uuid'])
            net_info['name'] = str(net['label'])
            net_info['cidr'] = str(net['cidr'])
            net_info['vm'] = []
            net_list.append(net_info)
            for vm_id in net_vm_dict[net_id]:
                instance_ref = db.instance_get(context, vm_id)
                vm = dict(instance_ref.iteritems())
                if vm['project_id'] != project_id:
                    continue
                vm_info = {}
                vm_info['name'] = str(vm['hostname'])
                vm_info['id'] = str(vm['uuid'])
                vm_info['vm_state'] = str(vm['vm_state'])
                # Fixed IPs assigned to this VM.
                fixed_ips = db.fixed_ip_get_by_instance(context, vm_id)
                fixed_ip_info = [str(dict(ip.iteritems())['address'])
                                 for ip in fixed_ips]
                vm_info['fixed_ips'] = fixed_ip_info
                # Floating IPs mapped onto each fixed IP; missing
                # mappings are expected and simply skipped.
                floating_ip_info = []
                for fixed_ip in fixed_ip_info:
                    try:
                        floating_ips = db.floating_ip_get_by_address(
                            context, fixed_ip)
                    except exception.FloatingIpNotFoundForAddress:
                        continue
                    if floating_ips is not None:
                        for floating_ip in floating_ips:
                            # FIX: original read the stale `ip` loop
                            # variable and indexed the iterator before
                            # dict() was applied, which always raised.
                            floating_ip_info.append(str(
                                dict(floating_ip.iteritems())['address']))
                vm_info['floating_ips'] = floating_ip_info
                net_info['vm'].append(vm_info)
    return {'networks': net_list}