def test_set_network_hosts(self):
    """Verify set_network_hosts assigns a host to an unset network.

    Mox expectations are recorded in call order: the manager must fetch
    all networks, set a host on networks[0], then update its db record.
    """
    self.mox.StubOutWithMock(db, "network_get_all")
    self.mox.StubOutWithMock(db, "network_set_host")
    self.mox.StubOutWithMock(db, "network_update")
    # one network without a host comes back from the db
    db.network_get_all(mox.IgnoreArg()).AndReturn([networks[0]])
    db.network_set_host(mox.IgnoreArg(),
                        networks[0]["id"],
                        mox.IgnoreArg()).AndReturn(HOST)
    db.network_update(mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg())
    self.mox.ReplayAll()
    self.network.set_network_hosts(None)
def test_get_project_networks(self):
    """get_project_networks must query with an elevated context and
    drop networks whose project_id is unset."""
    context = self.mox.CreateMockAnything()
    context.elevated().AndReturn('elevated')
    # one project network and one unowned (global) network
    networks = [{'project_id': 1}, {'project_id': None}]
    self.mox.StubOutWithMock(db, 'network_get_all')
    db.network_get_all('elevated').AndReturn(networks)
    self.mox.ReplayAll()
    values = self.ipam.get_project_networks(context)
    # only the network with a truthy project_id should be returned
    self.assertEquals(values, [networks[0]])
def get_project_networks(self, admin_context):
    """Return all networks associated with a project.

    Networks whose project_id is unset are skipped; an empty list is
    returned when no networks exist at all.
    """
    try:
        all_nets = db.network_get_all(admin_context.elevated())
    except exception.NoNetworksFound:
        return []
    # keep only networks that have a project_id set
    project_nets = []
    for net in all_nets:
        if net['project_id']:
            project_nets.append(net)
    return project_nets
def get_new_cidr(self, size=256):
    """Return an unused CIDR derived from the existing networks.

    Existing networks are assumed to be of the form a.b.X.0/mask where
    the third octet X identifies the subnet.  The first value of X in
    0..253 not used by any existing network is combined with the first
    network's prefix.

    :param size: number of addresses wanted; the netmask is derived as
                 32 - log2(size) (e.g. size=256 -> /24).
    :returns: a CIDR string such as "10.0.7.0/24".
    :raises exception.NoMoreNetworks: when no networks exist to derive
            a prefix from, or every subnet id 0..253 is already taken.
    """
    mask = int(32 - math.log(size, 2))
    # BUG FIX: the db API needs a context object, not the ``context``
    # module itself (presumably imported at file top -- the other
    # callers in this file all pass get_admin_context()).
    admin_context = context.get_admin_context()
    cidrs = []
    used_subnets = set()
    # collect all cidrs; if cidr=10.0.3.0/24 then subnet id is 3
    for network in db.network_get_all(admin_context):
        cidr_str = str(network.cidr)
        cidrs.append(cidr_str)
        used_subnets.add(int(cidr_str.split('.')[2]))
    if not cidrs:
        # original code raised IndexError here; fail explicitly instead
        raise exception.NoMoreNetworks()
    # find the first unused subnet id
    for subnet_id in range(0, 254):
        if subnet_id not in used_subnets:
            break
    else:
        # BUG FIX: the original silently reused subnet 253 when every
        # id was taken; refuse instead of handing out a duplicate.
        raise exception.NoMoreNetworks()
    octets = cidrs[0].split('.')
    octets[2] = str(subnet_id)
    octets[-1] = '0/' + str(mask)
    return '.'.join(octets)
def setUp(self):
    """Prepare a clean database and a QuantumManager for each test."""
    super(QuantumNovaTestCase, self).setUp()
    # Create an actual project -- with this we will touch more of
    # the code in QuantumManager (related to fetching networks, etc)
    admin_ctx = context.get_admin_context()
    for name in ["fake_project1", "fake_project2"]:
        db.project_create(admin_ctx, {"id": name, "name": name})
    self.net_man = quantum_manager.QuantumManager(
        ipam_lib="nova.network.quantum.nova_ipam_lib",
        q_conn=FakeQuantumClientConnection())
    # Tests seem to create some networks by default, which
    # we don't want. So we delete them.
    elevated = context.RequestContext("user1", "fake_project1").elevated()
    for net in db.network_get_all(elevated):
        db.network_delete_safe(elevated, net["id"])
    # Other unit tests (e.g., test_compute.py) have a nasty habit of
    # creating fixed IPs and not cleaning up, which can confuse these
    # tests, so we remove all existing fixed ips before starting.
    session = get_session()
    stale_fips = session.query(models.FixedIp).all()
    with session.begin():
        for fip in stale_fips:
            session.delete(fip)
def list(self): """List all created networks.""" _fmt = "%-5s\t%-18s\t%-15s\t%-15s\t%-15s\t%-15s\t%-15s\t%-15s\t%-15s" print _fmt % (_('id'), _('IPv4'), _('IPv6'), _('start address'), _('DNS1'), _('DNS2'), _('VlanID'), _('project'), _("uuid")) try: # Since network_get_all can throw exception.NoNetworksFound # for this command to show a nice result, this exception # should be caught and handled as such. networks = db.network_get_all(context.get_admin_context()) except exception.NoNetworksFound: print _('No networks found') else: for network in networks: print _fmt % (network.id, network.cidr, network.cidr_v6, network.dhcp_start, network.dns1, network.dns2, network.vlan, network.project_id, network.uuid)
def list(self, host=None):
    """Lists all fixed ips (optionally by host)."""
    ctxt = context.get_admin_context()
    try:
        if host is None:
            fixed_ips = db.fixed_ip_get_all(ctxt)
        else:
            fixed_ips = db.fixed_ip_get_by_host(ctxt, host)
    except exception.NotFound as ex:
        print(_("error: %s") % ex)
        # non-zero exit status for the command-line caller
        return(2)
    # index instances by uuid so each fixed ip row can be matched fast
    instances = db.instance_get_all(context.get_admin_context())
    instances_by_uuid = {}
    for instance in instances:
        instances_by_uuid[instance['uuid']] = instance
    print("%-18s\t%-15s\t%-15s\t%s" % (_('network'),
                                       _('IP address'),
                                       _('hostname'),
                                       _('host')))
    all_networks = {}
    try:
        # use network_get_all to retrieve all existing networks
        # this is to ensure that IPs associated with deleted networks
        # will not throw exceptions.
        for network in db.network_get_all(context.get_admin_context()):
            all_networks[network.id] = network
    except exception.NoNetworksFound:
        # do not have any networks, so even if there are IPs, these
        # IPs should have been deleted ones, so return.
        print(_('No fixed IP found.'))
        return
    has_ip = False
    for fixed_ip in fixed_ips:
        hostname = None
        host = None
        # fixed ips whose network has been deleted are skipped
        network = all_networks.get(fixed_ip['network_id'])
        if network:
            has_ip = True
            if fixed_ip.get('instance_uuid'):
                instance = instances_by_uuid.get(fixed_ip['instance_uuid'])
                if instance:
                    hostname = instance['hostname']
                    host = instance['host']
                else:
                    # the ip claims an instance that no longer exists
                    print(_('WARNING: fixed ip %s allocated to missing'
                            ' instance') % str(fixed_ip['address']))
            print("%-18s\t%-15s\t%-15s\t%s" % (
                    network['cidr'],
                    fixed_ip['address'],
                    hostname,
                    host))
    if not has_ip:
        print(_('No fixed IP found.'))
def _delete_nets(self):
    """Delete every entry of the module-level ``networks`` list.

    Each entry's uuid is looked up in the db by label before being
    handed to delete_network.
    """
    for net in networks:
        ctx = context.RequestContext("user1", net["project_id"])
        for db_net in db.network_get_all(ctx.elevated()):
            if db_net["label"] == net["label"]:
                net["uuid"] = db_net["uuid"]
        self.net_man.delete_network(ctx, None, net["uuid"])
def _delete_nets(self):
    """Remove all module-level test networks via the network manager."""
    for entry in networks:
        req_ctx = context.RequestContext('user1', entry['project_id'])
        # resolve the uuid of the matching db network by label
        matches = [n for n in db.network_get_all(req_ctx.elevated())
                   if n['label'] == entry['label']]
        for match in matches:
            entry['uuid'] = match['uuid']
        self.net_man.delete_network(req_ctx, None, entry['uuid'])
def setup(): import mox # Fail fast if you don't have mox. Workaround for bug 810424 from nova import rpc # Register rpc_backend before fake_flags sets it FLAGS.register_opts(rpc.rpc_opts) from nova import context from nova import db from nova.db import migration from nova.network import manager as network_manager from nova.tests import fake_flags fake_flags.set_defaults(FLAGS) rpc.register_opts(FLAGS) if FLAGS.sql_connection == "sqlite://": if migration.db_version() > migration.INIT_VERSION: return else: testdb = os.path.join(FLAGS.state_path, FLAGS.sqlite_db) if os.path.exists(testdb): return migration.db_sync() ctxt = context.get_admin_context() network = network_manager.VlanManager() bridge_interface = FLAGS.flat_interface or FLAGS.vlan_interface network.create_networks( ctxt, label="test", cidr=FLAGS.fixed_range, multi_host=FLAGS.multi_host, num_networks=FLAGS.num_networks, network_size=FLAGS.network_size, cidr_v6=FLAGS.fixed_range_v6, gateway=FLAGS.gateway, gateway_v6=FLAGS.gateway_v6, bridge=FLAGS.flat_network_bridge, bridge_interface=bridge_interface, vpn_start=FLAGS.vpn_start, vlan_start=FLAGS.vlan_start, dns1=FLAGS.flat_network_dns, ) for net in db.network_get_all(ctxt): network.set_network_host(ctxt, net) if FLAGS.sql_connection == "sqlite://": global _DB engine = get_engine() conn = engine.connect() _DB = "".join(line for line in conn.connection.iterdump()) else: cleandb = os.path.join(FLAGS.state_path, FLAGS.sqlite_clean_db) shutil.copyfile(testdb, cleandb)
def index(self, req):
    """Return the networks visible to the caller.

    An admin with no ``tenant_id`` query parameter sees every network;
    a request naming a tenant sees only that tenant's networks; any
    other combination is a 404.
    """
    query = urlparse.parse_qs(req.environ['QUERY_STRING'])
    tenant_id = query.get('tenant_id', [None])[0]
    context = req.environ['nova.context']
    LOG.audit(_("Getting networks for project %s"), tenant_id or '<all>')
    if context.is_admin and not tenant_id:
        networks = db.network_get_all(context)
    elif tenant_id:
        networks = db.project_get_networks(context, tenant_id,
                                           associate=False)
    else:
        raise exc.HTTPNotFound()
    return {'networks': [network_dict(net) for net in networks]}
def setUp(self):
    """Build a QuantumManager wired to a fake Quantum client and stub
    out the dhcp driver hooks so no real dnsmasq is touched."""
    super(QuantumNovaTestCase, self).setUp()
    self.flags(quantum_use_dhcp=True)
    self.flags(l3_lib="nova.network.l3.LinuxNetL3")
    linuxdrv = "nova.network.linux_net.LinuxOVSInterfaceDriver"
    self.flags(linuxnet_interface_driver=linuxdrv)
    fc = fake_client.FakeClient(LOG)
    qc = quantum_connection.QuantumClientConnection(client=fc)
    self.net_man = quantum_manager.QuantumManager(
        ipam_lib="nova.network.quantum.nova_ipam_lib",
        q_conn=qc)

    # no-op stand-ins for the dhcp driver hooks; arities match the
    # driver methods they replace below
    def func(arg1, arg2):
        pass

    def func2(arg1, arg2, arg3):
        pass

    def func1(arg1):
        pass

    self.net_man.driver.update_dhcp_hostfile_with_text = func
    self.net_man.driver.restart_dhcp = func2
    self.net_man.driver.kill_dhcp = func1

    # Tests seem to create some networks by default, which
    # we don't want. So we delete them.
    ctx = context.RequestContext('user1', 'fake_project1').elevated()
    for n in db.network_get_all(ctx):
        db.network_delete_safe(ctx, n['id'])

    # Other unit tests (e.g., test_compute.py) have a nasty
    # habit of creating fixed IPs and not cleaning up, which
    # can confuse these tests, so we remove all existing fixed
    # ips before starting.
    session = sql_session.get_session()
    result = session.query(models.FixedIp).all()
    with session.begin():
        for fip_ref in result:
            session.delete(fip_ref)
    self.net_man.init_host()
def test_allocate_and_deallocate_instance_dynamic(self):
    """Allocate then free an instance against every valid network,
    verifying the expected network labels are used."""
    self._create_nets()
    project_id = "fake_project2"
    ctx = context.RequestContext('user1', project_id)
    valid_networks = self.net_man.ipam.get_project_and_global_net_ids(
        ctx, project_id)
    requested_networks = [(n[0], None) for n in valid_networks]
    self.net_man.validate_networks(ctx, requested_networks)
    # map uuid -> label so we can state which labels to expect
    label_map = dict((n['uuid'], n['label'])
                     for n in db.network_get_all(ctx.elevated()))
    expected_labels = [label_map[uuid]
                       for uuid, _fixed_ip in requested_networks]
    self._allocate_and_deallocate_instance(project_id,
                                           requested_networks,
                                           expected_labels)
    self._delete_nets()
def setup():
    """One-time suite setup: migrate the test database, create the
    default test network and record a clean snapshot for restoration."""
    fake_flags.set_defaults(CONF)

    if CONF.sql_connection == "sqlite://":
        # in-memory database: already migrated means setup already ran
        if migration.db_version() > migration.INIT_VERSION:
            return
    else:
        # file-backed database: an existing file means setup already ran
        testdb = os.path.join(CONF.state_path, CONF.sqlite_db)
        if os.path.exists(testdb):
            return
    migration.db_sync()
    ctxt = context.get_admin_context()
    network = network_manager.VlanManager()
    bridge_interface = CONF.flat_interface or CONF.vlan_interface
    network.create_networks(ctxt,
                            label="test",
                            cidr=CONF.fixed_range,
                            multi_host=CONF.multi_host,
                            num_networks=CONF.num_networks,
                            network_size=CONF.network_size,
                            cidr_v6=CONF.fixed_range_v6,
                            gateway=CONF.gateway,
                            gateway_v6=CONF.gateway_v6,
                            bridge=CONF.flat_network_bridge,
                            bridge_interface=bridge_interface,
                            vpn_start=CONF.vpn_start,
                            vlan_start=CONF.vlan_start,
                            dns1=CONF.flat_network_dns,
                            )
    # assign a host to every freshly created network
    for net in db.network_get_all(ctxt):
        network.set_network_host(ctxt, net)

    if CONF.sql_connection == "sqlite://":
        # snapshot the in-memory database as a SQL dump so tests can
        # restore a pristine schema without re-running migrations
        global _DB
        engine = get_engine()
        conn = engine.connect()
        _DB = "".join(line for line in conn.connection.iterdump())
    else:
        # keep a pristine copy of the db file for tests to restore from
        cleandb = os.path.join(CONF.state_path, CONF.sqlite_clean_db)
        shutil.copyfile(testdb, cleandb)
def setup():
    """Create the sqlite test database (if absent), build the default
    test network, and save a clean db copy for later restoration."""
    import os
    import shutil

    from nova import context
    from nova import flags
    from nova import db
    from nova.db import migration
    from nova.network import manager as network_manager
    from nova.tests import fake_flags

    FLAGS = flags.FLAGS

    testdb = os.path.join(FLAGS.state_path, FLAGS.sqlite_db)
    # an existing database file means setup has already run
    if os.path.exists(testdb):
        return
    migration.db_sync()
    ctxt = context.get_admin_context()
    network = network_manager.VlanManager()
    bridge_interface = FLAGS.flat_interface or FLAGS.vlan_interface
    network.create_networks(ctxt,
                            label="test",
                            cidr=FLAGS.fixed_range,
                            multi_host=FLAGS.multi_host,
                            num_networks=FLAGS.num_networks,
                            network_size=FLAGS.network_size,
                            cidr_v6=FLAGS.fixed_range_v6,
                            gateway=FLAGS.gateway,
                            gateway_v6=FLAGS.gateway_v6,
                            bridge=FLAGS.flat_network_bridge,
                            bridge_interface=bridge_interface,
                            vpn_start=FLAGS.vpn_start,
                            vlan_start=FLAGS.vlan_start,
                            dns1=FLAGS.flat_network_dns,
                            )
    # assign a host to each freshly created network
    for net in db.network_get_all(ctxt):
        network.set_network_host(ctxt, net)
    # keep a pristine copy of the database for tests to restore from
    cleandb = os.path.join(FLAGS.state_path, FLAGS.sqlite_clean_db)
    shutil.copyfile(testdb, cleandb)
def setUp(self):
    """Create the sample VLAN network(s) and assign each to this host."""
    super(SampleNetworks, self).setUp()
    ctxt = context.get_admin_context()
    network = network_manager.VlanManager(host=self.host)
    bridge_interface = CONF.flat_interface or CONF.vlan_interface
    network.create_networks(ctxt,
                            label='test',
                            cidr='10.0.0.0/8',
                            multi_host=CONF.multi_host,
                            num_networks=CONF.num_networks,
                            network_size=CONF.network_size,
                            cidr_v6=CONF.fixed_range_v6,
                            gateway=CONF.gateway,
                            gateway_v6=CONF.gateway_v6,
                            bridge=CONF.flat_network_bridge,
                            bridge_interface=bridge_interface,
                            vpn_start=CONF.vpn_start,
                            vlan_start=CONF.vlan_start,
                            dns1=CONF.flat_network_dns)
    # each created network needs a host assigned before it can be used
    for net in db.network_get_all(ctxt):
        network.set_network_host(ctxt, net)
def post_migrations(self):
    """Any addition steps that are needed outside of the migrations."""
    ctxt = context.get_admin_context()
    network = network_manager.VlanManager()
    bridge_interface = CONF.flat_interface or CONF.vlan_interface
    # create the default test network now that the schema is in place
    network.create_networks(ctxt,
                            label='test',
                            cidr=CONF.fixed_range,
                            multi_host=CONF.multi_host,
                            num_networks=CONF.num_networks,
                            network_size=CONF.network_size,
                            cidr_v6=CONF.fixed_range_v6,
                            gateway=CONF.gateway,
                            gateway_v6=CONF.gateway_v6,
                            bridge=CONF.flat_network_bridge,
                            bridge_interface=bridge_interface,
                            vpn_start=CONF.vpn_start,
                            vlan_start=CONF.vlan_start,
                            dns1=CONF.flat_network_dns)
    # assign a host to every freshly created network
    for net in db.network_get_all(ctxt):
        network.set_network_host(ctxt, net)
def setup(): import mox # Fail fast if you don't have mox. Workaround for bug 810424 import os import shutil from nova import context from nova import flags from nova import db from nova.db import migration from nova.network import manager as network_manager from nova.tests import fake_flags FLAGS = flags.FLAGS testdb = os.path.join(FLAGS.state_path, FLAGS.sqlite_db) if os.path.exists(testdb): return migration.db_sync() ctxt = context.get_admin_context() network = network_manager.VlanManager() bridge_interface = FLAGS.flat_interface or FLAGS.vlan_interface network.create_networks(ctxt, label='test', cidr=FLAGS.fixed_range, multi_host=FLAGS.multi_host, num_networks=FLAGS.num_networks, network_size=FLAGS.network_size, cidr_v6=FLAGS.fixed_range_v6, gateway=FLAGS.gateway, gateway_v6=FLAGS.gateway_v6, bridge=FLAGS.flat_network_bridge, bridge_interface=bridge_interface, vpn_start=FLAGS.vpn_start, vlan_start=FLAGS.vlan_start, dns1=FLAGS.flat_network_dns) for net in db.network_get_all(ctxt): network.set_network_host(ctxt, net) cleandb = os.path.join(FLAGS.state_path, FLAGS.sqlite_clean_db) shutil.copyfile(testdb, cleandb)
def _validate_nw_info(self, nw_info, expected_net_labels):
    """Check that nw_info has one VIF per expected network label, that
    each VIF's addresses come from the right CIDRs, and that each
    network has a DHCP port and refuses deletion while ports are live.
    """
    self.assertEquals(len(nw_info), len(expected_net_labels))
    ctx = context.RequestContext('user1', 'foo').elevated()
    all_net_map = {}
    for n in db.network_get_all(ctx):
        all_net_map[n['label']] = n
    for i in range(0, len(nw_info)):
        vif = nw_info[i]
        net = all_net_map[expected_net_labels[i]]
        # simple test assumes that each starting prefix is unique
        expected_v4_cidr_start = net['cidr'].split(".")[0].lower()
        expected_v6_cidr_start = net['cidr_v6'].split(":")[0].lower()
        for subnet in vif['network']['subnets']:
            addr = subnet['ips'][0]['address']
            # BUG FIX: assertTrue(expected, actual) treated the second
            # argument as a failure *message* and never compared the
            # two values; assertEquals performs the intended check.
            if subnet['version'] == 4:
                address_start = addr.split(".")[0].lower()
                self.assertEquals(expected_v4_cidr_start, address_start)
            else:
                address_start = addr.split(":")[0].lower()
                self.assertEquals(expected_v6_cidr_start, address_start)
    # confirm that there is a DHCP device on corresponding net
    for l in expected_net_labels:
        n = all_net_map[l]
        tenant_id = (n['project_id'] or
                     FLAGS.quantum_default_tenant_id)
        ports = self.net_man.q_conn.get_attached_ports(
            tenant_id, n['uuid'])
        self.assertEquals(len(ports), 2)  # gw + instance VIF
        # make sure we aren't allowed to delete network with
        # active port
        self.assertRaises(exception.NetworkBusy,
                          self.net_man.delete_network,
                          ctx, None, n['uuid'])
def setup():
    """One-time suite setup: migrate the test database, create the
    default test network and record a clean snapshot for restoration."""
    fake_flags.set_defaults(CONF)

    if CONF.sql_connection == "sqlite://":
        # in-memory database: already migrated means setup already ran
        if migration.db_version() > migration.INIT_VERSION:
            return
    else:
        # file-backed database: an existing file means setup already ran
        testdb = os.path.join(CONF.state_path, CONF.sqlite_db)
        if os.path.exists(testdb):
            return
    migration.db_sync()
    ctxt = context.get_admin_context()
    network = network_manager.VlanManager()
    bridge_interface = CONF.flat_interface or CONF.vlan_interface
    network.create_networks(ctxt,
                            label='test',
                            cidr=CONF.fixed_range,
                            multi_host=CONF.multi_host,
                            num_networks=CONF.num_networks,
                            network_size=CONF.network_size,
                            cidr_v6=CONF.fixed_range_v6,
                            gateway=CONF.gateway,
                            gateway_v6=CONF.gateway_v6,
                            bridge=CONF.flat_network_bridge,
                            bridge_interface=bridge_interface,
                            vpn_start=CONF.vpn_start,
                            vlan_start=CONF.vlan_start,
                            dns1=CONF.flat_network_dns)
    # assign a host to every freshly created network
    for net in db.network_get_all(ctxt):
        network.set_network_host(ctxt, net)

    if CONF.sql_connection == "sqlite://":
        # snapshot the in-memory database as a SQL dump so tests can
        # restore a pristine schema without re-running migrations
        global _DB
        engine = get_engine()
        conn = engine.connect()
        _DB = "".join(line for line in conn.connection.iterdump())
    else:
        # keep a pristine copy of the db file for tests to restore from
        cleandb = os.path.join(CONF.state_path, CONF.sqlite_clean_db)
        shutil.copyfile(testdb, cleandb)
def setUp(self):
    """Reset network and fixed-ip state, then build a QuantumManager
    backed by a fake client connection."""
    super(QuantumNovaTestCase, self).setUp()
    self.net_man = quantum_manager.QuantumManager(
        ipam_lib="nova.network.quantum.nova_ipam_lib",
        q_conn=FakeQuantumClientConnection())
    # Tests seem to create some networks by default, which
    # we don't want. So we delete them.
    elevated = context.RequestContext("user1", "fake_project1").elevated()
    for net in db.network_get_all(elevated):
        db.network_delete_safe(elevated, net["id"])
    # Other unit tests (e.g., test_compute.py) have a nasty habit of
    # creating fixed IPs and not cleaning up, which can confuse these
    # tests, so we remove all existing fixed ips before starting.
    session = get_session()
    leftover_fips = session.query(models.FixedIp).all()
    with session.begin():
        for fip in leftover_fips:
            session.delete(fip)
def list(self): """List all created networks""" _fmt = "%-5s\t%-18s\t%-15s\t%-15s\t%-15s\t%-15s\t%-15s\t%-15s\t%-15s" print _fmt % (_('id'), _('IPv4'), _('IPv6'), _('start address'), _('DNS1'), _('DNS2'), _('VlanID'), _('project'), _("uuid")) for network in db.network_get_all(context): print _fmt % (network.id, network.cidr, network.cidr_v6, network.dhcp_start, network.dns1, network.dns2, network.vlan, network.project_id, network.uuid) print FLAGS.fixed_range
def setUp(self):
    """Delete pre-existing networks and fixed IPs, then create the
    QuantumManager under test."""
    super(QuantumNovaTestCase, self).setUp()
    self.net_man = quantum_manager.QuantumManager(
        ipam_lib="nova.network.quantum.nova_ipam_lib",
        q_conn=FakeQuantumClientConnection())
    # Default networks created by other fixtures would interfere with
    # these tests, so drop them all.
    ctx = context.RequestContext('user1', 'fake_project1').elevated()
    for network in db.network_get_all(ctx):
        db.network_delete_safe(ctx, network['id'])
    # Purge fixed IPs leaked by other unit tests (e.g. test_compute.py)
    # that would otherwise confuse these tests.
    session = get_session()
    stale = session.query(models.FixedIp).all()
    with session.begin():
        for fip_ref in stale:
            session.delete(fip_ref)
def index(self, req):
    """List the calling tenant's networks together with the VMs (and
    their fixed/floating IPs) attached to each network.

    Networks with no project_id (shared) or owned by the caller's
    tenant are included; only the caller's own instances are listed.
    """
    project_id = str(req.environ['HTTP_X_TENANT_ID'])
    context = req.environ['nova.context']
    context = context.elevated()

    # Map network_id -> [instance_id, ...] from the vif table.
    net_vm_dict = {}
    for vif in db.virtual_interface_get_all(context):
        net_id = int(vif['network_id'])
        vm_id = int(vif['instance_id'])
        net_vm_dict.setdefault(net_id, []).append(vm_id)

    net_list = []
    for net_id, vm_ids in net_vm_dict.iteritems():
        net = dict(db.network_get(context, net_id).iteritems())
        # include shared networks and this tenant's own networks only
        if net['project_id'] is not None and net['project_id'] != project_id:
            continue
        net_info = {'id': str(net['uuid']),
                    'name': str(net['label']),
                    'cidr': str(net['cidr']),
                    'vm': []}
        for vm_id in vm_ids:
            vm = dict(db.instance_get(context, vm_id).iteritems())
            if vm['project_id'] == project_id:
                net_info['vm'].append(self._vm_info(context, vm, vm_id))
        net_list.append(net_info)
    return {'networks': net_list}

def _vm_info(self, context, vm, vm_id):
    """Build the per-instance dict: name, id, state and IP lists."""
    vm_info = {'name': str(vm['hostname']),
               'id': str(vm['uuid']),
               'vm_state': str(vm['vm_state'])}
    fixed_ip_info = [str(dict(ip.iteritems())['address'])
                     for ip in db.fixed_ip_get_by_instance(context, vm_id)]
    vm_info['fixed_ips'] = fixed_ip_info
    floating_ip_info = []
    for fixed_ip in fixed_ip_info:
        try:
            floating_ip = db.floating_ip_get_by_address(context, fixed_ip)
        except exception.FloatingIpNotFoundForAddress:
            continue
        # BUG FIX: the original subscripted .iteritems() and reused a
        # stale loop variable, raising TypeError whenever a floating IP
        # was found.  NOTE(review): this assumes
        # floating_ip_get_by_address returns a single record -- confirm
        # against the db api before relying on it.
        if floating_ip is not None:
            floating_ip_info.append(str(floating_ip['address']))
    vm_info['floating_ips'] = floating_ip_info
    return vm_info
def get_all_host_states(self, context):
    """Returns a list of HostStates that represents all the hosts
    the HostManager knows about. Also, each of the consumable resources
    in HostState are pre-populated and adjusted based on data in the db.
    """
    # index active nova-compute services by host name
    service_refs = {service.host: service
                    for service in objects.ServiceList.get_by_binary(
                        context, 'nova-compute')}
    # Get resource usage across the available compute nodes:
    compute_nodes = objects.ComputeNodeList.get_all(context)
    seen_nodes = set()
    for compute in compute_nodes:
        service = service_refs.get(compute.host)
        if not service:
            # node exists but no compute service backs it; skip it
            LOG.warning(
                _LW("No compute service record found for host %(host)s"),
                {'host': compute.host})
            continue
        host = compute.host
        node = compute.hypervisor_hostname
        state_key = (host, node)
        # Pf9: Get networks available on compute node
        network_ids = [n['network_id']
                       for n in db.compute_node_get_all_networks_pf9(
                           context, compute['id'])]
        networks = []
        if network_ids:
            # narrow the full network list down to this node's networks
            all_networks = db.network_get_all(context)
            networks = filter(lambda n: n['id'] in network_ids,
                              all_networks)
        else:
            LOG.info(_LI("No networks on host {host}".\
                format(host=compute['id'])))
        # PF9 end
        # reuse the cached HostState when present, otherwise create one
        host_state = self.host_state_map.get(state_key)
        if host_state:
            host_state.update_from_compute_node(compute)
        else:
            host_state = self.host_state_cls(host, node, compute=compute)
            self.host_state_map[state_key] = host_state
        # We force to update the aggregates info each time a new request
        # comes in, because some changes on the aggregates could have been
        # happening after setting this field for the first time
        host_state.aggregates = [self.aggs_by_id[agg_id] for agg_id in
                                 self.host_aggregates_map[host_state.host]]
        host_state.update_service(dict(service))
        # PF9 change
        host_state.update_from_network_info(networks)
        self._add_instance_info(context, compute, host_state)
        seen_nodes.add(state_key)

    # remove compute nodes from host_state_map if they are not active
    dead_nodes = set(self.host_state_map.keys()) - seen_nodes
    for state_key in dead_nodes:
        host, node = state_key
        LOG.info(_LI("Removing dead compute node %(host)s:%(node)s "
                     "from scheduler"),
                 {'host': host, 'node': node})
        del self.host_state_map[state_key]
    return six.itervalues(self.host_state_map)
def get_all(cls, context, project_only='allow_none'):
    """Fetch every network visible under *project_only* and wrap the
    db rows in a list object of Network instances."""
    rows = db.network_get_all(context, project_only)
    network_list = cls(context)
    return obj_base.obj_make_list(context, network_list,
                                  objects.Network, rows)