def test_get_by_service_host_and_instantiated(self):
    """Filtering by service_host AND instantiated must intersect both.

    Uses the deprecated-alias-free assertEqual (assertEquals is a
    legacy unittest alias removed in Python 3.12).
    """
    self._create_nodes()
    r = db.bm_node_get_all(self.context,
                           service_host='host1', instantiated=True)
    self.assertEqual(len(r), 0)
    r = db.bm_node_get_all(self.context,
                           service_host='host1', instantiated=False)
    self.assertEqual(len(r), 1)
    self.assertEqual(r[0]['pm_address'], '0')
    r = db.bm_node_get_all(self.context,
                           service_host='host2', instantiated=True)
    self.assertEqual(len(r), 1)
    self.assertEqual(r[0]['pm_address'], '1')
    r = db.bm_node_get_all(self.context,
                           service_host='host2', instantiated=False)
    self.assertEqual(len(r), 4)
    pmaddrs = [x['pm_address'] for x in r]
    self.assertIn('2', pmaddrs)
    self.assertIn('3', pmaddrs)
    self.assertIn('4', pmaddrs)
    self.assertIn('5', pmaddrs)
def test_get_all(self):
    """bm_node_get_all returns [] when empty and all rows afterwards."""
    r = db.bm_node_get_all(self.context)
    self.assertEqual(r, [])  # assertEquals is a deprecated alias
    self._create_nodes()
    r = db.bm_node_get_all(self.context)
    self.assertEqual(len(r), 6)
def test_index(self):
    """index() lists every node; a NodeNotFound yields [] interfaces."""
    node_list = [{"id": 1}, {"id": 2}]
    iface_list = [{"id": 1, "address": "11:11:11:11:11:11"},
                  {"id": 2, "address": "22:22:22:22:22:22"}]
    self.mox.StubOutWithMock(db, "bm_node_get_all")
    self.mox.StubOutWithMock(db, "bm_interface_get_all_by_bm_node_id")
    # Record the expected call sequence: node 1's interface lookup fails,
    # node 2's succeeds.
    db.bm_node_get_all(self.context).AndReturn(node_list)
    db.bm_interface_get_all_by_bm_node_id(
        self.context, 1).AndRaise(exception.NodeNotFound(node_id=1))
    db.bm_interface_get_all_by_bm_node_id(
        self.context, 2).AndReturn(iface_list)
    self.mox.ReplayAll()
    response = self.controller.index(self.request)
    returned = response["nodes"]
    self.assertEqual(2, len(returned))
    self.assertEqual([], returned[0]["interfaces"])
    self.assertEqual(2, len(returned[1]["interfaces"]))
def get_host_stats(self, refresh=False):
    """Return one capability dict per baremetal node on this host."""
    context = nova_context.get_admin_context()
    stats = []
    for node in db.bm_node_get_all(context, service_host=CONF.host):
        res = self._node_resource(node)
        nodename = str(node["uuid"])
        data = {
            "vcpus": res["vcpus"],
            "vcpus_used": res["vcpus_used"],
            "cpu_info": res["cpu_info"],
            "disk_total": res["local_gb"],
            "disk_used": res["local_gb_used"],
            "disk_available": res["local_gb"] - res["local_gb_used"],
            "host_memory_total": res["memory_mb"],
            "host_memory_free": res["memory_mb"] - res["memory_mb_used"],
            "hypervisor_type": res["hypervisor_type"],
            "hypervisor_version": res["hypervisor_version"],
            "hypervisor_hostname": nodename,
            "supported_instances": self.supported_instances,
        }
        data.update(self.extra_specs)
        data["host"] = CONF.host
        data["node"] = nodename
        # TODO(NTTdocomo): put node's extra specs here
        stats.append(data)
    return stats
def test_destroy_with_interfaces(self):
    """Destroying a node cascade-deletes only its own interfaces.

    Uses assertEqual throughout (the original mixed the deprecated
    assertEquals alias with assertEqual).
    """
    self._create_nodes()
    if_a_id = db.bm_interface_create(self.context, self.ids[0],
                                     'aa:aa:aa:aa:aa:aa', None, None)
    if_b_id = db.bm_interface_create(self.context, self.ids[0],
                                     'bb:bb:bb:bb:bb:bb', None, None)
    if_x_id = db.bm_interface_create(self.context, self.ids[1],
                                     '11:22:33:44:55:66', None, None)
    db.bm_node_destroy(self.context, self.ids[0])
    self.assertRaises(exception.NovaException,
                      db.bm_interface_get, self.context, if_a_id)
    self.assertRaises(exception.NovaException,
                      db.bm_interface_get, self.context, if_b_id)
    # Another node's interface is not affected
    if_x = db.bm_interface_get(self.context, if_x_id)
    self.assertEqual(self.ids[1], if_x['bm_node_id'])
    self.assertRaises(exception.NodeNotFound,
                      db.bm_node_get, self.context, self.ids[0])
    r = db.bm_node_get_all(self.context)
    self.assertEqual(len(r), 5)
def index(self, req):
    """List baremetal nodes, proxying to Ironic when it is in use."""
    context = req.environ['nova.context']
    authorize(context)
    nodes = []
    if _use_ironic():
        # proxy command to Ironic
        icli = _get_ironic_client()
        for inode in icli.node.list(detail=True):
            props = inode.properties
            nodes.append({'id': inode.uuid,
                          'interfaces': [],
                          'host': 'IRONIC MANAGED',
                          'task_state': inode.provision_state,
                          'cpus': props.get('cpus', 0),
                          'memory_mb': props.get('memory_mb', 0),
                          'disk_gb': props.get('local_gb', 0)})
    else:
        # use nova baremetal
        for db_node in db.bm_node_get_all(context):
            try:
                ifs = db.bm_interface_get_all_by_bm_node_id(
                    context, db_node['id'])
            except exception.NodeNotFound:
                # Node vanished between queries: list it without interfaces.
                ifs = []
            node = self._node_dict(db_node)
            node['interfaces'] = [_interface_dict(i) for i in ifs]
            nodes.append(node)
    return {'nodes': nodes}
def get_host_stats(self, refresh=False):
    """Return a list of capability dicts, one per baremetal node."""
    admin_ctx = nova_context.get_admin_context()
    node_rows = db.bm_node_get_all(admin_ctx, service_host=CONF.host)
    caps = []
    for node in node_rows:
        res = self._node_resource(node)
        nodename = str(node['uuid'])
        entry = {
            'vcpus': res['vcpus'],
            'vcpus_used': res['vcpus_used'],
            'cpu_info': res['cpu_info'],
            'disk_total': res['local_gb'],
            'disk_used': res['local_gb_used'],
            'disk_available': res['local_gb'] - res['local_gb_used'],
            'host_memory_total': res['memory_mb'],
            'host_memory_free': res['memory_mb'] - res['memory_mb_used'],
            'hypervisor_type': res['hypervisor_type'],
            'hypervisor_version': res['hypervisor_version'],
            'hypervisor_hostname': nodename,
            'supported_instances': self.supported_instances,
        }
        entry.update(self.extra_specs)
        entry['host'] = CONF.host
        entry['node'] = nodename
        # TODO(NTTdocomo): put node's extra specs here
        caps.append(entry)
    return caps
def test_sort(self):
    """sort=True returns host2's nodes in a deterministic order.

    Renamed the result variable from the ambiguous single letter ``l``
    (flake8 E741 — easily confused with ``1``) to ``nodes``.
    """
    self._create_nodes()
    nodes = db.bm_node_get_all(self.context, 'host2', sort=True)
    self.assertEqual(len(nodes), 5)
    self.assertEqual(nodes[0]['pm_address'], '2')
    self.assertEqual(nodes[1]['pm_address'], '3')
    self.assertEqual(nodes[2]['pm_address'], '4')
    self.assertEqual(nodes[3]['pm_address'], '5')
    self.assertEqual(nodes[4]['pm_address'], '1')
def test_get_by_instantiated(self):
    """instantiated=None means no filter; True/False partition nodes."""
    self._create_nodes()
    r = db.bm_node_get_all(self.context, instantiated=None)
    self.assertEqual(len(r), 6)  # assertEquals is a deprecated alias
    r = db.bm_node_get_all(self.context, instantiated=True)
    self.assertEqual(len(r), 1)
    self.assertEqual(r[0]['pm_address'], '1')
    r = db.bm_node_get_all(self.context, instantiated=False)
    self.assertEqual(len(r), 5)
    pmaddrs = [x['pm_address'] for x in r]
    self.assertIn('0', pmaddrs)
    self.assertIn('2', pmaddrs)
    self.assertIn('3', pmaddrs)
    self.assertIn('4', pmaddrs)
    self.assertIn('5', pmaddrs)
def test_destroy(self):
    """Destroyed node is gone from get and from get_all.

    assertIsNone replaces assertTrue(r is None) (clearer failure
    message) and assertEqual replaces the deprecated assertEquals.
    """
    self._create_nodes()
    db.bm_node_destroy(self.context, self.ids[0])
    r = db.bm_node_get(self.context, self.ids[0])
    self.assertIsNone(r)
    r = db.bm_node_get_all(self.context)
    self.assertEqual(len(r), 5)
def test_index(self):
    """index() returns all nodes; NodeNotFound gives [] interfaces."""
    fake_nodes = [{'id': 1}, {'id': 2}]
    fake_ifaces = [{'id': 1, 'address': '11:11:11:11:11:11'},
                   {'id': 2, 'address': '22:22:22:22:22:22'}]
    self.mox.StubOutWithMock(db, 'bm_node_get_all')
    self.mox.StubOutWithMock(db, 'bm_interface_get_all_by_bm_node_id')
    # Expected sequence: node 1's interface lookup raises, node 2's works.
    db.bm_node_get_all(self.context).AndReturn(fake_nodes)
    db.bm_interface_get_all_by_bm_node_id(
        self.context, 1).AndRaise(exception.NodeNotFound(node_id=1))
    db.bm_interface_get_all_by_bm_node_id(
        self.context, 2).AndReturn(fake_ifaces)
    self.mox.ReplayAll()
    result = self.controller.index(self.request)
    listed = result['nodes']
    self.assertEqual(2, len(listed))
    self.assertEqual([], listed[0]['interfaces'])
    self.assertEqual(2, len(listed[1]['interfaces']))
def _get_baremetal_node_by_instance_name(instance_name):
    """Return the bm node whose instance is named instance_name, or None."""
    context = nova_context.get_admin_context()
    nodes = bmdb.bm_node_get_all(context, service_host=FLAGS.host)
    for node in nodes:
        uuid = node['instance_uuid']
        if not uuid:
            # Node has no instance assigned; skip it.
            continue
        try:
            inst = db.instance_get_by_uuid(context, uuid)
        except exception.InstanceNotFound:
            # Stale uuid on the node; keep scanning.
            continue
        if inst['name'] == instance_name:
            return node
    return None
def test_get_by_service_host(self):
    """service_host=None means no filter; unknown host returns []."""
    self._create_nodes()
    r = db.bm_node_get_all(self.context, service_host=None)
    self.assertEqual(len(r), 6)  # assertEquals is a deprecated alias
    r = db.bm_node_get_all(self.context, service_host="host1")
    self.assertEqual(len(r), 1)
    self.assertEqual(r[0]["pm_address"], "0")
    r = db.bm_node_get_all(self.context, service_host="host2")
    self.assertEqual(len(r), 5)
    pmaddrs = [x["pm_address"] for x in r]
    self.assertIn("1", pmaddrs)
    self.assertIn("2", pmaddrs)
    self.assertIn("3", pmaddrs)
    self.assertIn("4", pmaddrs)
    self.assertIn("5", pmaddrs)
    r = db.bm_node_get_all(self.context, service_host="host3")
    self.assertEqual(r, [])
def test_destroy(self):
    """After destroy, bm_node_get raises and get_all shrinks by one."""
    self._create_nodes()
    db.bm_node_destroy(self.context, self.ids[0])
    self.assertRaises(exception.NodeNotFound,
                      db.bm_node_get, self.context, self.ids[0])
    r = db.bm_node_get_all(self.context)
    self.assertEqual(len(r), 5)  # assertEquals is a deprecated alias
def test_get_by_service_host(self):
    """service_host filter: None selects all, unknown host selects none."""
    self._create_nodes()
    r = db.bm_node_get_all(self.context, service_host=None)
    self.assertEqual(len(r), 6)  # assertEquals is a deprecated alias
    r = db.bm_node_get_all(self.context, service_host="host1")
    self.assertEqual(len(r), 1)
    self.assertEqual(r[0]['pm_address'], '0')
    r = db.bm_node_get_all(self.context, service_host="host2")
    self.assertEqual(len(r), 5)
    pmaddrs = [x['pm_address'] for x in r]
    self.assertIn('1', pmaddrs)
    self.assertIn('2', pmaddrs)
    self.assertIn('3', pmaddrs)
    self.assertIn('4', pmaddrs)
    self.assertIn('5', pmaddrs)
    r = db.bm_node_get_all(self.context, service_host="host3")
    self.assertEqual(r, [])
def _test_index(self, ext_status=False):
    """index() tolerates NodeNotFound and checks the ext-status flag.

    The loop variable over ``nodes`` was unused; it is now ``_``.
    """
    nodes = [{'id': 1}, {'id': 2}, ]
    interfaces = [{'id': 1, 'address': '11:11:11:11:11:11'},
                  {'id': 2, 'address': '22:22:22:22:22:22'}, ]
    self.mox.StubOutWithMock(db, 'bm_node_get_all')
    self.mox.StubOutWithMock(db, 'bm_interface_get_all_by_bm_node_id')
    db.bm_node_get_all(self.context).AndReturn(nodes)
    db.bm_interface_get_all_by_bm_node_id(self.context, 1).\
        AndRaise(exception.NodeNotFound(node_id=1))
    # One extension-status lookup is expected per node.
    for _ in nodes:
        self.ext_mgr.is_loaded('os-baremetal-ext-status').\
            AndReturn(ext_status)
    db.bm_interface_get_all_by_bm_node_id(self.context, 2).\
        AndReturn(interfaces)
    self.mox.ReplayAll()
    res_dict = self.controller.index(self.request)
    self.assertEqual(2, len(res_dict['nodes']))
    self.assertEqual([], res_dict['nodes'][0]['interfaces'])
    self.assertEqual(2, len(res_dict['nodes'][1]['interfaces']))
def index(self, req):
    """Return every baremetal node together with its interfaces."""
    context = req.environ["nova.context"]
    authorize(context)
    nodes = []
    for db_node in db.bm_node_get_all(context):
        try:
            ifs = db.bm_interface_get_all_by_bm_node_id(
                context, db_node["id"])
        except exception.NodeNotFound:
            # A node can disappear between the two queries; list it
            # with no interfaces rather than failing the whole index.
            ifs = []
        node = _node_dict(db_node)
        node["interfaces"] = [_interface_dict(i) for i in ifs]
        nodes.append(node)
    return {"nodes": nodes}
def index(self, req):
    """Return every baremetal node together with its interfaces.

    Fix: the interface lookup raises NodeNotFound (as the sibling
    tests' mox expectations show for bm_interface_get_all_by_bm_node_id),
    not InstanceNotFound — catching the wrong type let the listing
    crash when a node disappeared between the two queries.
    """
    context = req.environ['nova.context']
    authorize(context)
    nodes_from_db = db.bm_node_get_all(context)
    nodes = []
    for node_from_db in nodes_from_db:
        try:
            ifs = db.bm_interface_get_all_by_bm_node_id(
                context, node_from_db['id'])
        except exception.NodeNotFound:
            # Node vanished mid-listing: show it with no interfaces.
            ifs = []
        node = _node_dict(node_from_db)
        node['interfaces'] = [_interface_dict(i) for i in ifs]
        nodes.append(node)
    return {'nodes': nodes}
def _get_baremetal_node_by_instance_name(virtapi, instance_name):
    """Return the bm node whose instance has the given name.

    :raises: exception.InstanceNotFound when no node matches.

    Fix: InstanceNotFound's message template takes the ``instance_id``
    keyword; passing the name positionally replaced the exception's
    message instead of filling in the template.
    """
    context = nova_context.get_admin_context()
    # TODO(deva): optimize this DB query.
    #             I don't think it should be _get_all
    for node in bmdb.bm_node_get_all(context, service_host=CONF.host):
        if not node['instance_uuid']:
            continue
        try:
            inst = virtapi.instance_get_by_uuid(context,
                                                node['instance_uuid'])
            if inst['name'] == instance_name:
                return node
        except exception.InstanceNotFound:
            # Stale uuid on the node record; keep scanning.
            continue
    # raise exception if we found no matching instance
    raise exception.InstanceNotFound(instance_id=instance_name)
def update_from_compute_node(self, compute, context=None):
    """Update information about a host from its compute_node info."""
    if self.baremetal_compute:
        service_host = compute['service']['host']
        bm_nodes = bmdb.bm_node_get_all(context,
                                        service_host=service_host)
        for n in bm_nodes:
            if not n['instance_uuid']:
                self.available_nodes.append(n)
        # This ordering stands in for scheduler-side weighting: sort by
        # cpus (primary) then memory_mb (secondary), both descending.
        # A single stable sort on the (cpus, memory_mb) tuple is
        # equivalent to the two successive stable sorts it replaces.
        self.available_nodes = sorted(
            self.available_nodes,
            key=lambda n: (n['cpus'], n['memory_mb']),
            reverse=True)
        if self.available_nodes:
            bm_node = self.available_nodes[0]
        else:
            # No free node: report zero capacity.
            bm_node = {'local_gb': 0, 'memory_mb': 0, 'cpus': 0}
        all_disk_mb = bm_node['local_gb'] * 1024
        all_ram_mb = bm_node['memory_mb']
        vcpus_total = bm_node['cpus']
    else:
        all_disk_mb = compute['local_gb'] * 1024
        all_ram_mb = compute['memory_mb']
        vcpus_total = compute['vcpus']
    if FLAGS.reserved_host_disk_mb > 0:
        all_disk_mb -= FLAGS.reserved_host_disk_mb
    if FLAGS.reserved_host_memory_mb > 0:
        all_ram_mb -= FLAGS.reserved_host_memory_mb
    self.free_ram_mb = all_ram_mb
    self.total_usable_ram_mb = all_ram_mb
    self.free_disk_mb = all_disk_mb
    self.vcpus_total = vcpus_total
def get_available_nodes(self, refresh=False):
    """Return the uuid (as str) of every bm node on this service host."""
    context = nova_context.get_admin_context()
    node_rows = db.bm_node_get_all(context, service_host=CONF.host)
    uuids = []
    for row in node_rows:
        uuids.append(str(row['uuid']))
    return uuids
def _fullbuild(conn):
    """Rebuild all tenant/network packet filters from current DB state.

    Collects filter bodies per (tenant, network) for every baremetal
    node with an instance, then replaces the existing filters wholesale.
    """
    LOG.debug('_fullbuild begin')
    # {tenant_id: {network_id: [filter_body, ...]}}
    tenants_networks_filters = {}

    def _extend(tenant_id, network_id, filter_bodys):
        # Accumulate filter bodies, creating nested entries lazily.
        if tenant_id not in tenants_networks_filters:
            tenants_networks_filters[tenant_id] = {}
        if network_id not in tenants_networks_filters[tenant_id]:
            tenants_networks_filters[tenant_id][network_id] = []
        tenants_networks_filters[tenant_id][network_id].extend(filter_bodys)

    ctxt = context.get_admin_context()
    hosts = bmdb.bm_node_get_all(ctxt)
    for t in hosts:
        if not t.instance_id:
            continue
        LOG.debug('to id=%s instance_id=%s', t.id, t.instance_id)
        ti = db.instance_get(ctxt, t.instance_id)
        # DHCP from the instance
        for (in_port, network_uuid, mac, _) \
                in _from_bm_node(ti.id, ti.project_id):
            filter_bodys = []
            filter_bodys.extend(_build_allow_dhcp_client(in_port, mac))
            filter_bodys.extend(_build_deny_dhcp_server(in_port))
            LOG.debug("filter_bodys: %s", filter_bodys)
            _extend(ti.project_id, network_uuid, filter_bodys)
        # from external host to the instance
        LOG.debug('from=* to.id=%s', t.id)
        for (_, network_uuid, _, t_ips) in _from_bm_node(ti.id,
                                                         ti.project_id):
            filter_bodys = []
            for t_ip in t_ips:
                for sg in db.security_group_get_by_instance(ctxt, ti.id):
                    rules = db.security_group_rule_get_by_security_group(
                        ctxt, sg.id)
                    for rule in rules:
                        rule_f = _build_sg_rule_filter(
                            t_ip + "/32", rule,
                            EXTERNAL_SECURITY_GROUP_PRIORITY)
                        filter_bodys.extend(rule_f)
                # default drop after the per-rule allows for this address
                rule_f = _build_default_drop_filter(t_ip + "/32")
                filter_bodys.extend(rule_f)
            LOG.debug("filter_bodys: %s", filter_bodys)
            _extend(ti.project_id, network_uuid, filter_bodys)
        # Just to make lines short...
        _sg_rules = db.security_group_rule_get_by_security_group
        _build = _build_full_sg_rule_filter
        # from other instances to the instance
        for f in hosts:
            LOG.debug('from.id=%s to.id=%s', f.id, t.id)
            if f.id == t.id:
                continue
            if not f.instance_id:
                continue
            fi = db.instance_get(ctxt, f.instance_id)
            LOG.debug('from id=%s instance_id=%s', f.id, f.instance_id)
            for (in_port, network_uuid, mac, f_ips) in _from_bm_node(
                    fi.id, fi.project_id):
                filter_bodys = []
                for (_, _, _, t_ips) in _from_bm_node(ti.id,
                                                      ti.project_id):
                    for f_ip in f_ips:
                        for t_ip in t_ips:
                            for sg in db.security_group_get_by_instance(
                                    ctxt, ti.id):
                                rules = _sg_rules(ctxt, sg.id)
                                for rule in rules:
                                    # Skip rules whose cidr excludes
                                    # the source address.
                                    if rule.cidr and not _in_cidr(
                                            f_ip, rule.cidr):
                                        continue
                                    rule_f = _build(in_port, mac,
                                                    f_ip + "/32",
                                                    t_ip + "/32", rule)
                                    filter_bodys.extend(rule_f)
                            rule_f = _build_full_default_drop_filter(
                                in_port, mac, f_ip + "/32", t_ip + "/32")
                            filter_bodys.extend(rule_f)
                LOG.debug("filter_bodys: %s", filter_bodys)
                _extend(fi.project_id, network_uuid, filter_bodys)
    LOG.debug('begin update filters')
    # Replace existing filters wholesale: delete the old set, create new.
    for (tenant_id, nf) in tenants_networks_filters.iteritems():
        for (network_id, filter_bodys) in nf.iteritems():
            old_fids = _list_filters(conn, tenant_id, network_id)
            LOG.debug("delete filters tenant_id=%s network_id=%s ids=\n%s",
                      tenant_id, network_id, _pp(old_fids))
            _delete_filters(conn, tenant_id, network_id, old_fids)
            LOG.debug("create filters tenant_id=%s network_id=%s bodys=\n%s",
                      tenant_id, network_id, _pp(filter_bodys))
            _create_filters(conn, tenant_id, network_id, filter_bodys)
    LOG.debug('end update filters')
    LOG.debug('_fullbuild end')
def _get_baremetal_nodes(context):
    """Fetch every baremetal node registered for this compute host."""
    return db.bm_node_get_all(context, service_host=CONF.host)
def _fullbuild(conn):
    """Rebuild all tenant/network packet filters from current DB state.

    Collects filter bodies per (tenant, network) for every baremetal
    node with an instance, then replaces the existing filters wholesale.
    """
    LOG.debug('_fullbuild begin')
    # {tenant_id: {network_id: [filter_body, ...]}}
    tenants_networks_filters = {}

    def _extend(tenant_id, network_id, filter_bodys):
        # Accumulate filter bodies, creating nested entries lazily.
        if tenant_id not in tenants_networks_filters:
            tenants_networks_filters[tenant_id] = {}
        if network_id not in tenants_networks_filters[tenant_id]:
            tenants_networks_filters[tenant_id][network_id] = []
        tenants_networks_filters[tenant_id][network_id].extend(filter_bodys)

    ctxt = context.get_admin_context()
    hosts = bmdb.bm_node_get_all(ctxt)
    for t in hosts:
        if not t.instance_id:
            continue
        LOG.debug('to id=%s instance_id=%s', t.id, t.instance_id)
        ti = db.instance_get(ctxt, t.instance_id)
        # DHCP from the instance
        for (in_port, network_uuid, mac, _) \
                in _from_bm_node(ti.id, ti.project_id):
            filter_bodys = []
            filter_bodys.extend(_build_allow_dhcp_client(in_port, mac))
            filter_bodys.extend(_build_deny_dhcp_server(in_port))
            LOG.debug("filter_bodys: %s", filter_bodys)
            _extend(ti.project_id, network_uuid, filter_bodys)
        # from external host to the instance
        LOG.debug('from=* to.id=%s', t.id)
        for (_, network_uuid, _, t_ips) in _from_bm_node(ti.id,
                                                         ti.project_id):
            filter_bodys = []
            for t_ip in t_ips:
                for sg in db.security_group_get_by_instance(ctxt, ti.id):
                    rules = db.security_group_rule_get_by_security_group(
                        ctxt, sg.id)
                    for rule in rules:
                        rule_f = _build_sg_rule_filter(
                            t_ip + "/32", rule,
                            EXTERNAL_SECURITY_GROUP_PRIORITY)
                        filter_bodys.extend(rule_f)
                # default drop after the per-rule allows for this address
                rule_f = _build_default_drop_filter(t_ip + "/32")
                filter_bodys.extend(rule_f)
            LOG.debug("filter_bodys: %s", filter_bodys)
            _extend(ti.project_id, network_uuid, filter_bodys)
        # Just to make lines short...
        _sg_rules = db.security_group_rule_get_by_security_group
        _build = _build_full_sg_rule_filter
        # from other instances to the instance
        for f in hosts:
            LOG.debug('from.id=%s to.id=%s', f.id, t.id)
            if f.id == t.id:
                continue
            if not f.instance_id:
                continue
            fi = db.instance_get(ctxt, f.instance_id)
            LOG.debug('from id=%s instance_id=%s', f.id, f.instance_id)
            for (in_port, network_uuid, mac, f_ips) in _from_bm_node(
                    fi.id, fi.project_id):
                filter_bodys = []
                for (_, _, _, t_ips) in _from_bm_node(ti.id,
                                                      ti.project_id):
                    for f_ip in f_ips:
                        for t_ip in t_ips:
                            for sg in db.security_group_get_by_instance(
                                    ctxt, ti.id):
                                rules = _sg_rules(ctxt, sg.id)
                                for rule in rules:
                                    # Skip rules whose cidr excludes
                                    # the source address.
                                    if rule.cidr and not _in_cidr(
                                            f_ip, rule.cidr):
                                        continue
                                    rule_f = _build(in_port, mac,
                                                    f_ip + "/32",
                                                    t_ip + "/32", rule)
                                    filter_bodys.extend(rule_f)
                            rule_f = _build_full_default_drop_filter(
                                in_port, mac, f_ip + "/32", t_ip + "/32")
                            filter_bodys.extend(rule_f)
                LOG.debug("filter_bodys: %s", filter_bodys)
                _extend(fi.project_id, network_uuid, filter_bodys)
    LOG.debug('begin update filters')
    # Replace existing filters wholesale: delete the old set, create new.
    for (tenant_id, nf) in tenants_networks_filters.iteritems():
        for (network_id, filter_bodys) in nf.iteritems():
            old_fids = _list_filters(conn, tenant_id, network_id)
            LOG.debug("delete filters tenant_id=%s network_id=%s ids=\n%s",
                      tenant_id, network_id, _pp(old_fids))
            _delete_filters(conn, tenant_id, network_id, old_fids)
            LOG.debug("create filters tenant_id=%s network_id=%s bodys=\n%s",
                      tenant_id, network_id, _pp(filter_bodys))
            _create_filters(conn, tenant_id, network_id, filter_bodys)
    LOG.debug('end update filters')
    LOG.debug('_fullbuild end')
def _get_baremetal_nodes(context):
    """Fetch every baremetal node registered for this compute host."""
    return bmdb.bm_node_get_all(context, service_host=FLAGS.host)
def test_get_all0(self):
    """With no nodes created, bm_node_get_all returns an empty list."""
    r = db.bm_node_get_all(self.context)
    self.assertEqual(r, [])  # assertEquals is a deprecated alias
def get_available_nodes(self, refresh=False):
    """Return the uuid (as str) of every bm node on this service host."""
    context = nova_context.get_admin_context()
    rows = db.bm_node_get_all(context, service_host=CONF.host)
    return [str(row['uuid']) for row in rows]