def __init__(self, *args, **kwargs): """ Initializer for the Nova Client Service """ super(NovaSvc, self).__init__(*args, **kwargs) self.clients = RegionClient('compute', self._get_nova_client, self.token, self.region)
    def test_two_regions(self, _mock_endpoints):
        """
        In a two-region setup, a call to get one client must return just
        one client, while a call to get all clients should return a list
        with that client and one other.  Also, calling them in either
        order (list first or single client first) must generate no
        unnecessary calls to the create function.
        """
        _mock_endpoints.return_value = [
            {'region': randomidentifier(), 'url': randomurl()},
            {'region': randomidentifier(), 'url': randomurl()}]

        #
        # Get all clients first, then get just a single client
        #
        create_func = mock.Mock()
        client = RegionClient(randomidentifier(), create_func,
                              get_mock_token(), randomidentifier())

        client_list = list(client.get_clients())
        self.assertEqual(2, create_func.call_count)

        single_client = client.get_client()
        self.assertEqual(2, create_func.call_count)

        self.assertEqual(2, len(client_list))
        self.assertIn(single_client, client_list)

        #
        # Get single client first, then get all clients
        #
        create_func = mock.Mock()
        client = RegionClient(randomidentifier(), create_func,
                              get_mock_token(), randomidentifier())

        single_client = client.get_client()
        self.assertEqual(1, create_func.call_count)

        client_list = list(client.get_clients())
        self.assertEqual(2, create_func.call_count)

        self.assertEqual(2, len(client_list))
        self.assertIn(single_client, client_list)
    def test_single_region(self, _mock_endpoints):
        """
        In a single-region setup, a call to get all clients must return a
        list containing just the single client, and calling them in either
        order (the list first then the single client, or vice-versa) must
        result in just a single call to the create function.
        """
        _mock_endpoints.return_value = [{'region': randomidentifier(),
                                         'url': randomurl()}]

        #
        # Get all clients first, then get just a single client
        #
        create_func = mock.Mock()
        client = RegionClient(randomidentifier(), create_func,
                              get_mock_token(), randomidentifier())

        client_list = list(client.get_clients())
        self.assertEqual(1, create_func.call_count)

        single_client = client.get_client()
        self.assertEqual(1, create_func.call_count)

        self.assertEqual(1, len(client_list))
        self.assertEqual(single_client, client_list[0])

        #
        # Get one client first, then get all clients
        #
        create_func = mock.Mock()
        client = RegionClient(randomidentifier(), create_func,
                              get_mock_token(), randomidentifier())

        single_client = client.get_client()
        self.assertEqual(1, create_func.call_count)

        client_list = list(client.get_clients())
        self.assertEqual(1, create_func.call_count)

        self.assertEqual(1, len(client_list))
        self.assertEqual(single_client, client_list[0])
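
# A minimal sketch (comments only, not part of the test suite) of how the
# service plugins below consume RegionClient.  The (client, region) tuple
# shape is taken from NovaSvc/EONSvc in this repo; the "ExampleSvc" name,
# its 'compute' endpoint type, and some_api_call() are illustrative
# assumptions only.
#
#     class ExampleSvc(service.SvcBase):
#         def __init__(self, *args, **kwargs):
#             super(ExampleSvc, self).__init__(*args, **kwargs)
#             self.clients = RegionClient('compute', self._create_client,
#                                         self.token, self.region)
#
#         def single_region_call(self):
#             # get_client() returns a single (client, region) tuple
#             client, region = self.clients.get_client()
#             return client.some_api_call()
#
#         def all_regions_call(self):
#             # get_clients() yields one (client, region) tuple per region,
#             # creating each underlying client at most once
#             return [client.some_api_call()
#                     for client, region in self.clients.get_clients()]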
class NovaSvc(service.SvcBase):
    """
    The ``target`` value for this plugin is ``nova``. See :ref:`rest-api`
    for a full description of the request and response formats.
    """
    SUGGESTED_POLL_INTERVAL = 5

    def __init__(self, *args, **kwargs):
        """
        Initializer for the Nova Client Service
        """
        super(NovaSvc, self).__init__(*args, **kwargs)

        self.clients = RegionClient('compute', self._get_nova_client,
                                    self.token, self.region)

    def _get_nova_client(self, region=None, **kwargs):
        try:
            return novaclient.Client(
                "2",
                session=self.token_helper.get_session(),
                endpoint_type=get_conf("services.endpoint_type",
                                       default="internalURL"),
                region_name=region,
                user_agent=api.USER_AGENT)
        except Exception as e:
            LOG.error("Error creating nova client : %s ", e)
            raise requests.exceptions.HTTPError(e.message)

    @service.expose('hypervisor-stats')
    def hypervisor_stats(self):
        """
        Get the statistics for cpu, memory, and storage across all
        hypervisors.

        In a multi-region environment, if no specific region is requested,
        then data from all regions will be returned.

        Request format::

            "target": "nova",
            "operation": "hypervisor-stats"
        """
        req_keys = ('vcpus_used', 'vcpus', 'local_gb_used', 'local_gb',
                    'memory_mb_used', 'memory_mb')
        sums = {key: 0 for key in req_keys}
        for client, region in self.clients.get_clients():
            stats = client.hypervisors.statistics()
            for key in req_keys:
                sums[key] += getattr(stats, key, 0)

        # TODO: float is probably not necessary, check UI code
        return {
            'used': {
                'cpu': sums['vcpus_used'],
                'memory': sums['memory_mb_used'],
                'storage': sums['local_gb_used']
            },
            'total': {
                'cpu': float(sums['vcpus']),
                'memory': float(sums['memory_mb']),
                'storage': sums['local_gb']
            }
        }

    @service.expose('service-list')
    def service_list(self):
        """
        Return a list summarizing how many compute nodes are present, how
        many are up, and how many are in an error state.

        In a multi-region environment, if no specific region is requested,
        then data from all regions will be returned.

        Request format::

            "target": "nova",
            "operation": "service-list"
        """
        compute_list = []
        for client, region in self.clients.get_clients():
            compute_list.extend(client.services.list(binary="nova-compute"))

        up_nodes = len(
            [compute for compute in compute_list if compute.state == "up"])
        down_nodes = len(compute_list) - up_nodes

        return {
            "ok": up_nodes,
            "error": down_nodes,
            "total": len(compute_list)
        }

    def _get_historical_data(self, start_date, end_date, resources):
        startdate = datetime.strptime(start_date, "%Y-%m-%d")
        enddate = datetime.strptime(end_date, "%Y-%m-%d")
        resources_count = {}
        today = 0
        # Pre-populate one (day_offset, count) entry per day in the range,
        # where offset 0 is end_date and negative offsets count backwards
        while startdate <= enddate:
            resources_count[enddate.isoformat()] = (today, 0)
            enddate = enddate + timedelta(days=-1)
            today = today - 1

        # Overwrite the zero counts with the actual per-day counts
        for resource in resources:
            event_date = datetime.strptime(resource[0],
                                           "%Y-%m-%d").isoformat()
            day, count = resources_count[event_date]
            resources_count[event_date] = day, resource[1]

        response = sorted(resources_count.values(), key=lambda tup: tup[0])
        average = round(
            (sum(x[1] for x in response) + 0.0) / len(resources_count), 2)
        data = {'data': response, 'average': average}
        return data

    def _deleted_servers(self, start_date):
        options = {
            'all_tenants': True,
            'deleted': True,
            "changes-since": start_date,
            'sort_key': 'deleted_at',
            'sort_dir': 'asc'
        }
        deleted = []
        for client, region in self.clients.get_clients():
            servers = client.servers.list(search_opts=options)
            for server in servers:
                output = vars(server)
                deleted_at = output.get('OS-SRV-USG:terminated_at') or \
                    output.get('updated')
                if deleted_at:
                    deleted_at = deleted_at.split("T")[0]
                    deleted.append(deleted_at)

        deleted = Counter(deleted).items()
        return deleted

    def _created_servers(self, start_date):
        options = {
            'all_tenants': True,
            "changes-since": start_date,
            'sort_key': 'created_at',
            'sort_dir': 'asc'
        }
        created = []
        for client, region in self.clients.get_clients():
            servers = client.servers.list(search_opts=options)
            for server in servers:
                output = vars(server)
                launched_at = output.get('created') or \
                    output.get('OS-SRV-USG:launched_at')
                if launched_at:
                    launched_at = launched_at.split("T")[0]
                    if launched_at >= start_date:
                        created.append(launched_at)

        created = Counter(created).items()
        return created

    @service.expose('servers-list')
    def servers_list(self):
        """
        Get historical information about virtual machines created and
        deleted in a given time range.

        In a multi-region environment, if no specific region is requested,
        then data from all regions will be returned.

        Request format::

            "target": "nova",
            "operation": "servers-list",
            "start_date": "2016-01-01T00:00:00",
            "end_date": "2016-02-01T00:00:00"
        """
        start_date = self.request[api.DATA]['start_date']
        end_date = self.request[api.DATA]['end_date']
        start_date = datetime.strptime(
            start_date, "%Y-%m-%d").isoformat().split("T")[0]
        created_vms = self._created_servers(start_date)
        deleted_vms = self._deleted_servers(start_date)
        created_data = self._get_historical_data(start_date, end_date,
                                                 created_vms)
        deleted_data = self._get_historical_data(start_date, end_date,
                                                 deleted_vms)
        return {'created': created_data, 'deleted': deleted_data}

    @service.expose('hypervisor-list')
    def _hypervisor_list(self):
        """
        Get list of hypervisors with details, optionally including their
        ping status from monasca.

        In a multi-region environment, if no specific region is requested,
        then data from all regions will be returned.

        Request format::

            "target": "nova",
            "operation": "hypervisor-list",
            "include_status": True
        """
        ret_hyp_list = []
        include_status = self.data.get('include_status', True)
        for client, region in self.clients.get_clients():
            hypervisor_list = client.hypervisors.list(detailed=True)
            for hypervisor in hypervisor_list:
                hypervisor_data = {}
                hypervisor_data["allocated_cpu"] = hypervisor.vcpus_used
                hypervisor_data["total_cpu"] = hypervisor.vcpus
                hypervisor_data["allocated_memory"] = \
                    hypervisor.memory_mb_used
                hypervisor_data["total_memory"] = hypervisor.memory_mb
                hypervisor_data["allocated_storage"] = \
                    hypervisor.local_gb_used
                hypervisor_data["total_storage"] = hypervisor.local_gb
                hypervisor_data["instances"] = hypervisor.running_vms
                name = getattr(hypervisor, hypervisor.NAME_ATTR,
                               hypervisor.host_ip)
                hypervisor_data["name"] = name
                hypervisor_data["hypervisor_id"] = hypervisor.id
                hypervisor_data["status"] = hypervisor.status
                hypervisor_data["state"] = hypervisor.state
                hypervisor_data["type"] = hypervisor.hypervisor_type
                hypervisor_data["service_host"] = hypervisor.service['host']
                hypervisor_data["hypervisor_hostname"] = \
                    hypervisor.hypervisor_hostname
                hypervisor_data["region"] = region
                ret_hyp_list.append(hypervisor_data)

        # Get host_alive_status:ping results for all compute hosts found
        if include_status:
            hostnames = [hd['service_host'] for hd in ret_hyp_list]
            statuses = self.call_service(target='monitor',
                                         operation='get_appliances_status',
                                         data={'hostnames': hostnames})

            # Fill in the ping status details for each of the hosts
            for hyp in ret_hyp_list:
                hyp['ping_status'] = \
                    statuses.get(hyp['service_host'], 'unknown')

        return ret_hyp_list

    @service.expose('instance-list')
    def instance_list(self):
        """
        Get list of instances and their details.

        If ``show_baremetal`` is set to ``False`` (the default), then
        baremetal instances will be excluded from the results.

        ``filter`` can be provided to return only those instances that
        match the filter, which can have the values ``dbaas``, ``msgaas``,
        ``ci``, and ``project``; if ``filter`` is ``project``, then an
        additional ``project_id`` field should be provided to control that
        filter.

        In a multi-region environment, if no specific region is requested,
        then data from all regions will be returned.

        Request format::

            "target": "nova",
            "operation": "instance-list",
            "show_baremetal": True or False,
            "filter": "dbaas"
        """
        # by default, don't show baremetal instances
        show_baremetal = self.data.get('show_baremetal', False)

        proj_list = self.call_service(target='user_group',
                                      operation='project_list')
        proj_dict = {item['id']: item['name'] for item in proj_list}

        # if the baremetal service is available and we don't want to see
        # baremetal instances, we'll need a list of baremetal instance
        # uuids so that they can be filtered from the server list
        bm_uuid_list = []
        baremetal_svc = self.token_helper.get_service_endpoint('baremetal')
        if baremetal_svc and not show_baremetal:
            bm_list = self.call_service(target='ironic',
                                        operation='node.list',
                                        region=self.region)
            bm_uuid_list = [bmi['instance_uuid'] for bmi in bm_list]

        # Create a list for the UI to use
        instance_list = []
        search_opts = {'all_tenants': True}
        for client, region in self.clients.get_clients():
            server_list = client.servers.list(search_opts=search_opts,
                                              limit=-1,
                                              detailed=True)

            # flavors tell us cpu/memory/disk allocated to the instance
            flavor_list = client.flavors.list(is_public=None, detailed=True)
            flavor_dict = {flavitem.id: flavitem for flavitem in flavor_list}

            image_list = []
            if hasattr(client, 'images'):
                image_list = client.images.list()
            elif hasattr(client, 'glance'):
                image_list = client.glance.list()
            image_dict = {imgitem.id: imgitem for imgitem in image_list}

            for nova_inst in server_list:
                # filter out any baremetal instances
                if nova_inst.id in bm_uuid_list:
                    continue

                instance = {}
                instance['name'] = nova_inst.name
                instance['status'] = nova_inst.status
                instance['host'] = nova_inst._info['OS-EXT-SRV-ATTR:host']
                instance['availability_zone'] = \
                    nova_inst._info['OS-EXT-AZ:availability_zone']
                instance['id'] = nova_inst.id
                try:
                    instance['image'] = \
                        image_dict[nova_inst.image['id']].name
                except:
                    # There are some instances tied to non-existent images
                    instance['image'] = 'UNKNOWN'
                instance['addresses'] = nova_inst.addresses
                instance['created'] = nova_inst.created
                powernum = nova_inst._info['OS-EXT-STS:power_state']
                instance['power_state'] = power_states.get(
                    powernum, "UNKNOWN[%d]" % powernum)

                # tasks states defined in nova/nova/compute/task_states.py
                instance['task_state'] = \
                    nova_inst._info['OS-EXT-STS:task_state']
                instance['key_name'] = nova_inst.key_name
                instance['metadata'] = nova_inst.metadata

                # get the project name.
                # If it's not found, just get the tenant id instead
                instance['project'] = proj_dict.get(nova_inst.tenant_id,
                                                    nova_inst.tenant_id)
                instance['tenant_id'] = nova_inst.tenant_id
                instance['region'] = region

                try:
                    flavor = flavor_dict[nova_inst.flavor['id']]
                    instance['flavor'] = flavor.name
                    instance['cpu'] = {'vcpus': flavor.vcpus}
                    instance['memory'] = {'ram': flavor.ram}
                    instance['storage'] = {'disk': flavor.disk}
                except:
                    # There are some instances tied to flavors that don't
                    # appear to show up in flavor-list
                    instance['flavor'] = None
                    instance['cpu'] = None
                    instance['memory'] = None
                    instance['storage'] = None

                self._populate_metrics(instance)
                instance_list.append(instance)

        return {'instances': instance_list}

    def _populate_metrics(self, instance):
        monasca_metrics = self.request[api.DATA].get('monasca_metrics')
        if isinstance(monasca_metrics, list):
            instance['metrics'] = dict()
            for metric in monasca_metrics:
                req_data = \
                    dict(self.request[api.DATA].get('monasca_data'))
                req_data['name'] = metric
                monasca_dimensions = \
                    self.request[api.DATA].get('monasca_dimensions')
                if isinstance(monasca_dimensions, dict):
                    req_data['dimensions'] = dict()
                    for key, value in monasca_dimensions.iteritems():
                        # if the value is a dict, it names an instance
                        # property to look up rather than a literal value
                        if isinstance(value, dict):
                            instanceKey = value.get('property')
                            req_data['dimensions'][key] = \
                                instance[instanceKey]
                        else:
                            req_data['dimensions'][key] = value
                res = self.call_service(target='monitor',
                                        operation=req_data.get(
                                            'operation',
                                            'metric_statistics'),
                                        data=req_data)
                instance['metrics'][metric] = res

    @service.expose('service-delete')
    def service_delete(self):
        """
        Delete the service, which can be specified either by ``novaid`` or
        ``hostname``.

        Request format::

            "target": "nova",
            "operation": "service-delete",
            "novaid": "ID"
        """
        nova_id = self.data.get('novaid')
        host_name = self.data.get('hostname')
        if not nova_id and not host_name:
            raise Exception("Either novaid or hostname must be "
                            "populated")

        found = False
        for client, region in self.clients.get_clients():
            compute_list = client.services.list(binary="nova-compute")
            for instance in compute_list:
                if (nova_id and instance.id == nova_id) or \
                        (not nova_id and host_name == instance.host):
                    client.services.delete(instance.id)
                    # Also break out of the outer loop when the host is found
                    found = True
                    break
            if found:
                break

        if not found:
            raise Exception(self._("Could not find service to delete"))

        return self._("Executed nova service delete on service {}").format(
            nova_id)

    @service.expose('instance-delete', is_long=True)
    def server_delete(self, validate):
        """
        Delete an instance.  This function will not return until nova has
        completed the deletion of the instance, which is to say, when nova
        no longer returns it in its list of servers.

        Request format::

            "target": "nova",
            "operation": "instance-delete",
            "instance_id": "ID"
        """
        inst_id = self.data['instance_id']
        if validate:
            for client, region in self.clients.get_clients():
                # TODO: Test that this doesn't throw an exception
                if client.servers.get(inst_id):
                    client.servers.delete(inst_id)
                    self.client_used_for_delete = client
                    break

            self.update_job_status('in progress', 25, task_id=inst_id)
            return self.SUGGESTED_POLL_INTERVAL
        else:
            # Now we call 'nova show' on that instance until it no longer
            # exists or something bad happens (keystone token times out,
            # nova has a problem, etc)
            while True:
                try:
                    self.client_used_for_delete.servers.get(inst_id)
                    sleep(self.SUGGESTED_POLL_INTERVAL)
                except NotFound:
                    # No longer see the instance, so we're good!
                    break

            self.response[api.DATA] = self._("instance {} deleted").format(
                inst_id)
            return self.response

    @classmethod
    def needs_services(cls):
        return ['compute']
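
# A worked example (as a comment sketch, not shipped code) of the
# _get_historical_data bucketing above, assuming a three-day window and the
# Counter-style (date, count) input produced by _created_servers.  The dates
# and counts are made up for illustration:
#
#     resources = [("2016-01-02", 3)]          # 3 servers created on Jan 2
#     _get_historical_data("2016-01-01", "2016-01-03", resources)
#
#     # Days are keyed by offset from end_date (0 = Jan 3, -1 = Jan 2, ...),
#     # so the expected result would be:
#     # {'data': [(-2, 0), (-1, 3), (0, 0)], 'average': 1.0}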
class EONSvc(service.SvcBase):
    """
    Provide functionality to handle all eon-related operations.  Used for
    compute node activation, deactivation, delete and list.

    The ``target`` value for this plugin is ``eon``. See :ref:`rest-api`
    for a full description of the request and response formats.
    """
    REQ_POLL_INT = 10

    def __init__(self, *args, **kwargs):
        super(EONSvc, self).__init__(*args, **kwargs)
        self.data = self.request.get(api.DATA)
        self.datadata = self.data.get(api.DATA)

        self.clients = RegionClient(ENDPOINT_TYPE, self._get_eon_client,
                                    self.token, self.region)
        self.eon_client, _region = self.clients.get_client()

    def _get_eon_client(self, region=None, url=None):
        return eonclient.get_client('2', os_token=self.token, eon_url=url,
                                    user_agent=api.USER_AGENT,
                                    insecure=get_conf("insecure"))

    @service.expose()
    def prepare_activate_template(self):
        """
        Returns an activation template for the given host type using data
        from the ardana service.

        Request format::

            "target": "eon",
            "operation": "prepare_activate_template"
        """
        type = self.datadata.get('type')
        if type == 'esxcluster':
            all_network_names = self.call_service(
                target="ardana",
                operation="do_path_operation",
                data={"path": "model/entities/networks"})
            all_network_group_names = self.call_service(
                target="ardana",
                operation="do_path_operation",
                data={"path": "model/entities/network-groups"})

            # keep only network groups tagged for neutron vlan/vxlan
            # provider networks
            filtered_network_groups = []
            for nw_group in all_network_group_names:
                if nw_group.get("tags") and isinstance(nw_group["tags"],
                                                       list):
                    for tag in nw_group['tags']:
                        if isinstance(tag, dict):
                            if 'neutron.networks.vxlan' in tag.keys() or \
                                    'neutron.networks.vlan' in tag.keys():
                                filtered_network_groups.append(
                                    nw_group.get('name'))
                        elif isinstance(tag, str):
                            if tag in ['neutron.networks.vlan',
                                       'neutron.networks.vxlan']:
                                filtered_network_groups.append(
                                    nw_group.get('name'))

            cloud_trunk_count = 0
            filtered_network_names = []
            for network in all_network_names:
                if network.get('network-group') in filtered_network_groups:
                    cloud_trunk_count += 1
                    filtered_network_names.append(network)

            return {
                "cloud_trunk": cloud_trunk_count,
                "mgmt_trunk": 1,
                "server_groups": [],
                "network_names": filtered_network_names
            }

        elif type in ['hyperv', 'hlinux', 'rhel']:
            control_planes = self.call_service(
                target="ardana",
                operation="do_path_operation",
                data={"path": "model/entities/control-planes"})

            server_roles_filters = []
            if type in ['hlinux', 'rhel']:
                server_roles_filters.append('nova-compute-kvm')
            elif type == 'hyperv':
                server_roles_filters.append('nova-compute-hyperv')

            server_roles = []
            for plane in control_planes:
                for resource in plane.get('resources'):
                    for component in resource.get('service-components'):
                        if component in server_roles_filters:
                            server_roles.append(resource)

            all_server_groups = self.call_service(
                target="ardana",
                operation="do_path_operation",
                data={"path": "model/entities/server-groups"})

            # keep only server groups that do not themselves contain
            # other server groups
            server_groups = []
            for server_group in all_server_groups:
                has_srv_grp = server_group.get("server-groups", None)
                if not has_srv_grp:
                    server_groups.append(server_group)

            response = {
                "server_roles": server_roles,
                "server_groups": server_groups
            }

            if type != 'hyperv':
                nic_mappings = self.call_service(
                    target="ardana",
                    operation="do_path_operation",
                    data={"path": "model/entities/nic-mappings"})
                response['nic_mappings'] = nic_mappings

            return response

        raise Exception(self._("Invalid Resource Type Specified"))

    def _validate_resource_state(self, id, in_progress_states, error_states):
        eonsvc_response = self.eon_client.get_resource(id)
        while eonsvc_response['state'] in in_progress_states:
            time.sleep(self.REQ_POLL_INT)
            eonsvc_response = self.eon_client.get_resource(id)
            self.update_job_status(msg=eonsvc_response,
                                   percentage_complete=10)

        if eonsvc_response['state'] in error_states:
            self.response.error(
                self._("Activation Failed for Resource with {0},{1}").format(
                    eonsvc_response.get('id', ''),
                    eonsvc_response.get('name', '')))

    @service.expose(is_long=True)
    def deactivate_resource(self, validate):
        """
        Deactivates one or more resources from eon in a long-running
        operation.

        This function uses a very strange and counter-intuitive request
        format::

            "target": "eon",
            "operation": "deactivate_resource",
            "ids" : {
                "MYID1": "True",
                "MYID2": "False",
                ...
                "MYIDN": "False",
            }

        The value of each item in the dictionary, "True" or "False",
        indicates whether the ``forced`` option will be used when calling
        eon's ``deactivate_resource`` for that id.
        """
        if validate:
            self.response[api.DATA] = {'ids': self.datadata.get('ids')}
            return self.REQ_POLL_INT

        ids = self.datadata['ids']
        for id, forced_str in ids.iteritems():
            try:
                data = {"forced": forced_str.lower() != "false"}
                self.eon_client.deactivate_resource(id, data)
                eonsvc_response = self.eon_client.get_resource(id)
                while eonsvc_response['state'] in ["deactivating"]:
                    time.sleep(self.REQ_POLL_INT)
                    eonsvc_response = self.eon_client.get_resource(id)
                    self.response[api.DATA]['ids'].\
                        update({id: {api.STATUS: api.STATUS_INPROGRESS}})
                    self.update_job_status(percentage_complete=10)

                self.response[api.DATA]['ids'].update(
                    {id: {api.STATUS: api.COMPLETE}})
            except Exception as e:
                message = ("ID:" + id + " failed with "
                           "reason " + e.details)
                LOG.exception(message)
                self.response[api.DATA]['ids'].update(
                    {id: {api.STATUS: api.STATUS_ERROR}})

        return self.response

    @service.expose(is_long=True)
    def activate_resource(self, validate):
        """
        Activates a resource using eon's ``activate_resource`` API in a
        long-running operation.

        Request format::

            "target": "eon",
            "operation": "activate_resource",
            "data" : {
                "data" : {
                    "id": "MYID",
                    "type": "MYTYPE",
                    "network_config": ...
                    ...
                }
            }
        """
        if validate:
            self.response[api.DATA] = {'id': self.datadata.get('id'),
                                       'state': 'activating'}
            return self.REQ_POLL_INT

        type_resource = self.datadata.get('type', '')
        id = self.datadata.get('id', '')
        network_config = self.datadata.get('network_config')
        self.response[api.DATA] = {'id': id, 'state': 'activating'}
        self.update_job_status(percentage_complete=10)

        if id and type_resource == 'esxcluster' and network_config:
            mgmt_trunk = network_config.get("mgmt_trunk", [])
            if mgmt_trunk and len(mgmt_trunk) > 0:
                network_config['mgmt_trunk'] = network_config[
                    'mgmt_trunk'][0]
                if network_config['mgmt_trunk'].get('server_group', None):
                    del network_config['mgmt_trunk']['server_group']
            activation_template = self.eon_client.\
                get_resource_template(type_resource, network_config)
            input_model = activation_template.get('input_model', None)
            if input_model:
                # TODO: EON needs server group as RACK1 as of now
                activation_template['input_model']['server_group'] \
                    = DEFAULT_SERVER_GROUP
            self.update_job_status(percentage_complete=20)
            self.eon_client.activate_resource(id, activation_template)
            self._validate_resource_state(
                id,
                ["activating", "provisioning", "provision-initiated"],
                ["provisioned", "imported"])

        elif id != '' and type_resource in ["hlinux", "rhel", "hyperv"] \
                and network_config:
            if bool(self.datadata.get("is_modified")):
                self.eon_client.update_resource(id, self.datadata.get(
                    'resource'))
            activation_template = self.eon_client.\
                get_resource_template(type_resource, {})
            self.update_job_status(percentage_complete=20)

            # Fix for defect: OPSCON-1279
            if type_resource == "rhel":
                if self.datadata.get("run_disk_config") == "True":
                    activation_template["skip_disk_config"] = False
                else:
                    activation_template["skip_disk_config"] = True
                if self.datadata.get("run_wipe_disks") == "True":
                    activation_template["run_wipe_disks"] = True
                else:
                    activation_template["run_wipe_disks"] = False

            input_model = activation_template.get('input_model', None)
            if not input_model:
                raise Exception(self._(
                    "Could not get Activation Template from EON"))

            input_model['nic_mappings'] = network_config.get(
                'nic_mappings', "")
            input_model['server_group'] = \
                network_config.get('server_group', "")
            input_model['server_role'] = \
                network_config.get('server_role', "")
            activation_template['input_model'] = input_model
            self.eon_client.activate_resource(id, activation_template)
            self._validate_resource_state(id,
                                          ["activating", "provisioning"],
                                          ["provisioned"])
        else:
            raise Exception(self._("Invalid Resource Type Specified or "
                                   "Empty/Invalid Network Configuration "
                                   "was sent"))

        return self.response

    @service.expose()
    def resource_list(self):
        """
        Returns a list of resources known to eon.

        Request format::

            "target": "eon",
            "operation": "resource_list",
            "type": "TYPE",
            "state": "STATE"
        """
        resource_type = self.data.get('type')
        resource_state = self.data.get('state')
        result_list = []
        for client, region in self.clients.get_clients():
            resource_list = client.get_resource_list(resource_type,
                                                     resource_state)
            for resource in resource_list:
                resource['region'] = region
                result_list.append(resource)

        return result_list

    @service.expose()
    def resource_get(self):
        """
        Returns the details of a resource from eon.

        Request format::

            "target": "eon",
            "operation": "resource_get",
            "id": "MYID"
        """
        for client, region in self.clients.get_clients():
            try:
                resource = client.get_resource(self.data['id'])
                resource['region'] = region
                return resource
            except Exception:
                pass

        raise Exception(self._("Resource not found in eon"))

    @service.expose()
    def register_compute(self):
        """
        Register a compute resource with eon.

        Request format::

            "target": "eon",
            "operation": "register_compute",
            "name": "MYNAME",
            "ip_address": "MYIP",
            "type": "MYTYPE",
            "username": "******",
            "password": "******",
            "port": "MYPORT"
        """
        resource_host_data = {
            "name": self.datadata['name'],
            "ip_address": self.datadata['ip_address'],
            "type": self.datadata['type'],
            "username": self.datadata['username'],
            "password": self.datadata['password'],
            "port": self.datadata['port']
        }

        return self.eon_client.add_resource(resource_host_data)

    @service.expose()
    def delete_resource(self):
        """
        Delete one or more compute resources from eon.

        Request format::

            "target": "eon",
            "operation": "delete_resource",
            "data": {"data": {"ids": ["MYID1", "MYID2"...]}}
        """
        if is_stdcfg():
            return self.eon_client.delete_resource(self.datadata['id'])
        else:
            datadata = self.data.get(api.DATA)
            self.response[api.DATA] = []
            ids = datadata['ids']
            for id in ids:
                try:
                    self.eon_client.delete_resource(id)
                    self.response[api.DATA].append(
                        {api.STATUS: api.COMPLETE})
                except Exception as e:
                    self.response[api.DATA].append(
                        {api.STATUS: api.STATUS_ERROR, api.DATA: e.message})
            return self.response

    @service.expose()
    def get_resource_mgr(self):
        """
        Returns the eon resource manager with the given id.

        Request format::

            "target": "eon",
            "operation": "get_resource_mgr",
            "id": "MYID"
        """
        return self.eon_client.get_resource_mgr(self.data['id'])

    @classmethod
    def needs_services(cls):
        return [ENDPOINT_TYPE, "ardana"]
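
# A hedged sketch of a full deactivate_resource payload.  The ids are
# invented for illustration, and the outer "data"/"data" nesting is an
# assumption based on how datadata is derived in __init__ and on the
# delete_resource docstring; the docstring above omits that nesting.  The
# string-valued "True"/"False" flags map to eon's ``forced`` option: any
# value other than the literal string "false" (case-insensitive) is
# treated as forced.
#
#     request = {
#         "target": "eon",
#         "operation": "deactivate_resource",
#         "data": {
#             "data": {
#                 "ids": {
#                     "example-id-1": "True",     # forced deactivation
#                     "example-id-2": "False",    # normal deactivation
#                 }
#             }
#         }
#     }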
class IronicSvc(service.SvcBase):
    """
    The ``target`` value for this plugin is ``ironic``. See :ref:`rest-api`
    for a full description of the request and response formats.

    This really should be called the baremetal service, but unfortunately,
    there is already one with that name containing services that really
    belong in eon_service.
    """

    def __init__(self, *args, **kwargs):
        super(IronicSvc, self).__init__(*args, **kwargs)

        self.clients = RegionClient('baremetal', self._get_ironic_client,
                                    self.token, self.region)

    def _get_ironic_client(self, region=None, url=None, **kwargs):
        return ironic_client.get_client(
            1,
            os_auth_token=self.token,
            ironic_url=url,
            os_region_name=region,
            user_agent=api.USER_AGENT,
            insecure=get_conf("insecure"))

    @service.expose('node.list')
    def list_nodes(self):
        """
        Request format::

            "target": "ironic",
            "operation": "node.list"

        :return: List of nodes from the ironic service
        """
        nodelist = []
        for client, region in self.clients.get_clients():
            for node in client.node.list():
                nodelist.append(node.to_dict())
        return nodelist

    @service.expose('node.get')
    def get_node(self):
        """
        Get details for a specific node.

        Request format::

            "target": "ironic",
            "operation": "node.get",
            "node_id": "MYNODEID"
        """
        for client, region in self.clients.get_clients():
            node = client.node.get(node_id=self.data['node_id'])
            if node:
                return node.to_dict()

    @service.expose('baremetal-list')
    def baremetal_list(self):
        """
        Return a list of nodes from nova and ironic.  Details are returned
        from both nova and ironic only for baremetal instances.

        Request format::

            "target": "ironic",
            "operation": "baremetal-list"
        """
        inst_list = self.call_service(target='nova',
                                      operation='instance-list',
                                      data={'show_baremetal': True},
                                      region=self.region)['instances']
        inst_dict = {item['id']: item for item in inst_list}

        agg_list = []
        for node in self.list_nodes():
            details = {
                'baremetal': node,
                'compute': inst_dict.get(node['instance_uuid'])
            }
            agg_list.append(details)
        return agg_list

    @service.expose('node.delete')
    def delete_node(self):
        """
        Deletes a node from ironic.

        Request format::

            "target": "ironic",
            "operation": "node.delete",
            "node_id": "MYNODEID"

        :returns: ``None`` when the operation completes successfully
        """
        for client, region in self.clients.get_clients():
            client.node.delete(node_id=self.data['node_id'])

    @classmethod
    def needs_services(cls):
        return ['baremetal']
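
# A sketch of the shape of one baremetal-list entry, based on the
# aggregation in baremetal_list above.  The field values shown are invented
# for illustration; the actual dicts carry whatever ironic's node.to_dict()
# and nova's instance-list operation return.
#
#     {
#         'baremetal': {             # ironic node, via node.to_dict()
#             'instance_uuid': 'abc-123',
#             ...
#         },
#         'compute': {               # matching nova instance, or None when
#             'id': 'abc-123',       # the node has no instance
#             'name': '...',
#             ...
#         }
#     }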