def _get_session_keystone_v3():
    """
    Returns a keystone session.
    """
    from keystoneauth1.identity import v3
    from keystoneauth1 import session
    user, password, auth_uri, project_name, project_id, user_domain_name = \
        _get_connection_info('3')
    auth = v3.Password(auth_url=auth_uri, username=user, password=password,
                       project_id=project_id,
                       user_domain_name=user_domain_name)
    envs = [user, password, auth_uri, project_name, project_id,
            user_domain_name]
    msg = "AUTH with user ({e[0]}), password (****), auth_uri ({e[2]}), " \
          "project_name ({e[3]}), project_id ({e[4]}) " \
          "and user_domain_name ({e[5]}).".format(e=envs)
    LOG.info(msg)
    sess = session.Session(auth=auth)
    return sess

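# Hedged usage sketch (not part of the source): one way the session returned
# above can be consumed. keystoneclient.v3.client.Client accepts a
# keystoneauth1 session via its `session` keyword.
#
#   from keystoneclient.v3 import client
#   sess = _get_session_keystone_v3()
#   keystone = client.Client(session=sess)
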
def put_geolocation():
    """
    Stores the geolocation of the nodes to the database.
    """
    LOG.info("Accessing URL %s", request.url)
    now_ts = time.time()
    error_log = []
    if not request.data:
        err_msg = "No coordinate data"
        abort(400, err_msg)
    data = ast.literal_eval(request.data)
    for obj in data:
        LOG.info("Updating coordinates of node %s", obj['id'])
        geo_string = json.dumps(obj['geo'])
        attrs = {"geo": geo_string}
        updated, msg = LANDSCAPE.graph_db.update_node(obj['id'], now_ts,
                                                      extra_attrs=attrs)
        if not updated:
            error_log.append((obj["id"], msg))
    if error_log:
        err_msg = "Error with the following nodes: " + str(error_log)
        abort(400, err_msg)
    return Response(status=200, mimetype=MIME)

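# Hedged example request body (shape inferred from the parsing above; the id
# and coordinates are illustrative). The body is a Python-literal list, and
# each object's 'geo' value is JSON-encoded before being stored on the node:
#
#   [{'id': 'machine-A', 'geo': {'type': 'Point',
#                                'coordinates': [-6.26, 53.35]}}]
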
def _add_task(self, task, timestamp):
    """
    Adds a Docker task node to the graph database.
    :param task: Docker task object.
    :param timestamp: timestamp.
    """
    LOG.info("[DOCKER] Adding a task node to the graph.")
    if task['DesiredState'] == 'running':
        identity, state = self._create_docker_task_nodes(task)
        uuid = task["ID"]
        node_id = task["NodeID"]
        container_id = task["Status"]['ContainerStatus']['ContainerID']
        service_id = task["ServiceID"]
        task_node = self.graph_db.add_node(uuid, identity, state, timestamp)
        LOG.debug("Task node: %s", task_node)
        if task_node is not None:
            docker_node = self.graph_db.get_node_by_uuid(node_id)
            container_node = self.graph_db.get_node_by_uuid(container_id)
            service_node = self.graph_db.get_node_by_uuid(service_id)
            if docker_node and container_node:
                self.graph_db.add_edge(container_node, docker_node,
                                       timestamp, RELS['docker_container'])
            if container_node and task_node:
                self.graph_db.add_edge(task_node, container_node,
                                       timestamp, RELS['container_task'])
            if task_node and service_node:
                self.graph_db.add_edge(service_node, task_node,
                                       timestamp, RELS['task_service'])

def _add_physical_machine(self, machine, timestamp):
    """
    Add a machine to the graph database using the hwloc and cpuinfo
    files for the machine.
    :param machine: Machine name.
    :param timestamp: Epoch timestamp.
    """
    identity = self.graph_db.get_node_by_uuid(machine)
    if identity:
        LOG.error("Machine: %s exists in an inactive state in the "
                  "landscape.", machine)
    hwloc = self._get_hwloc(machine)
    if hwloc is not None:
        LOG.info("HWLocCollector - Adding machine: %s", machine)
        graph = self._create_nxgraph_from_hwloc(hwloc, machine)
        cpu_info = self._get_cpu_info(machine)
        if cpu_info is not None:
            self._enrich_graph_cpuinfo(graph, cpu_info)
        else:
            LOG.error("No cpu info for machine: %s", machine)
        # Store the physical host in the graph database.
        self._add_coordinates(graph, machine)
        self._filter_nodes(graph)
        self.store_nxgraph_to_graph_db(graph, self.graph_db, timestamp)
    else:
        LOG.error("No hwloc details for machine: %s", machine)

def _remove_physical_machine(self, machine, timestamp):
    """
    Removes a machine node from the graph database.
    :param machine: Machine name.
    :param timestamp: Epoch timestamp.
    """
    identity = self.graph_db.get_node_by_uuid(machine)
    if identity:
        self.graph_db.delete_node(identity, timestamp)
        LOG.info("Machine: %s deleted from landscape.", machine)
    else:
        LOG.error("Machine: %s not in the landscape to delete!", machine)

def get_devices(self):
    """
    Retrieves the list of devices from the CIMI endpoint.
    :return: List of device documents, or an empty dict on error.
    """
    cimi_url = self.cnf.get_variable(CONFIG_SECTION_GENERAL, CONFIG_CIMI_URL)
    if cimi_url is None:
        LOG.error("'CIMI_URL' has not been set in the 'general' section "
                  "of the config file.")
        return dict()
    # TODO: certificate authentication issues
    # Note: the original test `find('https') > 0` missed URLs that *start*
    # with https (find returns 0); a membership test covers that case.
    if 'https' in cimi_url.lower():
        requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
    res = requests.get(cimi_url + '/device',
                       headers={'slipstream-authn-info': 'internal ADMIN'},
                       verify=SSL_VERIFY)
    if res.status_code == 200:
        LOG.info("CIMI Connection OK. Devices returned: %s",
                 len(res.json()['devices']))
        return res.json()['devices']
    LOG.error("Request failed: %s", res.status_code)
    LOG.error("Response: %s", res.text)
    return dict()

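# Hedged sketch of the response shape this method relies on (inferred from
# the access pattern above): a JSON document carrying a 'devices' list.
#
#   {'devices': [{'id': 'device/1234', 'hwloc': '<xml...>', ...}]}
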
def init_graph_db(self):
    """
    Adds the instances to the graph database and connects them to the
    relevant machine nodes.
    """
    LOG.info("ContainerCollector - Adding Docker infrastructure "
             "components to the landscape.")
    now_ts = time.time()
    nodes = [x for x in self.swarm_manager.nodes.list()
             if x.attrs["Status"]["State"] == 'ready']
    for node in nodes:
        node_id = node.attrs["ID"]
        hostname = node.attrs['Description']['Hostname']
        if 'ManagerStatus' in node.attrs:
            addr = node.attrs['ManagerStatus']['Addr']
        else:
            addr = node.attrs['Status']['Addr']
        state_attributes = self._get_instance_info(node)
        self._add_instance(node_id, addr, hostname, state_attributes, now_ts)
    LOG.info("ContainerCollector - Docker infrastructure components added.")

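# Hedged sketch of the node.attrs fields read above (Docker SDK swarm node
# attributes; values are illustrative, 'ManagerStatus' appears on managers
# only):
#
#   {'ID': 'node-id',
#    'Description': {'Hostname': 'swarm-worker-1'},
#    'Status': {'State': 'ready', 'Addr': '10.0.0.5'},
#    'ManagerStatus': {'Addr': '10.0.0.5:2377'}}
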
def update_graph_db(self, event, body):
    """
    Updates, adds and deletes cinder volumes based on the event type.
    :param event: Event type.
    :param body: Event details.
    """
    LOG.info("[CINDER] Cinder event received: %s.", event)
    now_ts = time.time()
    uuid = body.get("payload", dict()).get("volume_id", "UNDEFINED")
    size = body.get("payload", dict()).get("size", "UNDEFINED")
    hostname = body.get("payload", dict()).get("host", "UNDEFINED")
    if hostname:
        # Strip the pool and backend qualifiers ('host@backend#pool').
        if "#" in hostname:
            hostname = hostname.split("#")[0]
        if "@" in hostname:
            hostname = hostname.split("@")[0]
    attachments = body.get("payload", dict()).get("volume_attachment", [])
    vm_id = "UNDEFINED"
    for attachment in attachments:
        attach_status = attachment.get("attach_status", "UNDEFINED")
        if attach_status == "attached":
            vm_id = attachment.get("instance_uuid", "UNDEFINED")
    if event in DELETE_EVENTS:
        self._delete_volume(uuid, now_ts)
    elif event in UPDATE_EVENTS:
        self._update_volume(uuid, size, hostname, vm_id, now_ts)
    elif event in ADD_EVENTS:
        self._add_volume(uuid, size, hostname, vm_id, now_ts)

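# Hedged example of the event body shape the handler above expects (keys
# taken from the .get() chains; values are illustrative only):
#
#   {'payload': {'volume_id': 'vol-uuid',
#                'size': 1,
#                'host': 'cinder-host@lvm#pool',
#                'volume_attachment': [{'attach_status': 'attached',
#                                       'instance_uuid': 'vm-uuid'}]}}
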
def get_subgraph(node_id):
    """
    Returns the subgraph starting from the node with the given id.
    """
    LOG.info("Retrieving subgraph with url %s", request.url)
    timestamp = request.args.get("timestamp")
    time_frame = request.args.get("timeframe", 0)
    geo = _bool(request.args.get("geo", False))
    # Filter arguments.
    filter_these = _bool(request.args.get("filter-these", True))
    filter_node = request.args.get("filter-nodes", [])
    # Fetch the subgraph.
    subgraph = LANDSCAPE.graph_db.get_subgraph(node_id, timestmp=timestamp,
                                               timeframe=time_frame)
    if not subgraph:
        err_msg = "Node with ID '{}' is not in the landscape.".format(node_id)
        LOG.error(err_msg)
        abort(400, err_msg)
    if filter_node:
        filter_node = ast.literal_eval(filter_node)
        subgraph = util_graph.filter_nodes(subgraph, filter_node,
                                           filter_these)
    if geo:
        subgraph = Geo.extract_geo(subgraph)
    return Response(subgraph, mimetype=MIME)

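# Hedged request sketch (query parameter names taken from the handler above;
# the route path and node id are assumptions for illustration):
#
#   GET /subgraph/<node_id>?timestamp=1500000000&timeframe=300&geo=true
#       &filter-nodes=['switch','osdev_network']&filter-these=false
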
def add_new_device():
    """
    Adds a new device to the physical layer.
    """
    LOG.info("Accessing URL %s", request.url)
    if not request.data:
        err_msg = "No device data in body"
        abort(400, err_msg)
    LOG.debug(request.data)
    data = ast.literal_eval(request.data)
    # Get a config manager with the sections the collector expects.
    from landscaper.utilities import configuration
    conf_manager = configuration.ConfigurationManager()
    conf_manager.add_section('physical_layer')
    conf_manager.add_section('general')
    # Save the hwloc/cpuinfo files to disk.
    from landscaper.collector.cimi_physicalhost_collector import \
        CimiPhysicalCollector
    cimi_updater = CimiPhysicalCollector(None, conf_manager, None, None)
    cimi_updater.generate_files(data)
    return Response(status=201, mimetype=MIME)

def get_swarm_manager(docker_conf):
    """
    Retrieves a Docker client object.
    :return: Docker client object.
    """
    # if docker_conf[2] and docker_conf[3]:
    #     tls_config = docker.tls.TLSConfig(
    #         client_cert=(docker_conf[2], docker_conf[3])
    #     )
    # else:
    #     tls_config = False
    #
    # manager_address = ContainerCollectorV1.get_connection_string(docker_conf)
    # client = docker.DockerClient(base_url=manager_address, tls=tls_config)
    client = docker.from_env()
    try:
        if client.swarm.init():
            LOG.info("Node joined swarm")
    except docker.errors.APIError:
        LOG.info("Node already part of swarm")
    return client

def init_graph_db(self):
    """
    Adds all neutron ports, nets and subnets to the graph database.
    """
    LOG.info("[NEUTRON] Adding Neutron components to the landscape.")
    now_ts = time.time()
    # Collect networks.
    networks = self.neutron.list_networks()
    for net in networks.get('networks', list()):
        net_id = net.get('id', "UNDEFINED")
        net_name = net.get('name', "UNDEFINED")
        self._add_network(net_id, net_name, now_ts)
    # Collect subnets.
    subnets = self.neutron.list_subnets()
    for subnet in subnets.get('subnets', list()):
        subnet_id = subnet.get('id', "UNDEFINED")
        cidr = subnet.get('cidr', "UNDEFINED")
        network_id = subnet.get('network_id', "UNDEFINED")
        self._add_subnet(subnet_id, cidr, network_id, now_ts)
    # Collect ports.
    ports = self.neutron.list_ports()
    for port in ports.get('ports', list()):
        port_id = port.get("id", "UNDEFINED")
        mac, fixed_ip, device_id, net_id = self._get_port_info(port)
        self._add_port(port_id, mac, fixed_ip, device_id, net_id, now_ts)

def on_connection_revived(self):
    """
    Called when a connection to the broker is successfully made.
    """
    super(OSMQueueConsumer, self).on_connection_revived()
    self.retry_tracker = 0
    info_msg = "Connected to OpenStack message queue at address: %s."
    LOG.info(info_msg, self.connection.as_uri())

def init_graph_db(self):
    """
    Adds the physical network switches to the graph database.
    """
    LOG.info("[PHYS NETWORK] Adding physical network.")
    net_description = self._network_description(paths.NETWORK_DESCRIPTION)
    # Two passes, so that every switch node exists before the
    # inter-switch connections are added.
    for switch, switch_info in net_description.iteritems():
        self._add_switch(switch, switch_info, time.time())
    for switch, switch_info in net_description.iteritems():
        self._connect_switches(switch, switch_info, time.time())

def update_graph_db(self, event, body):
    """
    Updates instances. This method is called by the events manager.
    :param event: The event that has occurred.
    :param body: The details of the event that occurred.
    """
    LOG.info("Processing event received: %s", event)
    now_ts = time.time()
    self._process_event(now_ts, event, body)

def init_graph_db(self):
    """
    Add Volume nodes to the landscape.
    """
    LOG.info("[CINDER] Adding Cinder components to the landscape.")
    now_ts = time.time()
    for volume in self.cinder.volumes.list():
        volume_id, size, hostname, vm_id = self._get_volume_info(volume)
        self._add_volume(volume_id, size, hostname, vm_id, now_ts)

def init_graph_db(self):
    """
    Build the physical layer machines and constituent components and add
    them to the graph database.
    """
    LOG.info("Adding physical machines to the landscape...")
    now_ts = time.time()
    machines = self.conf_mgr.get_machines()
    self._add_physical_machine_threads(machines, now_ts)
    LOG.info("Finished adding physical machines to the landscape.")

def init_graph_db(self):
    """
    Adds the instances to the graph database and connects them to the
    relevant machine nodes.
    """
    LOG.info("[NOVA] Adding Nova components to the landscape.")
    now_ts = time.time()
    for instance in self.nova.servers.list():
        vcpus, mem, name, hostname = self._get_instance_info(instance)
        self._add_instance(instance.id, vcpus, mem, name, hostname, now_ts)

def init_graph_db(self):
    """
    Adds the ephemeral disks to the graph database and attaches them to
    the relevant instances.
    """
    LOG.info("[EDISK] Adding ephemeral_disk components to the landscape.")
    now_ts = time.time()
    self._retrieve_instance_disks()
    for instance_id, disk_obj in self.instance_disks.iteritems():
        self.attach_disk_to_instance(instance_id, disk_obj, now_ts)

def _add_service(self, service, timestamp):
    """
    Adds a Docker service node to the graph database.
    :param service: Docker Service object.
    :param timestamp: timestamp.
    """
    LOG.info("[DOCKER] Adding a service node to the graph.")
    identity, state = self._create_docker_service_nodes(service)
    # WHY IS THIS DIFFERENT TO CONT? WHY DOCKER WHY?
    uuid = service.attrs["ID"]
    service_node = self.graph_db.add_node(uuid, identity, state, timestamp)

def _consume_notifications(self):
    """
    Consumes notifications from the Swarm notification queue.
    """
    LOG.info("Attempting to connect to address: %s", self.connection_string)
    client = self._get_leader_client()
    for event in client.events():
        self._cb_event(event)

def init_graph_db(self):
    """
    Adds stack nodes to the graph database and connects them to the
    stack's vms.
    """
    LOG.info("[HEAT] Adding Heat components to the landscape.")
    now_ts = time.time()
    for stack in self.heat.stacks.list():
        if stack.stack_status == 'CREATE_COMPLETE':
            self._add_stack(stack, now_ts)

def generate_files(self, device, dynamic={}):
    """
    Queries the hwloc and cpuinfo methods and writes them to a file.
    :param device: CIMI Device object containing hwloc and cpu_info methods.
    :param dynamic: CIMI device-dynamic object pertaining to the device
        object.
    :return: (True, hostname) if the files were saved successfully,
        (False, None) if errors were encountered.
    """
    hostname = ""
    device_id = device['id']
    try:
        hwloc = device.get("hwloc")
        if hwloc is None:
            LOG.error("hwloc data has not been set for this device: %s. "
                      "No hwloc file will be saved.", device_id)
            return False, None
        cpu_info = device.get("cpuinfo")
        if cpu_info is None:
            LOG.error("cpuinfo data has not been set for this device: %s. "
                      "No cpuinfo file will be saved.", device_id)
        if dynamic:
            hwloc, hostname = self._parse_hwloc(device, hwloc, dynamic)
            LOG.info("Dynamic data has been set for this device: %s",
                     device_id)
        else:
            hwloc, hostname = self._parse_hwloc(device, hwloc)
            LOG.error("Dynamic data has not been set for this device: %s. "
                      "No dynamic file will be saved.", device_id)
        self.device_dict[device_id] = hostname
        # Save the dynamic info to file.
        if dynamic:
            dynamic_path = os.path.join(paths.DATA_DIR,
                                        hostname + "_dynamic.add")
            self._write_to_file(dynamic_path, json.dumps(dynamic))
        # Save the cpu info to file.
        if cpu_info:
            cpu_path = os.path.join(paths.DATA_DIR,
                                    hostname + "_cpuinfo.txt")
            self._write_to_file(cpu_path, cpu_info)
        # Save the hwloc to file.
        hwloc_path = os.path.join(paths.DATA_DIR, hostname + "_hwloc.xml")
        self._write_to_file(hwloc_path, hwloc)
    except Exception as ex:
        LOG.error("General error hwloc/cpuinfo for device: %s - "
                  "Error message: %s", device_id, ex.message)
        return False, None
    return True, hostname

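# For a device whose parsed hostname is 'node1', the method above writes up
# to three files under paths.DATA_DIR (names taken from the path joins above):
#
#   node1_hwloc.xml    - hwloc topology XML
#   node1_cpuinfo.txt  - raw cpuinfo text
#   node1_dynamic.add  - JSON-encoded device-dynamic data (only if supplied)
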
def get_variable(self, section, variable):
    """
    Returns the value of a variable from a given section.
    :param section: section to be loaded (string)
    :param variable: name of the variable (string)
    :return: string
    """
    if variable in self.get_variable_list(section):
        sect = getattr(self, section)
        return sect[variable]
    LOG.info('Config: Cannot find %s in section %s', variable, section)
    return None

def get_variable_list(self, section):
    """
    Returns the list of the available variables in a section.
    :param section: section to be loaded (string)
    :return: list
    """
    try:
        return getattr(self, section)
    except AttributeError:
        err_msg = 'Config: Section {} not found'.format(section)
        LOG.info(err_msg)
        raise ValueError(err_msg)

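# Hedged usage sketch (section and variable names are illustrative; compare
# the CIMI collector above, which reads its endpoint the same way):
#
#   conf = ConfigurationManager()
#   conf.add_section('general')
#   url = conf.get_variable('general', 'cimi_url')  # None if the key is unset
#   conf.get_variable_list('missing')               # raises ValueError
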
def _consume_notifications(self):
    """
    Consumes notifications from the Swarm notification queue.
    """
    LOG.info("Subscribing to Docker events...")
    client = self._get_leader_client()
    for event in client.events():
        LOG.info(event)
        self._cb_event(event)

def listen_for_events(self):
    """
    Entry point for the child event listener.
    """
    msg = "Connecting to OpenStack message queue at address: %s."
    with Connection(self.connection_string) as conn:
        consumer = OSMQueueConsumer(conn, self._queues(), self._cb_event)
        try:
            LOG.info(msg, self.connection_string)
            consumer.run()
        except exceptions.KombuError as exc:
            LOG.error(exc, exc_info=1)

def _cb_event(self, body, message):
    """
    Callback which is automatically called when an event is received on
    the notification queue. It dispatches the event to the registered
    handler.
    """
    try:
        event = body['event_type']
        LOG.info("event: %s", event)
        if event in EVENTS:
            self.events_manager.dispatch_event(event, body)
        message.ack()
    except TypeError:
        # Bodies that are not dicts (no 'event_type' key) are ignored.
        pass

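# Hedged example of a notification body the callback above can dispatch
# ('event_type' is the only key it reads; the event name is illustrative and
# is only dispatched if it appears in EVENTS):
#
#   {'event_type': 'compute.instance.create.end', 'payload': {...}}
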
def update_graph_db(self, event, body):
    """
    Updates the heat elements in the graph database.
    :param event: The event that has occurred.
    :param body: The details of the event that occurred.
    """
    from heatclient.exc import NotFound
    LOG.info("[HEAT] Processing event received: %s", event)
    now_ts = time.time()
    uuid = body.get('payload', dict()).get('stack_identity', 'UNDEFINED')
    if '/' in uuid:
        uuid = uuid.rsplit('/', 1)[1]
    try:
        stack = self.heat.stacks.get(uuid)
        if event in ADD_EVENTS:
            LOG.info("[HEAT] Adding stack: %s", stack)
            self._add_stack(stack, now_ts)
        elif event in UPDATE_EVENTS:
            LOG.info("[HEAT] Updating stack: %s", stack)
            self._update_stack(stack, now_ts)
        elif event in DELETE_EVENTS:
            LOG.info("[HEAT] Deleting stack: %s", stack)
            self._delete_stack(uuid, now_ts)
    except NotFound:
        LOG.warn("[HEAT] Stack with UUID %s not found.", uuid)

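# Hedged example (value shape assumed): a 'stack_identity' such as
# 'arn:openstack:heat::<tenant>:stacks/<stack_name>/<uuid>' is reduced by the
# rsplit above to the trailing '<uuid>' before the stack is looked up.
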
def update_graph_db(self, event, body):
    """
    Adds new hosts to the physical layer when a new hwloc file is added
    to the /data directory.
    """
    LOG.info("HWLocCollector - event received: %s %s", event, body)
    folder, filename = os.path.split(body)
    # Only process hwloc files.
    suffix = "_hwloc.xml"
    if filename.endswith(suffix):
        device_id = filename[:-len(suffix)]
        if event == pyinotify.IN_CREATE:
            LOG.info("HWLocCollector - processing: %s", device_id)
            self._add_physical_machine(device_id, time.time())
        elif event == pyinotify.IN_DELETE:
            self._remove_physical_machine(device_id, time.time())

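# Example of the naming convention handled above: creating
# 'machine-A_hwloc.xml' in the watched data directory triggers
# _add_physical_machine('machine-A', ...); deleting the file triggers
# _remove_physical_machine for the same device id.
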