def run(self): pool_id = self.parameters['Pool.pool_id'] Event( Message( priority="info", publisher=NS.publisher_id, payload={ "message": "Deleting pool-id %s" % self.parameters['Pool.pool_id'], }, job_id=self.parameters['job_id'], flow_id=self.parameters['flow_id'], cluster_id=NS.tendrl_context.integration_id, )) crud = Crud() ret_val = crud.delete("pool", pool_id) if ret_val['response'] is not None and \ ret_val['response']['error'] is True: Event( Message( priority="info", publisher=NS.publisher_id, payload={ "message": "Failed to delete pool %s." " Error: %s" % (self.parameters['Pool.poolname'], ret_val['error_status']) }, job_id=self.parameters['job_id'], flow_id=self.parameters['flow_id'], cluster_id=NS.tendrl_context.integration_id, )) return False # TODO(shtripat) Use namespace tree and not etcd orm later NS.etcd_orm.client.delete("clusters/%s/Pools/%s" % (NS.tendrl_context.integration_id, self.parameters['Pool.pool_id']), recursive=True) Event( Message( priority="info", publisher=NS.publisher_id, payload={ "message": "Deleted pool-id %s" % self.parameters['Pool.pool_id'], }, job_id=self.parameters['job_id'], flow_id=self.parameters['flow_id'], cluster_id=NS.tendrl_context.integration_id, )) return True
def run(self): pool_id = self.parameters['Pool.pool_id'] Event( Message( priority="info", publisher=NS.publisher_id, payload={ "message": "Deleting pool-id %s" % self.parameters['Pool.pool_id'], }, job_id=self.parameters['job_id'], flow_id=self.parameters['flow_id'], cluster_id=NS.tendrl_context.integration_id, )) crud = Crud() resp = crud.delete("pool", pool_id) try: crud.sync_request_status(resp['request']) except RequestStateError as ex: Event( Message( priority="error", publisher=NS.publisher_id, payload={ "message": "Failed to delete pool %s." " Error: %s" % (self.parameters['Pool.poolname'], ex) }, job_id=self.parameters['job_id'], flow_id=self.parameters['flow_id'], cluster_id=NS.tendrl_context.integration_id, )) return False # TODO(shtripat) Use namespace tree and not etcd orm later NS._int.wclient.delete("clusters/%s/Pools/%s" % (NS.tendrl_context.integration_id, self.parameters['Pool.pool_id']), recursive=True) Event( Message( priority="info", publisher=NS.publisher_id, payload={ "message": "Deleted pool-id %s" % self.parameters['Pool.pool_id'], }, job_id=self.parameters['job_id'], flow_id=self.parameters['flow_id'], cluster_id=NS.tendrl_context.integration_id, )) return True
def run(self):
    attrs = dict(pool_id=self.parameters['Rbd.pool_id'],
                 size=str(self.parameters['Rbd.size']))
    Event(
        Message(
            priority="info",
            publisher=NS.publisher_id,
            payload={
                "message": "Re-sizing rbd %s on pool %s to %s" %
                           (self.parameters['Rbd.name'],
                            self.parameters['Rbd.pool_id'],
                            self.parameters['Rbd.size'])
            },
            job_id=self.parameters['job_id'],
            flow_id=self.parameters['flow_id'],
            cluster_id=NS.tendrl_context.integration_id,
        ))
    crud = Crud()
    ret_val = crud.update("rbd", self.parameters['Rbd.name'], attrs)
    if ret_val['response'] is not None and \
            ret_val['response']['error'] is True:
        Event(
            Message(
                priority="error",
                publisher=NS.publisher_id,
                payload={
                    "message": "Failed to resize rbd %s."
                               " Error: %s" %
                               (self.parameters['Rbd.name'],
                                ret_val['error_status'])
                },
                job_id=self.parameters['job_id'],
                flow_id=self.parameters['flow_id'],
                cluster_id=NS.tendrl_context.integration_id,
            ))
        return False
    Event(
        Message(
            priority="info",
            publisher=NS.publisher_id,
            payload={
                "message": "Successfully re-sized rbd %s on pool-id %s "
                           "to %s" % (self.parameters['Rbd.name'],
                                      self.parameters['Rbd.pool_id'],
                                      self.parameters['Rbd.size'])
            },
            job_id=self.parameters['job_id'],
            flow_id=self.parameters['flow_id'],
            cluster_id=NS.tendrl_context.integration_id,
        ))
    return True
def run(self):
    vol_id = self.parameters['Volume.vol_id']
    Event(
        Message(
            priority="info",
            publisher=NS.publisher_id,
            payload={
                "message": "Stopping the volume %s before delete" %
                           self.parameters['Volume.volname']
            },
            job_id=self.parameters["job_id"],
            flow_id=self.parameters["flow_id"],
            cluster_id=NS.tendrl_context.integration_id,
        ))
    subprocess.call(
        [
            'gluster',
            'volume',
            'stop',
            self.parameters.get('Volume.volname'),
            '--mode=script'
        ]
    )
    Event(
        Message(
            priority="info",
            publisher=NS.publisher_id,
            payload={
                "message": "Deleting the volume %s" %
                           self.parameters['Volume.volname']
            },
            job_id=self.parameters["job_id"],
            flow_id=self.parameters["flow_id"],
            cluster_id=NS.tendrl_context.integration_id,
        ))
    subprocess.call(
        [
            'gluster',
            'volume',
            'delete',
            self.parameters.get('Volume.volname'),
            '--mode=script'
        ]
    )
    NS.etcd_orm.client.delete(
        "clusters/%s/Volumes/%s" % (NS.tendrl_context.integration_id,
                                    vol_id),
        recursive=True)
    Event(
        Message(
            priority="info",
            publisher=NS.publisher_id,
            payload={
                "message": "Deleted the volume %s" %
                           self.parameters['Volume.volname']
            },
            job_id=self.parameters["job_id"],
            flow_id=self.parameters["flow_id"],
            cluster_id=NS.tendrl_context.integration_id,
        ))
    return True
def run(self):
    attrs = dict(pool_id=self.parameters['Rbd.pool_id'],
                 size=str(self.parameters['Rbd.size']))
    Event(
        Message(
            priority="info",
            publisher=NS.publisher_id,
            payload={
                "message": "Re-sizing rbd %s on pool %s to %s" %
                           (self.parameters['Rbd.name'],
                            self.parameters['Rbd.pool_id'],
                            self.parameters['Rbd.size'])
            },
            job_id=self.parameters['job_id'],
            flow_id=self.parameters['flow_id'],
            cluster_id=NS.tendrl_context.integration_id,
        ))
    crud = Crud()
    resp = crud.update("rbd", self.parameters['Rbd.name'], attrs)
    try:
        crud.sync_request_status(resp['request'])
    except RequestStateError as ex:
        Event(
            Message(
                priority="error",
                publisher=NS.publisher_id,
                payload={
                    "message": "Failed to resize rbd %s."
                               " Error: %s" %
                               (self.parameters['Rbd.name'], ex)
                },
                job_id=self.parameters['job_id'],
                flow_id=self.parameters['flow_id'],
                cluster_id=NS.tendrl_context.integration_id,
            ))
        return False
    Event(
        Message(
            priority="info",
            publisher=NS.publisher_id,
            payload={
                "message": "Successfully re-sized rbd %s on pool-id %s "
                           "to %s" % (self.parameters['Rbd.name'],
                                      self.parameters['Rbd.pool_id'],
                                      self.parameters['Rbd.size'])
            },
            job_id=self.parameters['job_id'],
            flow_id=self.parameters['flow_id'],
            cluster_id=NS.tendrl_context.integration_id,
        ))
    return True
def run(self): Event( Message( priority="info", publisher=NS.publisher_id, payload={ "message": "Deleting ec-profile %s" % self.parameters['ECProfile.name'], }, job_id=self.parameters['job_id'], flow_id=self.parameters['flow_id'], cluster_id=NS.tendrl_context.integration_id, )) crud = Crud() resp = crud.delete("ec_profile", self.parameters['ECProfile.name']) try: crud.sync_request_status(resp['request']) except RequestStateError as ex: Event( Message( priority="error", publisher=NS.publisher_id, payload={ "message": "Failed to delete ec-profile %s." " Error: %s" % (self.parameters['ECProfile.name'], ex) }, job_id=self.parameters['job_id'], flow_id=self.parameters['flow_id'], cluster_id=NS.tendrl_context.integration_id, )) return False NS._int.wclient.delete("clusters/%s/ECProfiles/%s" % (NS.tendrl_context.integration_id, self.parameters['ECProfile.name']), recursive=True) Event( Message( priority="info", publisher=NS.publisher_id, payload={ "message": "Deleted ec-profile %s" % self.parameters['ECProfile.name'], }, job_id=self.parameters['job_id'], flow_id=self.parameters['flow_id'], cluster_id=NS.tendrl_context.integration_id, )) return True
def expand_gluster(parameters):
    node_ips = get_node_ips(parameters)
    plugin = NS.gluster_provisioner.get_plugin()
    Event(
        Message(job_id=parameters['job_id'],
                flow_id=parameters['flow_id'],
                priority="info",
                publisher=NS.publisher_id,
                payload={
                    "message": "Setting up gluster nodes %s" %
                               parameters['TendrlContext.integration_id']
                }))
    ret_val = plugin.setup_gluster_node(
        node_ips,
        repo=NS.config.data.get('glusterfs_repo', None))
    if ret_val is not True:
        raise FlowExecutionFailedError("Error setting up gluster node")
    Event(
        Message(job_id=parameters['job_id'],
                flow_id=parameters['flow_id'],
                priority="info",
                publisher=NS.publisher_id,
                payload={
                    "message": "Expanding gluster cluster %s" %
                               parameters['TendrlContext.integration_id']
                }))
    failed_nodes = []
    for node in node_ips:
        ret_val = plugin.expand_gluster_cluster(node)
        if not ret_val:
            failed_nodes.append(node)
    if failed_nodes:
        raise FlowExecutionFailedError(
            "Error expanding gluster cluster. Following nodes failed: %s" %
            ",".join(failed_nodes))
    Event(
        Message(job_id=parameters['job_id'],
                flow_id=parameters['flow_id'],
                priority="info",
                publisher=NS.publisher_id,
                payload={
                    "message": "Expanded Gluster Cluster %s."
                               " New nodes are: %s" %
                               (parameters['TendrlContext.integration_id'],
                                ",".join(node_ips))
                }))
def run(self): Event( Message( priority="info", publisher=NS.publisher_id, payload={ "message": "Deleting ec-profile %s" % self.parameters['ECProfile.name'], }, job_id=self.parameters['job_id'], flow_id=self.parameters['flow_id'], cluster_id=NS.tendrl_context.integration_id, )) crud = Crud() ret_val = crud.delete("ec_profile", self.parameters['ECProfile.name']) if ret_val['response'] is not None and \ ret_val['response']['error'] is True: Event( Message( priority="info", publisher=NS.publisher_id, payload={ "message": "Failed to delete ec-profile %s." " Error: %s" % (self.parameters['ECProfile.name'], ret_val['error_status']) }, job_id=self.parameters['job_id'], flow_id=self.parameters['flow_id'], cluster_id=NS.tendrl_context.integration_id, )) return False NS.etcd_orm.client.delete("clusters/%s/ECProfiles/%s" % (NS.tendrl_context.integration_id, self.parameters['ECProfile.name']), recursive=True) Event( Message( priority="info", publisher=NS.publisher_id, payload={ "message": "Deleted ec-profile %s" % self.parameters['ECProfile.name'], }, job_id=self.parameters['job_id'], flow_id=self.parameters['flow_id'], cluster_id=NS.tendrl_context.integration_id, )) return True
def run(self): Event( Message( priority="info", publisher=NS.publisher_id, payload={"message": "Checking if update parameters are valid"}, job_id=self.parameters['job_id'], flow_id=self.parameters['flow_id'], cluster_id=NS.tendrl_context.integration_id, )) if 'Pool.poolname' in self.parameters and \ ('Pool.pg_num' in self.parameters or 'Pool.size' in self.parameters or 'Pool.pg_num' in self.parameters or 'Pool.min_size' in self.parameters or 'Pool.quota_enabled' in self.parameters): Event( Message( priority="info", publisher=NS.publisher_id, payload={ "message": "Invalid combination of pool update parameters. " "Pool name shouldnt be updated with other parameters." }, job_id=self.parameters['job_id'], flow_id=self.parameters['flow_id'], cluster_id=NS.tendrl_context.integration_id, )) raise AtomExecutionFailedError( "Invalid combination of pool update parameters. " "Pool name shoulnt be update with other parameters.") if 'Pool.pg_num' in self.parameters: fetched_pool = Pool(pool_id=self.parameters['Pool.pool_id']).load() if self.parameters['Pool.pg_num'] <= fetched_pool.pg_num: Event( Message( priority="info", publisher=NS.publisher_id, payload={ "message": "New pg-num cannot be less than " "existing value" }, job_id=self.parameters['job_id'], flow_id=self.parameters['flow_id'], cluster_id=NS.tendrl_context.integration_id, )) raise AtomExecutionFailedError( "New pg-num cannot be less than existing value") return True
def discover_storage_system(self):
    ret_val = {}
    # get the ceph version details
    cmd = subprocess.Popen(
        "ceph --version",
        shell=True,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE
    )
    out, err = cmd.communicate()
    if err and 'command not found' in err:
        Event(
            Message(priority="debug",
                    publisher=NS.publisher_id,
                    payload={"message": "ceph not installed on host"}))
        return ret_val
    if out:
        details = out.split()
        ret_val['pkg_version'] = details[2]
        ret_val['pkg_name'] = details[0]
    # get the cluster_id details
    os_name = NS.platform.os
    cfg_file = ""
    cluster_name = None
    if os_name in ['CentOS Linux', 'Red Hat Enterprise Linux Server']:
        cfg_file = '/etc/sysconfig/ceph'
    # TODO(shtripat) handle the case of ubuntu
    if cfg_file != "":
        if not os.path.exists(cfg_file):
            Event(
                Message(priority="debug",
                        publisher=NS.publisher_id,
                        payload={
                            "message": "config file: %s not found" %
                                       cfg_file
                        }))
            return ret_val
        with open(cfg_file) as f:
            for line in f:
                if line.startswith("CLUSTER="):
                    cluster_name = line.split('\n')[0].split('=')[1]
    if cluster_name:
        raw_data = ini2json.ini_to_dict("/etc/ceph/%s.conf" % cluster_name)
        if "global" in raw_data:
            ret_val['detected_cluster_id'] = raw_data['global']['fsid']
        ret_val['detected_cluster_name'] = cluster_name
    return ret_val
def test_from_json():
    init()
    json_string = '{"timestamp":"Sat Oct 11 17:13:46 UTC 2003",' \
                  '"priority": "info","publisher": "node_context",' \
                  '"payload" : { "message": "TestMessage"}}'
    msg = Message.from_json(json_string)
    assert msg.priority == "info"
    # an unrecognized priority falls back to "debug"
    json_string = '{"timestamp":"Sat Oct 11 17:13:46 UTC 2003",' \
                  '"priority": "None","publisher": "node_context",' \
                  '"payload" : { "message": "TestMessage"}}'
    msg = Message.from_json(json_string)
    assert msg.priority == "debug"
def create_ceph(parameters):
    _integration_id = parameters['TendrlContext.integration_id']
    # install the packages
    mon_ips, osd_ips = install_packages(parameters)
    Event(
        Message(job_id=parameters['job_id'],
                flow_id=parameters['flow_id'],
                priority="info",
                publisher=NS.publisher_id,
                payload={
                    "message": "Cluster (%s) Successfully installed all "
                               "ceph packages" % _integration_id
                }))
    # Configure Mons
    created_mons = create_mons(parameters, mon_ips)
    Event(
        Message(job_id=parameters['job_id'],
                flow_id=parameters['flow_id'],
                priority="info",
                publisher=NS.publisher_id,
                payload={
                    "message": "Cluster (%s) Successfully created and "
                               "configured all ceph mons" % _integration_id
                }))
    # Configure osds
    create_osds(parameters, created_mons)
    Event(
        Message(job_id=parameters['job_id'],
                flow_id=parameters['flow_id'],
                priority="info",
                publisher=NS.publisher_id,
                payload={
                    "message": "Cluster (%s) Successfully created and "
                               "configured all ceph osds" % _integration_id
                }))
    Event(
        Message(job_id=parameters['job_id'],
                flow_id=parameters['flow_id'],
                priority="info",
                publisher=NS.publisher_id,
                payload={
                    "message": "Cluster (%s) is ready for import by "
                               "tendrl!" % _integration_id
                }))
def run(self): Event( Message( priority="info", publisher=NS.publisher_id, payload={"message": "Checking if rebalance is running"}, job_id=self.parameters["job_id"], flow_id=self.parameters["flow_id"], cluster_id=NS.tendrl_context.integration_id, )) try: rebal_status = NS._int.client.read( 'clusters/%s/Volumes/%s/rebal_status' % (NS.tendrl_context.integration_id, self.parameters['Volume.vol_id'])).value if rebal_status is not None: if rebal_status in ["in progress", "in_progress"]: return True else: Event( Message( priority="info", publisher=NS.publisher_id, payload={ "message": "No rebalance running for" " volume %s" % self.parameters['Volume.volname'] }, job_id=self.parameters["job_id"], flow_id=self.parameters["flow_id"], cluster_id=NS.tendrl_context.integration_id, )) return False else: return False except etcd.EtcdKeyNotFound: Event( Message( priority="info", publisher=NS.publisher_id, payload={ "message": "Volume %s not found" % self.parameters['Volume.volname'] }, job_id=self.parameters["job_id"], flow_id=self.parameters["flow_id"], cluster_id=NS.tendrl_context.integration_id, )) return False
def run(self): service_name = self.parameters.get("Service.name") Event( Message( priority="info", publisher=NS.publisher_id, payload={ "message": "Checking status of service %s on node %s" % ( service_name, self.parameters.get("fqdn") ) }, job_id=self.parameters['job_id'], flow_id=self.parameters['flow_id'], ) ) response = ServiceStatus(service_name).status() # and then check the response... if response: Event( Message( priority="info", publisher=NS.publisher_id, payload={ "message": "Service %s running on node %s" % ( service_name, self.parameters.get("fqdn") ) }, job_id=self.parameters['job_id'], flow_id=self.parameters['flow_id'], ) ) return True else: Event( Message( priority="error", publisher=NS.publisher_id, payload={ "message": "Failed to check status of service %s on " "node %s" % ( service_name, self.parameters.get("fqdn") ) }, job_id=self.parameters['job_id'], flow_id=self.parameters['flow_id'], ) ) return False
def _write(self, message):
    json_str = None
    try:
        json_str = Message.to_json(message)
        self.sock.connect(self.socket_path)
        self._pack_and_send(json_str)
    except (socket.error, socket.timeout, TypeError):
        # serialization itself may have failed, so fall back to the raw
        # message object instead of re-serializing inside the handler
        msg = json_str if json_str is not None else message
        exc_type, exc_value, exc_tb = sys.exc_info()
        traceback.print_exception(
            exc_type, exc_value, exc_tb, file=sys.stderr)
        sys.stderr.write(
            "Unable to pass the message into socket.%s\n" % msg)
    finally:
        self.sock.close()
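# A minimal sketch (an assumption, not the actual tendrl code) of the
# framing _pack_and_send() above could use, mirroring read_socket() further
# below, which recovers the payload via _msg_length() and
# struct.unpack("=%ds" % size, data). The fixed 8-byte network-order length
# header and the helper name are hypothetical; json_str is assumed to be a
# byte string, as in the Python 2-style code here.
import struct


def _pack_and_send_sketch(sock, json_str):
    payload = struct.pack("=%ds" % len(json_str), json_str)
    sock.sendall(struct.pack("!Q", len(payload)))  # hypothetical header
    sock.sendall(payload)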
def create_volume(self, volume_name, brick_details, transport=None,
                  replica_count=None, disperse_count=None,
                  redundancy_count=None, tuned_profile=None,
                  force=False):
    args = {}
    if transport:
        args.update({"transport": transport})
    if replica_count:
        args.update({"replica_count": replica_count})
    if disperse_count:
        args.update({"disperse_count": disperse_count})
    if redundancy_count:
        args.update({"redundancy_count": redundancy_count})
    if tuned_profile:
        args.update({"tuned_profile": tuned_profile})
    if force:
        args.update({"force": force})
    out, err, rc = create_gluster_volume.create_volume(
        volume_name, brick_details, **args)
    if rc == 0:
        Event(
            Message(
                priority="info",
                publisher=NS.publisher_id,
                payload={
                    "message": "gluster volume %s created successfully" %
                               volume_name
                },
                cluster_id=NS.tendrl_context.integration_id,
            ))
    else:
        Event(
            Message(
                priority="error",
                publisher=NS.publisher_id,
                payload={
                    "message": "gluster volume creation failed for %s."
                               " Details: %s" % (volume_name, out)
                },
                cluster_id=NS.tendrl_context.integration_id,
            ))
        return False
    return True
def create_gluster(parameters):
    node_ips = get_node_ips(parameters)
    plugin = NS.gluster_provisioner.get_plugin()
    Event(
        Message(
            job_id=parameters['job_id'],
            flow_id=parameters['flow_id'],
            priority="info",
            publisher=NS.publisher_id,
            payload={"message": "Setting up gluster nodes %s" %
                                parameters['TendrlContext.integration_id']}
        )
    )
    ret_val = plugin.setup_gluster_node(
        node_ips,
        repo=NS.config.data.get('glusterfs_repo', None)
    )
    if ret_val is not True:
        raise FlowExecutionFailedError("Error setting up gluster node")
    Event(
        Message(
            job_id=parameters['job_id'],
            flow_id=parameters['flow_id'],
            priority="info",
            publisher=NS.publisher_id,
            payload={"message": "Creating gluster cluster %s" %
                                parameters['TendrlContext.integration_id']}
        )
    )
    ret_val = plugin.create_gluster_cluster(node_ips)
    if ret_val is not True:
        raise FlowExecutionFailedError("Error creating gluster cluster")
    Event(
        Message(
            job_id=parameters['job_id'],
            flow_id=parameters['flow_id'],
            priority="info",
            publisher=NS.publisher_id,
            payload={"message": "Created Gluster Cluster %s" %
                                parameters['TendrlContext.integration_id']}
        )
    )
def find_status():
    """This util is used to find the status of the sshd service.

    It identifies sshd status using the process id of the sshd service.

    input: (No input required)

    output: ({"name": "", "port": "", "status": ""}, err)
    """
    sshd = {"name": "", "port": "", "status": ""}
    cmd = cmd_utils.Command("systemctl show sshd.service")
    out, err, rc = cmd.run()
    if not err:
        pid = _find_pid(out)
        if pid != 0:
            p = psutil.Process(pid)
            result = [
                con for con in p.connections()
                if con.status == psutil.CONN_LISTEN and
                con.laddr[0] == "0.0.0.0"
            ]
            if result:
                sshd["name"] = p.name()
                sshd["port"] = int(result[0].laddr[1])
                sshd["status"] = result[0].status
            else:
                err = "Unable to find ssh port number"
                Event(
                    Message(priority="debug",
                            publisher="commons",
                            payload={"message": err}))
        else:
            err = "sshd service is not running"
            Event(
                Message(priority="debug",
                        publisher="commons",
                        payload={"message": err}))
    else:
        Event(
            Message(priority="debug",
                    publisher="commons",
                    payload={"message": err}))
    return sshd, err
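# Usage sketch for find_status() above; the printed values are illustrative.
sshd, err = find_status()
if not err:
    print("sshd (%s) listening on port %s, state: %s" %
          (sshd["name"], sshd["port"], sshd["status"]))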
def __run_module(self, attr):
    try:
        runner = ansible_module_runner.AnsibleRunner(
            ANSIBLE_MODULE_PATH,
            publisher_id=self.publisher_id,
            node_id=self.node_id,
            socket_path=self.socket_path,
            **attr)
    except ansible_module_runner.AnsibleModuleNotFound:
        # Backward compat ansible<=2.2
        runner = ansible_module_runner.AnsibleRunner(
            "core/" + ANSIBLE_MODULE_PATH,
            publisher_id=self.publisher_id,
            node_id=self.node_id,
            socket_path=self.socket_path,
            **attr)
    try:
        result, err = runner.run()
        Event(
            Message(
                priority="debug",
                publisher=self.publisher_id,
                payload={"message": "Service Management: %s" % result},
                node_id=self.node_id),
            socket_path=self.socket_path)
    except ansible_module_runner.AnsibleExecutableGenerationFailed as e:
        Event(
            Message(priority="error",
                    publisher=self.publisher_id,
                    payload={
                        "message": "Error switching the service: "
                                   "%s to %s state. Error: %s" %
                                   (self.attributes["name"],
                                    attr["state"],
                                    str(e))
                    },
                    node_id=self.node_id),
            socket_path=self.socket_path)
        return e.message, False
    message = result.get("msg", "").encode("ascii")
    state = result.get("state", "").encode("ascii")
    if attr["state"] in ["started", "restarted", "reloaded"]:
        success = (state == "started")
    else:
        success = (attr["state"] == state)
    return message, success
def load_definition(self):
    try:
        Event(
            Message(priority="debug",
                    publisher=NS.publisher_id,
                    payload={
                        "message": "Load definitions (.yml) for "
                                   "namespace.%s.objects.%s.atoms.%s" %
                                   (self._ns.ns_name,
                                    self.obj.__name__,
                                    self.__class__.__name__)
                    }))
    except KeyError:
        sys.stdout.write(
            "Load definitions (.yml) for "
            "namespace.%s.objects.%s.atoms.%s \n" %
            (self._ns.ns_name,
             self.obj.__name__,
             self.__class__.__name__))
    try:
        return self._ns.get_atom_definition(
            self.obj.__name__,
            self.__class__.__name__)
    except KeyError as ex:
        msg = "Could not find definitions (.yml) for " \
              "namespace.%s.objects.%s.atoms.%s" % \
              (
                  self._ns.ns_src,
                  self.obj.__name__,
                  self.__class__.__name__
              )
        try:
            Event(
                ExceptionMessage(priority="debug",
                                 publisher=NS.publisher_id,
                                 payload={
                                     "message": "Error",
                                     "exception": ex
                                 }))
        except KeyError:
            sys.stderr.write("Error: %s \n" % ex)
        try:
            Event(
                Message(priority="debug",
                        publisher=NS.publisher_id,
                        payload={"message": msg}))
        except KeyError:
            sys.stderr.write(msg + "\n")
        raise Exception(msg)
def run(self):
    result = None
    out = None
    try:
        runner = ansible_module_runner.AnsibleRunner(
            ANSIBLE_MODULE_PATH,
            **self.attributes)
    except ansible_module_runner.AnsibleModuleNotFound:
        # Backward compat ansible<=2.2
        runner = ansible_module_runner.AnsibleRunner(
            "core/" + ANSIBLE_MODULE_PATH,
            **self.attributes)
    try:
        out, err = runner.run()
        Event(
            Message(priority="debug",
                    publisher="commons",
                    payload={"message": "SSH-key Generation: %s" % out}))
    except ansible_module_runner.AnsibleExecutableGenerationFailed as e:
        err = str(e.message)
        Event(
            Message(priority="debug",
                    publisher="commons",
                    payload={
                        "message": "SSH-key Generation failed %s. "
                                   "Error: %s" %
                                   (self.attributes["_raw_params"], err)
                    }))
        out = "Ansible Executable Generation Failed"
    if out is None:
        _msg = "No output after Ansible Executable Generation"
        Event(
            Message(priority="debug",
                    publisher="commons",
                    payload={"message": _msg}))
        return None, "No Output"
    if "ssh_public_key" not in out:
        err = out
        Event(
            Message(priority="debug",
                    publisher="commons",
                    payload={
                        "message": "Unable to generate ssh-key. Error: "
                                   "%s" % err
                    }))
    else:
        result = out["ssh_public_key"]
    return result, err
def get_lvs():
    _lvm_cmd = ("lvm vgs --unquoted --noheading --nameprefixes "
                "--separator $ --nosuffix --units m -o lv_uuid,"
                "lv_name,data_percent,pool_lv,lv_attr,lv_size,"
                "lv_path,lv_metadata_size,metadata_percent,vg_name")
    cmd = cmd_utils.Command(_lvm_cmd, True)
    out, err, rc = cmd.run()
    if rc != 0:
        Event(
            Message(priority="debug",
                    publisher=NS.publisher_id,
                    payload={"message": str(err)}))
        return None
    # with --nameprefixes and --separator $, each output line has the form
    # (values illustrative):
    #   LVM2_LV_UUID=...$LVM2_LV_NAME=lv0$...$LVM2_VG_NAME=vg0
    lvs = [
        dict(field.split('=') for field in line.strip().split('$'))
        for line in out.split('\n')
    ]
    d = {}
    for lv in lvs:
        if lv['LVM2_LV_ATTR'][0] == 't':
            # thin pools are keyed by "<vg>/<lv>"
            k = "%s/%s" % (lv['LVM2_VG_NAME'], lv['LVM2_LV_NAME'])
        else:
            k = os.path.realpath(lv['LVM2_LV_PATH'])
        d.update({k: lv})
    return d
def run(self): Event( Message( priority="info", publisher=NS.publisher_id, payload={ "message": "Checking if pool-id %s doesnt exist" % self.parameters['Pool.pool_id'] }, job_id=self.parameters['job_id'], flow_id=self.parameters['flow_id'], cluster_id=NS.tendrl_context.integration_id, ) ) try: NS._int.client.read( 'clusters/%s/Pools/%s' % ( NS.tendrl_context.integration_id, self.parameters['Pool.pool_id'] ) ) except etcd.EtcdKeyNotFound: return True return False
def complete_jid(self, result):
    """Call this when remote execution is done.

    Implementations must always update .jid appropriately here:
    either to the jid of a new job, or to None.
    """
    self.result = result
    Event(
        Message(
            priority="info",
            publisher=NS.publisher_id,
            payload={
                "message": "Request %s JID %s completed with "
                           "result=%s" % (self.id, self.jid, self.result)
            }
        )
    )
    self.jid = None
    # This is a default behaviour for UserRequests which don't
    # override this method: assume completion of a JID means the
    # job is now done.
    self.complete()
def on_heartbeat(self, cluster_data):
    """Handle a ceph.heartbeat.

    These tell us whether there are any new versions of
    cluster maps for us to fetch.
    """
    if cluster_data is None:
        return
    if cluster_data['versions'] is None:
        return
    self.update_time = datetime.datetime.utcnow().replace(tzinfo=utc)
    Event(
        Message(priority="info",
                publisher=NS.publisher_id,
                payload={
                    "message": 'Checking for version increments in '
                               'heartbeat...'
                }))
    for sync_type in SYNC_OBJECT_TYPES:
        data = self._sync_objects.on_version(
            sync_type,
            cluster_data['versions'][sync_type.str])
        if data:
            self.on_sync_object(data)
    # Get and update rbds for pools
    self._sync_rbds()
    # Get and update ec profiles for the cluster
    self._sync_ec_profiles()
def read_socket(self, sock, *args):
    data = None
    try:
        size = self._msg_length(sock)
        data = self._read(sock, size)
        frmt = "=%ds" % size
        msg = struct.unpack(frmt, data)
        message = Message.from_json(msg[0])
        # Logger is in commons so passing alert from here
        alert_conditions = [
            "alert_condition_status",
            "alert_condition_state",
            "alert_condition_unset"
        ]
        if message.priority == NOTICE_PRIORITY:
            alert = True
            for alert_condition in alert_conditions:
                if alert_condition not in message.payload:
                    alert = False
                    break
            if alert:
                update_alert(message)
        Logger(message)
    except (socket.error, socket.timeout):
        exc_type, exc_value, exc_tb = sys.exc_info()
        traceback.print_exception(exc_type, exc_value, exc_tb,
                                  file=sys.stderr)
    except (TypeError, ValueError, KeyError, AttributeError):
        sys.stderr.write("Unable to log the message.%s\n" % data)
        exc_type, exc_value, exc_tb = sys.exc_info()
        traceback.print_exception(exc_type, exc_value, exc_tb,
                                  file=sys.stderr)
def __init__(self, package_name, package_type, package_version=None):
    self.attributes = dict()
    self.attributes["name"] = package_name
    if package_type == "pip":
        self.attributes["editable"] = "false"
        self.ansible_module_path = "packaging/language/pip.py"
    elif package_type == "rpm":
        self.ansible_module_path = "packaging/os/yum.py"
    elif package_type == "deb":
        self.ansible_module_path = "packaging/os/apt.py"
    else:
        Event(
            Message(
                priority="debug",
                publisher=NS.publisher_id,
                payload={
                    "message": "Unsupported package type: %s" % package_type
                }
            )
        )
        raise ValueError("Unsupported package type: %s" % package_type)
    if package_version:
        self.attributes["name"] = package_name + "-" + package_version
def osds_by_pool(self):
    """Get the OSDS which may be used in this pool

    :return dict of pool ID to OSD IDs in the pool
    """
    result = {}
    for pool_id, pool in self.pools_by_id.items():
        osds = None
        for rule in [r for r in self.data['crush']['rules']
                     if r['ruleset'] == pool['crush_ruleset']]:
            if rule['min_size'] <= pool['size'] <= rule['max_size']:
                osds = self.osds_by_rule_id[rule['rule_id']]
        if osds is None:
            # Fallthrough, the pool size didn't fall within any of the
            # rules in its ruleset, Calamari doesn't understand.
            # Just report all OSDs instead of failing horribly.
            Event(
                Message(priority="debug",
                        publisher=NS.publisher_id,
                        payload={
                            "message": "Cannot determine OSDS for pool %s" %
                                       pool_id
                        }))
            osds = self.osds_by_id.keys()
        result[pool_id] = osds
    return result
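# Worked example (fabricated data) of the rule matching above: a pool
# {'size': 3, 'crush_ruleset': 0} matches a rule
# {'ruleset': 0, 'rule_id': 0, 'min_size': 1, 'max_size': 10}
# because 1 <= 3 <= 10, so result[pool_id] = osds_by_rule_id[0].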
def _emit_event(self, severity, resource, curr_value, msg,
                plugin_instance=None):
    if not NS.node_context.node_id:
        return
    alert = {}
    alert['source'] = NS.publisher_id
    alert['pid'] = os.getpid()
    alert['time_stamp'] = now().isoformat()
    alert['alert_type'] = 'status'
    alert['severity'] = SEVERITIES[severity]
    alert['resource'] = resource
    alert['current_value'] = curr_value
    alert['tags'] = dict(
        message=msg,
        cluster_id=NS.tendrl_context.integration_id,
        cluster_name=NS.tendrl_context.cluster_name,
        sds_name=NS.tendrl_context.sds_name,
        fqdn=socket.getfqdn()
    )
    if plugin_instance:
        alert['tags']['plugin_instance'] = plugin_instance
    alert['node_id'] = NS.node_context.node_id
    Event(
        Message(
            "notice",
            "alerting",
            {'message': json.dumps(alert)}
        )
    )
def __generate_executable_module(self):
    modname = os.path.basename(self.module_path)
    modname = os.path.splitext(modname)[0]
    try:
        (module_data, module_style, shebang) = \
            module_common.modify_module(
                modname,
                self.module_path,
                self.argument_dict,
                task_vars={}
            )
    except Exception as e:
        Event(
            Message(priority="debug",
                    publisher=self.publisher_id,
                    payload={
                        "message": "Could not generate ansible "
                                   "executable data for module: %s. "
                                   "Error: %s" % (self.module_path, str(e))
                    },
                    node_id=self.node_id),
            socket_path=self.socket_path)
        raise AnsibleExecutableGenerationFailed(
            module_path=self.module_path,
            err=str(e)
        )
    return module_data
def __init__(self, message, socket_path=None):
    # node_agent log messages other than notice
    # priority are directly pushed into logger
    if message.publisher == "node_agent" and \
            message.priority != "notice":
        try:
            json_str = Message.to_json(message)
            message = Message.from_json(json_str)
            Logger(message)
        except (TypeError, ValueError, KeyError, AttributeError):
            sys.stderr.write(
                "Unable to log the message.%s\n" % message)
            exc_type, exc_value, exc_tb = sys.exc_info()
            traceback.print_exception(
                exc_type, exc_value, exc_tb, file=sys.stderr)
    else:
        self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
        self.socket_path = socket_path
        if self.socket_path is None:
            self.socket_path = NS.config.data['logging_socket_path']
        self._write(message)
def push_operation(self):
    etcd_utils.write(
        "/messages/jobs/%s" % self.message.job_id,
        Message.to_json(self.message),
        append=True)
    etcd_utils.refresh(
        "/messages/jobs/%s" % self.message.job_id,
        ttl=NS.config.data['message_retention_time']
    )
    log_message = "%s:%s" % (
        self.message.job_id,
        self.message.payload["message"])
    return log_message
def _logger(self, log_message):
    # if the payload is itself a Message object, serialize it for logging
    if isinstance(log_message, Message):
        log_message = Message.to_json(log_message)
    message = "%s - %s - %s:%s - %s - %s - %s" % (
        self.message.timestamp,
        self.message.publisher,
        self.message.caller["filename"],
        self.message.caller["line_no"],
        self.message.caller["function"],
        self.message.priority.upper(),
        log_message
    )
    try:
        method = getattr(
            LOG, Logger.logger_priorities[self.message.priority])
    except (KeyError, AttributeError):
        raise NotImplementedError(self.message.priority)
    method(message)
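# Hypothetical shape of the Logger.logger_priorities table consulted by
# _logger() above; the actual mapping in tendrl may differ. Standard
# library logging has no "notice" level, so it is assumed here to map
# to "info".
logger_priorities = {
    "debug": "debug",
    "info": "info",
    "notice": "info",
    "warning": "warning",
    "error": "error",
}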