def run(self):
    """Atom: verify that the target gluster volume exists.

    Emits an informational event, then loads the Volume object by id
    from the central store. Returns True when the volume is present,
    False (after a warning event) when the etcd key is missing.
    """
    vol_name = self.parameters['Volume.volname']
    job_id = self.parameters["job_id"]
    flow_id = self.parameters["flow_id"]
    integration_id = NS.tendrl_context.integration_id
    Event(
        Message(
            priority="info",
            publisher=NS.publisher_id,
            payload={"message": "Checking if volume %s exists" % vol_name},
            job_id=job_id,
            flow_id=flow_id,
            cluster_id=integration_id,
        )
    )
    try:
        # Load only to probe existence; the loaded object is not used.
        Volume(vol_id=self.parameters['Volume.vol_id']).load()
    except etcd.EtcdKeyNotFound:
        Event(
            Message(
                priority="warning",
                publisher=NS.publisher_id,
                payload={"message": "Volume %s doesnt exist" % vol_name},
                job_id=job_id,
                flow_id=flow_id,
                cluster_id=integration_id,
            )
        )
        return False
    return True
def run(self):
    """Atom: delete an rbd image and its etcd representation.

    Issues the delete through RbdCrud, waits for the provisioner
    request to settle, then prunes the rbd subtree from the central
    store. Returns False (after an event) when the request fails.
    """
    pool_id = self.parameters['Rbd.pool_id']
    rbd_name = self.parameters['Rbd.name']
    job_id = self.parameters['job_id']
    flow_id = self.parameters['flow_id']
    cluster_id = NS.tendrl_context.integration_id

    def _emit(text):
        # Every event in this atom shares priority and routing ids;
        # only the message text varies.
        Event(
            Message(
                priority="info",
                publisher=NS.publisher_id,
                payload={"message": text},
                job_id=job_id,
                flow_id=flow_id,
                cluster_id=cluster_id,
            )
        )

    _emit("Deleting rbd %s on pool %s" % (rbd_name, pool_id))
    crud = RbdCrud()
    resp = crud.delete_rbd(pool_id, rbd_name)
    try:
        crud.sync_request_status(resp['request'])
    except RequestStateError as ex:
        _emit("Failed to delete rbd %s. Error: %s" % (rbd_name, ex))
        return False
    NS._int.wclient.delete(
        "clusters/%s/Pools/%s/Rbds/%s" % (cluster_id, pool_id, rbd_name),
        recursive=True)
    _emit("Deleted rbd %s on pool-id %s" % (rbd_name, pool_id))
    return True
def run(self):
    """Atom: stop a gluster volume through the gdeploy plugin.

    Emits an info event and returns True on success, or an error event
    and False when the plugin reports failure.
    """
    volname = self.parameters.get('Volume.volname')
    stopped = NS.gdeploy_plugin.stop_volume(volname)
    if stopped:
        Event(
            Message(
                priority="info",
                publisher=NS.publisher_id,
                payload={
                    "message": "Stopped the volume %s successfully" %
                    self.parameters['Volume.volname']
                },
                job_id=self.parameters["job_id"],
                flow_id=self.parameters["flow_id"],
                cluster_id=NS.tendrl_context.integration_id,
            )
        )
        return True
    Event(
        Message(
            priority="error",
            publisher=NS.publisher_id,
            payload={
                "message": "Failed to stop the volume %s" %
                self.parameters['Volume.volname']
            },
            job_id=self.parameters["job_id"],
            flow_id=self.parameters["flow_id"],
            cluster_id=NS.tendrl_context.integration_id,
        )
    )
    return False
def main():
    """Entry point for the gluster integration daemon.

    Sets up the namespaces and worker threads, verifies this node is
    part of an sds cluster (aborts otherwise), starts the integration
    manager and then blocks until SIGTERM/SIGINT requests shutdown.
    """
    # Register the gluster and common Tendrl namespaces on the global NS.
    gluster_integration.GlusterIntegrationNS()
    TendrlNS()
    NS.type = "sds"
    NS.publisher_id = "gluster_integration"
    NS.central_store_thread = central_store.GlusterIntegrationEtcdCentralStore(
    )
    NS.state_sync_thread = sds_sync.GlusterIntegrationSdsSyncStateThread()
    NS.node_context.save()
    try:
        # A TendrlContext in etcd proves the node was imported/created as
        # part of an sds cluster; a missing key means it never was.
        NS.tendrl_context = NS.tendrl_context.load()
        Event(
            Message(priority="info",
                    publisher=NS.publisher_id,
                    payload={
                        "message": "Integration %s is part of sds cluster"
                        % NS.tendrl_context.integration_id
                    }))
    except etcd.EtcdKeyNotFound:
        Event(
            Message(priority="error",
                    publisher=NS.publisher_id,
                    payload={
                        "message": "Node %s is not part of any sds cluster"
                        % NS.node_context.node_id
                    }))
        raise Exception("Integration cannot be started,"
                        " please Import or Create sds cluster"
                        " in Tendrl and include Node %s" %
                        NS.node_context.node_id)
    NS.tendrl_context.save()
    NS.gluster.definitions.save()
    NS.gluster.config.save()
    pm = ProvisioningManager("GdeployPlugin")
    NS.gdeploy_plugin = pm.get_plugin()
    m = GlusterIntegrationManager()
    m.start()
    complete = gevent.event.Event()

    def shutdown():
        # Signal handler: just flag the main loop below to exit.
        Event(
            Message(priority="info",
                    publisher=NS.publisher_id,
                    payload={"message": "Signal handler: stopping"}))
        complete.set()

    gevent.signal(signal.SIGTERM, shutdown)
    gevent.signal(signal.SIGINT, shutdown)
    # Wake once a second so gevent can deliver pending signals.
    while not complete.is_set():
        complete.wait(timeout=1)
def create_gluster_cluster(self, hosts):
    """Create a gluster trusted storage pool over *hosts*.

    Returns True on success; on failure emits a debug event carrying
    the tool output and returns False.
    """
    self._reload_modules()
    out, err, rc = create_cluster.create_cluster(hosts)
    if rc != 0:
        Event(
            Message(
                priority="debug",
                publisher=NS.publisher_id,
                payload={
                    "message": "Error while creating gluster cluster"
                    ". Details: %s" % str(out)
                },
                cluster_id=NS.tendrl_context.integration_id,
            )
        )
        return False
    Event(
        Message(
            priority="info",
            publisher=NS.publisher_id,
            payload={
                "message": "gluster cluster created successfully"
            },
            cluster_id=NS.tendrl_context.integration_id,
        )
    )
    return True
def gluster_provision_bricks(self, brick_dictionary, disk_type=None,
                             disk_count=None, stripe_count=None):
    """Provision the given disks as gluster bricks.

    Succeeds only when the provisioner exits 0 AND produced no stderr;
    otherwise emits an error event with the tool output and returns
    False.
    """
    out, err, rc = gluster_brick_provision.provision_disks(
        brick_dictionary, disk_type, disk_count, stripe_count)
    if rc == 0 and err == "":
        Event(
            Message(
                # BUG FIX: the success event was emitted with priority
                # "error"; a successful provisioning is informational.
                priority="info",
                publisher=NS.publisher_id,
                payload={"message": "Bricks Provisioned successfully"},
                cluster_id=NS.tendrl_context.integration_id,
            ))
        return True
    Event(
        Message(
            priority="error",
            publisher=NS.publisher_id,
            payload={
                "message": "Bricks Provisioning Failed. Error %s"
                % (str(out))
            },
            cluster_id=NS.tendrl_context.integration_id,
        ))
    return False
def run(self):
    """Atom: validate pool update parameters.

    Only checks that a requested pg_num is strictly greater than the
    pool's existing value; raises AtomExecutionFailedError otherwise.
    Returns True when the parameters are acceptable.
    """
    Event(
        Message(
            priority="info",
            publisher=NS.publisher_id,
            payload={"message": "Checking if update parameters are valid"},
            job_id=self.parameters['job_id'],
            flow_id=self.parameters['flow_id'],
            cluster_id=NS.tendrl_context.integration_id,
        )
    )
    if 'Pool.pg_num' not in self.parameters:
        return True
    fetched_pool = Pool(pool_id=self.parameters['Pool.pool_id']).load()
    if self.parameters['Pool.pg_num'] > int(fetched_pool.pg_num):
        return True
    Event(
        Message(
            priority="error",
            publisher=NS.publisher_id,
            payload={
                "message": "New pg-num cannot be less than "
                "existing value"
            },
            job_id=self.parameters['job_id'],
            flow_id=self.parameters['flow_id'],
            cluster_id=NS.tendrl_context.integration_id,
        )
    )
    raise AtomExecutionFailedError(
        "New pg-num cannot be less than existing value")
def shrink_gluster_cluster(self, host):
    """Detach *host* from the trusted storage pool.

    Runs the remove-host module from the current node; returns True on
    success, else emits a debug event with the output and returns
    False.
    """
    self._reload_modules()
    current_host = NS.node_context.fqdn
    out, err, rc = remove_host.remove_host([current_host, host])
    if rc != 0:
        Event(
            Message(
                priority="debug",
                publisher=NS.publisher_id,
                payload={
                    "message": "Error while shrinking gluster cluster"
                    ". Details: %s" % str(out)
                },
                cluster_id=NS.tendrl_context.integration_id,
            )
        )
        return False
    Event(
        Message(
            priority="info",
            publisher=NS.publisher_id,
            payload={
                "message": "gluster cluster shrinked successfully"
            },
            cluster_id=NS.tendrl_context.integration_id,
        )
    )
    return True
def on_map(self, sync_type, osd_map):
    """Completion hook: finish once the OsdMap reaches the awaited version.

    Emits a debug event either way; calls ``self.complete()`` only when
    the incoming map version has caught up with ``_await_version``.
    """
    assert sync_type == OsdMap
    assert self._await_version is not None
    if osd_map.version >= self._await_version:
        Event(
            Message(
                priority="debug",
                publisher=NS.publisher_id,
                payload={"message": "check passed (%s >= %s)" %
                         (osd_map.version, self._await_version)}
            )
        )
        self.complete()
    else:
        Event(
            Message(
                priority="debug",
                publisher=NS.publisher_id,
                payload={"message": "check pending (%s < %s)" %
                         (osd_map.version, self._await_version)}
            )
        )
def add_osds(parameters):
    """Create OSDs on every node with a provisioning_ip configured.

    Looks up the already-created mons, then provisions OSDs on the
    configured nodes, emitting before/after events. No-op when no
    provisioning IPs are present.
    """
    # Get the list of existing mons
    created_mons = ceph_help.existing_mons(parameters)
    node_config = parameters["Cluster.node_configuration"]
    osd_ips = [conf["provisioning_ip"]
               for _node, conf in node_config.iteritems()]
    # If osds passed create and add them
    if not osd_ips:
        return
    integration_id = parameters['TendrlContext.integration_id']
    Event(
        Message(
            job_id=parameters['job_id'],
            flow_id=parameters['flow_id'],
            priority="info",
            publisher=NS.publisher_id,
            payload={
                "message": "Creating OSDs on nodes: %s of cluster: %s" %
                (str(osd_ips), integration_id)
            }))
    create_ceph_help.create_osds(parameters, created_mons)
    Event(
        Message(
            job_id=parameters['job_id'],
            flow_id=parameters['flow_id'],
            priority="info",
            publisher=NS.publisher_id,
            payload={
                "message": "Created OSDs on nodes: %s of cluster: %s" %
                (str(osd_ips), integration_id)
            }))
def stop_volume(self, volume_name, host=None, force=None):
    """Stop *volume_name*, optionally targeting a host and/or forcing.

    Returns True when the underlying command exits 0, else emits an
    error event with the command output and returns False.
    """
    args = {}
    if host:
        args["host"] = host
    if force:
        args["force"] = force
    out, err, rc = stop_volume.stop_volume(volume_name, **args)
    if rc != 0:
        Event(
            Message(
                priority="error",
                publisher=NS.publisher_id,
                payload={
                    "message": "Volume stop failed for volume "
                    "%s. Details: %s" % (volume_name, out)
                },
                cluster_id=NS.tendrl_context.integration_id,
            )
        )
        return False
    Event(
        Message(
            priority="info",
            publisher=NS.publisher_id,
            payload={
                "message": "Volume %s stopped successfully" % volume_name
            },
            cluster_id=NS.tendrl_context.integration_id,
        )
    )
    return True
def configure_monitoring(self, integration_id):
    """Configure monitoring for the cluster identified by *integration_id*.

    Reads the cluster's TendrlContext and delegates to the SDS plugin
    matching its sds_name. Returns the plugin's result, or None when
    the context is missing/unreadable or no plugin matches.
    """
    try:
        sds_tendrl_context = central_store_util.read(
            'clusters/%s/TendrlContext' % integration_id)
    except EtcdKeyNotFound:
        # Cluster context not written yet; nothing to configure.
        return None
    except EtcdException as ex:
        Event(
            ExceptionMessage(priority="debug",
                             publisher=NS.publisher_id,
                             payload={
                                 "message":
                                 'Failed to configure monitoring for '
                                 'cluster %s as tendrl context could '
                                 'not be fetched.' % integration_id,
                                 "exception": ex
                             }))
        # CONSISTENCY FIX: was a bare ``return``; made explicit so all
        # exit points of this method return None uniformly.
        return None
    for plugin in SDSPlugin.plugins:
        if plugin.name == sds_tendrl_context['sds_name']:
            return plugin.configure_monitoring(sds_tendrl_context)
    Event(
        Message(priority="debug",
                publisher=NS.publisher_id,
                payload={
                    "message": 'No plugin defined for %s. Hence cannot '
                    'configure it' % sds_tendrl_context['sds_name']
                }))
    return None
def run(self):
    """Atom: verify that the target volume is absent from the store.

    Returns True when the volume key is missing in etcd, False when it
    still exists.
    """
    vol_name = self.parameters['Volume.volname']
    Event(
        Message(
            priority="info",
            publisher=NS.publisher_id,
            payload={
                "message": "Checking if volume %s doesnt exist" % vol_name
            },
            job_id=self.parameters["job_id"],
            flow_id=self.parameters["flow_id"],
            cluster_id=NS.tendrl_context.integration_id,
        ))
    try:
        NS._int.client.read('clusters/%s/Volumes/%s' %
                            (NS.tendrl_context.integration_id,
                             self.parameters['Volume.vol_id']))
    except etcd.EtcdKeyNotFound:
        # Missing key is the desired state for this atom.
        Event(
            Message(
                priority="warning",
                publisher=NS.publisher_id,
                payload={
                    "message": "Volume %s doesnt exist" % vol_name
                },
                job_id=self.parameters["job_id"],
                flow_id=self.parameters["flow_id"],
                cluster_id=NS.tendrl_context.integration_id,
            ))
        return True
    return False
def __init__(self, module_path, publisher_id=None, node_id=None,
             socket_path=None, **kwargs):
    """Wrap an ansible module for local execution.

    module_path: path relative to the bundled ansible ``modules`` tree.
    kwargs: arguments forwarded to the module; must be non-empty.

    Raises AnsibleModuleNotFound when the resolved path is not a file,
    and ValueError when no module arguments are supplied.
    """
    self.module_path = modules.__path__[0] + "/" + module_path
    self.socket_path = socket_path or NS.config.data['logging_socket_path']
    self.publisher_id = publisher_id or NS.publisher_id
    self.node_id = node_id or NS.node_context.node_id
    if not os.path.isfile(self.module_path):
        Event(Message(priority="debug",
                      publisher=self.publisher_id,
                      payload={
                          "message": "Module path: %s does not exist" %
                          self.module_path
                      },
                      node_id=self.node_id),
              socket_path=self.socket_path)
        raise AnsibleModuleNotFound(module_path=self.module_path)
    # IDIOM FIX: was ``kwargs == {}``; truthiness is the idiomatic
    # emptiness test. Also attach a message to the ValueError so the
    # failure is diagnosable from logs.
    if not kwargs:
        Event(Message(priority="debug",
                      publisher=self.publisher_id,
                      payload={"message": "Empty argument dictionary"},
                      node_id=self.node_id),
              socket_path=self.socket_path)
        raise ValueError("empty argument dictionary for module %s" %
                         self.module_path)
    self.argument_dict = kwargs
    self.argument_dict['_ansible_selinux_special_fs'] = \
        ['nfs', 'vboxsf', 'fuse', 'ramfs']
def run(self):
    """Atom: stop a gluster volume via the gluster CLI.

    Returns True when ``gluster volume stop`` exits 0; emits an error
    event and returns False otherwise.
    """
    Event(
        Message(
            priority="info",
            publisher=NS.publisher_id,
            payload={
                "message": "Stopping the volume %s" %
                self.parameters['Volume.volname']
            },
            job_id=self.parameters["job_id"],
            flow_id=self.parameters["flow_id"],
            cluster_id=NS.tendrl_context.integration_id,
        ))
    rc = subprocess.call([
        'gluster',
        'volume',
        'stop',
        self.parameters.get('Volume.volname'),
        '--mode=script'
    ])
    if rc != 0:
        # BUG FIX: the CLI exit status was previously ignored and the
        # atom unconditionally reported success and returned True.
        Event(
            Message(
                priority="error",
                publisher=NS.publisher_id,
                payload={
                    "message": "Failed to stop the volume %s" %
                    self.parameters['Volume.volname']
                },
                job_id=self.parameters["job_id"],
                flow_id=self.parameters["flow_id"],
                cluster_id=NS.tendrl_context.integration_id,
            ))
        return False
    Event(
        Message(
            priority="info",
            publisher=NS.publisher_id,
            payload={
                "message": "Successfully stopped the volume %s" %
                self.parameters['Volume.volname']
            },
            job_id=self.parameters["job_id"],
            flow_id=self.parameters["flow_id"],
            cluster_id=NS.tendrl_context.integration_id,
        ))
    return True
def run(self):
    """Atom: succeed only when no rebalance is running on the volume.

    Reads ``rebal_status`` from etcd. "not applicable", "not_started"
    and "completed" (or a missing value) pass; "in progress" fails;
    any other status is logged and fails. A missing volume key fails
    with an error event.
    """
    Event(
        Message(
            priority="info",
            publisher=NS.publisher_id,
            payload={
                "message": "Checking if rebalance is not running"
            },
            job_id=self.parameters["job_id"],
            flow_id=self.parameters["flow_id"],
            cluster_id=NS.tendrl_context.integration_id,
        )
    )
    # Keep the try minimal: only the etcd read can raise
    # EtcdKeyNotFound (was an over-broad try around all the checks,
    # with an unused ``as ex`` binding).
    try:
        rebal_status = NS._int.client.read(
            'clusters/%s/Volumes/%s/rebal_status' % (
                NS.tendrl_context.integration_id,
                self.parameters['Volume.vol_id']
            )
        ).value
    except etcd.EtcdKeyNotFound:
        Event(
            Message(
                priority="error",
                publisher=NS.publisher_id,
                payload={
                    "message": "Volume %s not found" %
                    self.parameters['Volume.volname']
                },
                job_id=self.parameters["job_id"],
                flow_id=self.parameters["flow_id"],
                cluster_id=NS.tendrl_context.integration_id,
            )
        )
        return False
    if rebal_status is None:
        return True
    if rebal_status in ("not applicable", "not_started", "completed"):
        return True
    if rebal_status == "in progress":
        return False
    # Unexpected status: surface it before failing.
    Event(
        Message(
            priority="info",
            publisher=NS.publisher_id,
            payload={
                "message": "Volume rebalance status is %s" % rebal_status
            },
            job_id=self.parameters["job_id"],
            flow_id=self.parameters["flow_id"],
            cluster_id=NS.tendrl_context.integration_id,
        )
    )
    return False
def run(self):
    """Atom: resize an rbd image through the ceph provisioner.

    Submits the update and waits for the provisioner request to
    settle. Returns False after an event when the request ends in
    error, True otherwise.
    """
    rbd_name = self.parameters['Rbd.name']
    pool_id = self.parameters['Rbd.pool_id']
    size = self.parameters['Rbd.size']
    attrs = dict(pool_id=pool_id, size=str(size))

    def _emit(text):
        # All events here are informational with identical routing ids.
        Event(
            Message(
                priority="info",
                publisher=NS.publisher_id,
                payload={"message": text},
                job_id=self.parameters['job_id'],
                flow_id=self.parameters['flow_id'],
                cluster_id=NS.tendrl_context.integration_id,
            )
        )

    _emit("Re-sizing rbd %s on pool %s to %s" % (rbd_name, pool_id, size))
    crud = Crud()
    resp = crud.update("rbd", rbd_name, attrs)
    try:
        crud.sync_request_status(resp['request'])
    except RequestStateError as ex:
        _emit("Failed to resize rbd %s. Error: %s" % (rbd_name, ex))
        return False
    _emit("Successfully re-sized rbd %s on pool-id %s to %s" %
          (rbd_name, pool_id, size))
    return True
def _run(self): Event( Message( priority="info", publisher=NS.publisher_id, payload={"message": "%s running" % self.__class__.__name__})) # Check if monitor key exists, if not sync try: NS._int.client.read("clusters/%s/_mon_key" % NS.tendrl_context.integration_id) except etcd.EtcdKeyNotFound: out, err, rc = cmd_utils.Command( "ceph auth get mon. --cluster %s" % NS.tendrl_context.cluster_name).run() if rc != 0: Event( Message(priority="debug", publisher=NS.publisher_id, payload={ "message": "Couldn't get monitor key. Error:%s" % err })) else: if out and out != "": mon_sec = out.split('\n')[1].strip().split( ' = ')[1].strip() NS._int.wclient.write( "clusters/%s/_mon_key" % NS.tendrl_context.integration_id, mon_sec) while not self._complete.is_set(): gevent.sleep(int(NS.config.data.get("sync_interval", 10))) try: NS._int.wclient.write("clusters/%s/sync_status" % NS.tendrl_context.integration_id, "in_progress", prevExist=False) except (etcd.EtcdAlreadyExist, etcd.EtcdCompareFailed) as ex: pass cluster_data = ceph.heartbeat(NS.tendrl_context.cluster_id) self.on_heartbeat(cluster_data) _cluster = NS.tendrl.objects.Cluster( integration_id=NS.tendrl_context.integration_id) if _cluster.exists(): _cluster.sync_status = "done" _cluster.last_sync = str(now()) _cluster.save() Event( Message( priority="info", publisher=NS.publisher_id, payload={"message": "%s complete" % self.__class__.__name__}))
def get_mail_client(self):
    """Return an SMTP client built from the admin mail configuration.

    Uses SMTP_SSL when the configured auth mode is SSL, otherwise a
    plain SMTP connection (upgraded via STARTTLS when any auth mode is
    configured).

    Raises NotificationDispatchError when the configuration is missing
    or the connection cannot be established.
    """
    if not self.admin_config:
        raise NotificationDispatchError(
            "Admin mail configuration is required for dispatching email"
            " notification"
        )
    if (
        self.admin_config.get('auth') is not None and
        self.admin_config['auth'] == SSL_AUTHENTICATION
    ):
        try:
            return smtplib.SMTP_SSL(
                self.admin_config['email_smtp_server'],
                int(self.admin_config['email_smtp_port'])
            )
        except Exception as ex:
            # CLEANUP: was ``except (smtplib.socket.gaierror,
            # smtplib.SMTPException, Exception)`` -- listing specific
            # types alongside Exception is redundant; the equivalent
            # broad catch is kept to preserve behavior.
            Event(
                ExceptionMessage(
                    priority="error",
                    publisher="alerting",
                    payload={
                        "message": 'Failed to fetch client for smtp'
                                   ' server %s and smtp port %s' % (
                                       self.admin_config[
                                           'email_smtp_server'],
                                       str(self.admin_config[
                                           'email_smtp_port']),
                                   ),
                        "exception": ex
                    }
                )
            )
            raise NotificationDispatchError(str(ex))
    else:
        try:
            server = smtplib.SMTP(
                self.admin_config['email_smtp_server'],
                int(self.admin_config['email_smtp_port'])
            )
            if self.admin_config['auth'] != '':
                server.starttls()
            return server
        except (smtplib.socket.gaierror, smtplib.SMTPException) as ex:
            Event(
                ExceptionMessage(
                    priority="error",
                    publisher="alerting",
                    payload={
                        "message": 'Failed to fetch client for smtp'
                                   ' server %s and smtp port %s' % (
                                       self.admin_config[
                                           'email_smtp_server'],
                                       str(self.admin_config[
                                           'email_smtp_port']),
                                   ),
                        "exception": ex
                    }
                )
            )
            raise NotificationDispatchError(str(ex))
def run(self):
    """Atom: stop then delete a gluster volume and prune it from etcd.

    The preliminary stop is best-effort (the volume may already be
    stopped); the delete's exit status is checked, and on failure the
    atom aborts before removing the central-store entry.
    """
    vol_id = self.parameters['Volume.vol_id']
    Event(
        Message(
            priority="info",
            publisher=NS.publisher_id,
            payload={
                "message": "Stopping the volume %s before delete" %
                self.parameters['Volume.volname']
            },
            job_id=self.parameters["job_id"],
            flow_id=self.parameters["flow_id"],
            cluster_id=NS.tendrl_context.integration_id,
        ))
    subprocess.call([
        'gluster',
        'volume',
        'stop',
        self.parameters.get('Volume.volname'),
        '--mode=script'
    ])
    Event(
        Message(
            priority="info",
            publisher=NS.publisher_id,
            payload={
                "message": "Deleting the volume %s" %
                self.parameters['Volume.volname']
            },
            job_id=self.parameters["job_id"],
            flow_id=self.parameters["flow_id"],
            cluster_id=NS.tendrl_context.integration_id,
        ))
    rc = subprocess.call([
        'gluster',
        'volume',
        'delete',
        self.parameters.get('Volume.volname'),
        '--mode=script'
    ])
    if rc != 0:
        # BUG FIX: the delete exit status was previously ignored; the
        # etcd subtree was removed and success reported even when the
        # CLI delete failed.
        Event(
            Message(
                priority="error",
                publisher=NS.publisher_id,
                payload={
                    "message": "Failed to delete the volume %s" %
                    self.parameters['Volume.volname']
                },
                job_id=self.parameters["job_id"],
                flow_id=self.parameters["flow_id"],
                cluster_id=NS.tendrl_context.integration_id,
            ))
        return False
    NS.etcd_orm.client.delete("clusters/%s/Volumes/%s" %
                              (NS.tendrl_context.integration_id,
                               vol_id),
                              recursive=True)
    Event(
        Message(
            priority="info",
            publisher=NS.publisher_id,
            payload={
                "message": "Deleted the volume %s" %
                self.parameters['Volume.volname']
            },
            job_id=self.parameters["job_id"],
            flow_id=self.parameters["flow_id"],
            cluster_id=NS.tendrl_context.integration_id,
        ))
    return True
def run(self):
    """Atom: resize an rbd image via the ceph API wrapper.

    Emits an error event and returns False when the API reports an
    error; otherwise logs success and returns True.
    """
    attrs = dict(pool_id=self.parameters['Rbd.pool_id'],
                 size=str(self.parameters['Rbd.size']))
    Event(
        Message(
            priority="info",
            publisher=NS.publisher_id,
            payload={
                "message": "Re-sizing rbd %s on pool %s to %s" %
                (self.parameters['Rbd.name'],
                 self.parameters['Rbd.pool_id'],
                 self.parameters['Rbd.size'])
            },
            job_id=self.parameters['job_id'],
            flow_id=self.parameters['flow_id'],
            cluster_id=NS.tendrl_context.integration_id,
        ))
    crud = Crud()
    ret_val = crud.update("rbd", self.parameters['Rbd.name'], attrs)
    if ret_val['response'] is not None and \
            ret_val['response']['error'] is True:
        Event(
            Message(
                # FIX: failure was logged at "info"; "error" matches the
                # failure events of the sibling atoms in this package.
                priority="error",
                publisher=NS.publisher_id,
                payload={
                    "message": "Failed to resize rbd %s."
                               " Error: %s" %
                               (self.parameters['Rbd.name'],
                                ret_val['error_status'])
                },
                job_id=self.parameters['job_id'],
                flow_id=self.parameters['flow_id'],
                cluster_id=NS.tendrl_context.integration_id,
            ))
        return False
    Event(
        Message(
            priority="info",
            publisher=NS.publisher_id,
            payload={
                "message": "Successfully re-sized rbd %s on pool-id %s to "
                           "%s" % (self.parameters['Rbd.name'],
                                   self.parameters['Rbd.pool_id'],
                                   self.parameters['Rbd.size'])
            },
            job_id=self.parameters['job_id'],
            flow_id=self.parameters['flow_id'],
            cluster_id=NS.tendrl_context.integration_id,
        ))
    return True
def run(self):
    """Atom: delete a ceph pool and prune it from the central store.

    Emits an error event and returns False when the API reports an
    error; otherwise removes the etcd subtree and returns True.
    """
    pool_id = self.parameters['Pool.pool_id']
    Event(
        Message(
            priority="info",
            publisher=NS.publisher_id,
            payload={
                "message": "Deleting pool-id %s" %
                self.parameters['Pool.pool_id'],
            },
            job_id=self.parameters['job_id'],
            flow_id=self.parameters['flow_id'],
            cluster_id=NS.tendrl_context.integration_id,
        ))
    crud = Crud()
    ret_val = crud.delete("pool", pool_id)
    if ret_val['response'] is not None and \
            ret_val['response']['error'] is True:
        Event(
            Message(
                # FIX: failure was logged at "info"; "error" matches the
                # sibling delete-pool atom.
                priority="error",
                publisher=NS.publisher_id,
                payload={
                    "message": "Failed to delete pool %s."
                               " Error: %s" %
                               (self.parameters['Pool.poolname'],
                                ret_val['error_status'])
                },
                job_id=self.parameters['job_id'],
                flow_id=self.parameters['flow_id'],
                cluster_id=NS.tendrl_context.integration_id,
            ))
        return False
    # TODO(shtripat) Use namespace tree and not etcd orm later
    NS.etcd_orm.client.delete("clusters/%s/Pools/%s" %
                              (NS.tendrl_context.integration_id,
                               self.parameters['Pool.pool_id']),
                              recursive=True)
    Event(
        Message(
            priority="info",
            publisher=NS.publisher_id,
            payload={
                "message": "Deleted pool-id %s" %
                self.parameters['Pool.pool_id'],
            },
            job_id=self.parameters['job_id'],
            flow_id=self.parameters['flow_id'],
            cluster_id=NS.tendrl_context.integration_id,
        ))
    return True
def run(self):
    """Atom: delete a ceph pool and its etcd representation.

    Issues the delete, waits for the provisioner request to settle,
    then prunes the pool subtree from the central store. Returns False
    (after an error event) when the request fails.
    """
    pool_id = self.parameters['Pool.pool_id']
    job_id = self.parameters['job_id']
    flow_id = self.parameters['flow_id']
    cluster_id = NS.tendrl_context.integration_id

    def _emit(priority, text):
        Event(
            Message(
                priority=priority,
                publisher=NS.publisher_id,
                payload={"message": text},
                job_id=job_id,
                flow_id=flow_id,
                cluster_id=cluster_id,
            )
        )

    _emit("info", "Deleting pool-id %s" % self.parameters['Pool.pool_id'])
    crud = Crud()
    resp = crud.delete("pool", pool_id)
    try:
        crud.sync_request_status(resp['request'])
    except RequestStateError as ex:
        _emit("error", "Failed to delete pool %s. Error: %s" %
              (self.parameters['Pool.poolname'], ex))
        return False
    # TODO(shtripat) Use namespace tree and not etcd orm later
    NS._int.wclient.delete("clusters/%s/Pools/%s" %
                           (cluster_id, self.parameters['Pool.pool_id']),
                           recursive=True)
    _emit("info", "Deleted pool-id %s" % self.parameters['Pool.pool_id'])
    return True
def parse_cluster(self, cluster_id):
    """Build a ClusterSummary for *cluster_id* from the central store.

    Utilization is best-effort (missing data yields zeros). An
    unreadable sds name aborts with None, since the summary cannot be
    typed without it.
    """
    utilization = {}
    try:
        utilization = central_store_util.read('/clusters/%s/Utilization' %
                                              cluster_id)
    except (EtcdKeyNotFound, AttributeError, EtcdException) as ex:
        Event(
            ExceptionMessage(priority="debug",
                             publisher=NS.publisher_id,
                             payload={
                                 "message":
                                 'Utilization not available for cluster'
                                 ' %s.' % cluster_id,
                                 "exception": ex
                             }))
    used = 0
    total = 0
    percent_used = 0
    # Field names differ between sds types; try both spellings.
    if utilization.get('used_capacity'):
        used = utilization.get('used_capacity')
    elif utilization.get('used'):
        used = utilization.get('used')
    if utilization.get('raw_capacity'):
        total = utilization.get('raw_capacity')
    elif utilization.get('total'):
        total = utilization.get('total')
    if utilization.get('pcnt_used'):
        percent_used = utilization.get('pcnt_used')
    try:
        sds_name = central_store_util.get_cluster_sds_name(cluster_id)
    except (EtcdKeyNotFound, EtcdException, AttributeError) as ex:
        Event(
            ExceptionMessage(priority="debug",
                             publisher=NS.publisher_id,
                             payload={
                                 "message":
                                 'Error caught fetching sds name of'
                                 ' cluster %s.' % cluster_id,
                                 "exception": ex
                             }))
        # BUG FIX: without this return, execution fell through to the
        # summary construction below with ``sds_name`` unbound,
        # raising NameError instead of degrading gracefully.
        return None
    return ClusterSummary(
        utilization={
            'total': int(total),
            'used': int(used),
            'percent_used': float(percent_used)
        },
        iops=str(self.get_cluster_iops(cluster_id)),
        hosts_count=self.parse_host_count(cluster_id),
        sds_type=sds_name,
        node_summaries=self.cluster_nodes_summary(cluster_id),
        sds_det=NS.sds_monitoring_manager.get_cluster_summary(
            cluster_id, central_store_util.get_cluster_name(cluster_id)),
        cluster_id=cluster_id,
    )
def parse_host_count(self, cluster_id):
    """Tally node states and alert severities for a cluster.

    Returns a dict with 'total', 'down', 'crit_alert_count' and
    'warn_alert_count' counters across the cluster's nodes.
    """
    counts = {
        'total': 0,
        'down': 0,
        'crit_alert_count': 0,
        'warn_alert_count': 0
    }
    for node_id in central_store_util.get_cluster_node_ids(cluster_id):
        try:
            node_context = central_store_util.read(
                '/clusters/%s/nodes/%s/NodeContext' % (cluster_id,
                                                       node_id))
        except (EtcdKeyNotFound, AttributeError, EtcdException) as ex:
            Event(
                ExceptionMessage(priority="debug",
                                 publisher=NS.publisher_id,
                                 payload={
                                     "message":
                                     'Failed to fetch node-context from'
                                     ' /clusters/%s/nodes/%s/NodeContext'
                                     % (cluster_id, node_id),
                                     "exception": ex
                                 }))
            continue
        status = node_context.get('status')
        # NOTE(review): only nodes that report a status are counted in
        # 'total' -- preserved from the original nesting; confirm.
        if status:
            if status != 'UP':
                counts['down'] += 1
            counts['total'] += 1
        alerts = []
        try:
            alerts = central_store_util.get_node_alerts(node_id)
        except EtcdKeyNotFound:
            # No alerts recorded for this node; nothing to count.
            pass
        except (AttributeError, EtcdException) as ex:
            Event(
                ExceptionMessage(priority="debug",
                                 publisher=NS.publisher_id,
                                 payload={
                                     "message":
                                     'Error fetching alerts for node %s'
                                     % (node_id),
                                     "exception": ex
                                 }))
        for alert in alerts:
            severity = alert.get('severity')
            if severity == 'CRITICAL':
                counts['crit_alert_count'] += 1
            elif severity == 'WARNING':
                counts['warn_alert_count'] += 1
    return counts
def load_definition(self):
    """Return this flow's YAML definition from the namespace.

    Object-bound flows (``self.obj`` present) resolve under
    ``objects.<obj>.flows``, others under namespace-level ``flows``.
    ``self.to_str`` is assigned in ``finally`` so the fully-qualified
    flow name is available even when the lookup raises.

    Raises Exception when no definition is found.
    """
    cls_name = self.__class__.__name__
    if hasattr(self, "obj"):
        obj_name = self.obj.__name__
        logger.log(
            "debug", NS.publisher_id,
            {
                "message": "Load definitions for namespace.%s."
                "objects.%s.flows.%s" % (self._ns.ns_src, obj_name,
                                         cls_name)
            })
        try:
            return self._ns.get_obj_flow_definition(obj_name, cls_name)
        except KeyError as ex:
            msg = "Could not find definitions for " \
                "namespace.%s.objects.%s.flows.%s" % (self._ns.ns_src,
                                                      obj_name, cls_name)
            Event(
                ExceptionMessage(priority="debug",
                                 publisher=NS.publisher_id,
                                 payload={
                                     "message": "Error",
                                     "exception": ex
                                 }))
            logger.log("debug", NS.publisher_id, {"message": msg})
            # Re-raise as a generic Exception carrying the lookup path.
            raise Exception(msg)
        finally:
            self.to_str = "%s.objects.%s.flows.%s" % (self._ns.ns_name,
                                                      obj_name, cls_name)
    else:
        logger.log(
            "debug", NS.publisher_id,
            {
                "message": "Load definitions for namespace.%s."
                "flows.%s" % (self._ns.ns_src, cls_name)
            })
        try:
            return self._ns.get_flow_definition(cls_name)
        except KeyError as ex:
            msg = "Could not find definitions for namespace.%s.flows.%s" %\
                (self._ns.ns_src, cls_name)
            Event(
                ExceptionMessage(priority="debug",
                                 publisher=NS.publisher_id,
                                 payload={
                                     "message": "Error",
                                     "exception": ex
                                 }))
            logger.log("debug", NS.publisher_id, {"message": msg})
            raise Exception(msg)
        finally:
            self.to_str = "%s.flows.%s" % (self._ns.ns_name, cls_name)
def expand_gluster(parameters):
    """Set up gluster on new nodes and expand the existing cluster.

    Raises FlowExecutionFailedError when node setup fails or any
    per-node expansion step fails.
    """
    node_ips = get_node_ips(parameters)
    plugin = NS.gluster_provisioner.get_plugin()
    integration_id = parameters['TendrlContext.integration_id']

    def _emit(text):
        Event(
            Message(job_id=parameters['job_id'],
                    flow_id=parameters['flow_id'],
                    priority="info",
                    publisher=NS.publisher_id,
                    payload={"message": text}))

    _emit("Setting up gluster nodes %s" % integration_id)
    ret_val = plugin.setup_gluster_node(
        node_ips, repo=NS.config.data.get('glusterfs_repo', None))
    if ret_val is not True:
        raise FlowExecutionFailedError("Error setting up gluster node")
    _emit("Expanding gluster cluster %s" % integration_id)
    failed_nodes = [node for node in node_ips
                    if not plugin.expand_gluster_cluster(node)]
    if failed_nodes:
        raise FlowExecutionFailedError(
            "Error expanding gluster cluster. Following nodes failed: %s" %
            ",".join(failed_nodes))
    _emit("Expanded Gluster Cluster %s. New nodes are: %s" %
          (integration_id, ",".join(node_ips)))
def run(self):
    """Atom: delete an erasure-coded profile and its etcd entry.

    Issues the delete, waits for the provisioner request to settle,
    then prunes the profile key from the central store. Returns False
    (after an error event) when the request fails.
    """
    profile_name = self.parameters['ECProfile.name']
    job_id = self.parameters['job_id']
    flow_id = self.parameters['flow_id']
    cluster_id = NS.tendrl_context.integration_id

    def _emit(priority, text):
        Event(
            Message(
                priority=priority,
                publisher=NS.publisher_id,
                payload={"message": text},
                job_id=job_id,
                flow_id=flow_id,
                cluster_id=cluster_id,
            )
        )

    _emit("info", "Deleting ec-profile %s" % profile_name)
    crud = Crud()
    resp = crud.delete("ec_profile", profile_name)
    try:
        crud.sync_request_status(resp['request'])
    except RequestStateError as ex:
        _emit("error", "Failed to delete ec-profile %s. Error: %s" %
              (profile_name, ex))
        return False
    NS._int.wclient.delete("clusters/%s/ECProfiles/%s" %
                           (cluster_id, profile_name),
                           recursive=True)
    _emit("info", "Deleted ec-profile %s" % profile_name)
    return True
def run(self):
    """Atom: validate pool update parameter combinations.

    Rejects renaming a pool together with other attribute changes, and
    rejects a pg_num that does not grow. Raises
    AtomExecutionFailedError on invalid input, otherwise returns True.
    """
    Event(
        Message(
            priority="info",
            publisher=NS.publisher_id,
            payload={"message": "Checking if update parameters are valid"},
            job_id=self.parameters['job_id'],
            flow_id=self.parameters['flow_id'],
            cluster_id=NS.tendrl_context.integration_id,
        ))
    # FIX: 'Pool.pg_num' was listed twice in this condition.
    if 'Pool.poolname' in self.parameters and \
        ('Pool.pg_num' in self.parameters or
         'Pool.size' in self.parameters or
         'Pool.min_size' in self.parameters or
         'Pool.quota_enabled' in self.parameters):
        Event(
            Message(
                # FIX: validation failures were logged at "info"; use
                # "error" to match the sibling validation atom.
                priority="error",
                publisher=NS.publisher_id,
                payload={
                    "message":
                    "Invalid combination of pool update parameters. "
                    "Pool name shouldnt be updated with other parameters."
                },
                job_id=self.parameters['job_id'],
                flow_id=self.parameters['flow_id'],
                cluster_id=NS.tendrl_context.integration_id,
            ))
        # FIX: raise message read "shoulnt be update"; aligned with the
        # event text above.
        raise AtomExecutionFailedError(
            "Invalid combination of pool update parameters. "
            "Pool name shouldnt be updated with other parameters.")
    if 'Pool.pg_num' in self.parameters:
        fetched_pool = Pool(pool_id=self.parameters['Pool.pool_id']).load()
        # FIX: compare against int(...) -- pg_num may come back from the
        # central store as a string (matches the sibling check atom).
        if self.parameters['Pool.pg_num'] <= int(fetched_pool.pg_num):
            Event(
                Message(
                    priority="error",
                    publisher=NS.publisher_id,
                    payload={
                        "message": "New pg-num cannot be less than "
                                   "existing value"
                    },
                    job_id=self.parameters['job_id'],
                    flow_id=self.parameters['flow_id'],
                    cluster_id=NS.tendrl_context.integration_id,
                ))
            raise AtomExecutionFailedError(
                "New pg-num cannot be less than existing value")
    return True
def run(self):
    """Atom: delete an erasure-coded profile via the API wrapper.

    Emits an error event and returns False on API failure; otherwise
    removes the etcd entry and returns True.
    """
    Event(
        Message(
            priority="info",
            publisher=NS.publisher_id,
            payload={
                "message": "Deleting ec-profile %s" %
                self.parameters['ECProfile.name'],
            },
            job_id=self.parameters['job_id'],
            flow_id=self.parameters['flow_id'],
            cluster_id=NS.tendrl_context.integration_id,
        ))
    crud = Crud()
    ret_val = crud.delete("ec_profile", self.parameters['ECProfile.name'])
    if ret_val['response'] is not None and \
            ret_val['response']['error'] is True:
        Event(
            Message(
                # FIX: failure was logged at "info"; "error" matches the
                # sibling ec-profile delete atom.
                priority="error",
                publisher=NS.publisher_id,
                payload={
                    "message": "Failed to delete ec-profile %s."
                               " Error: %s" %
                               (self.parameters['ECProfile.name'],
                                ret_val['error_status'])
                },
                job_id=self.parameters['job_id'],
                flow_id=self.parameters['flow_id'],
                cluster_id=NS.tendrl_context.integration_id,
            ))
        return False
    NS.etcd_orm.client.delete("clusters/%s/ECProfiles/%s" %
                              (NS.tendrl_context.integration_id,
                               self.parameters['ECProfile.name']),
                              recursive=True)
    Event(
        Message(
            priority="info",
            publisher=NS.publisher_id,
            payload={
                "message": "Deleted ec-profile %s" %
                self.parameters['ECProfile.name'],
            },
            job_id=self.parameters['job_id'],
            flow_id=self.parameters['flow_id'],
            cluster_id=NS.tendrl_context.integration_id,
        ))
    return True
def test_write():
    """Exercise Event._write both with and without a socket path set."""
    # Build a minimal global NS the Event machinery expects.
    setattr(__builtin__, "NS", maps.NamedDict())
    setattr(NS, "_int", maps.NamedDict())
    NS["config"] = maps.NamedDict()
    NS.config["data"] = maps.NamedDict()
    NS.config.data['tags'] = "test"
    NS.publisher_id = "node_context"
    NS.config.data['logging_socket_path'] = "/var/run/tendrl/message.sock"
    msg = Message(priority="info",
                  publisher="node",
                  payload={"message": "Test message"},
                  node_id="Test id")
    event_to_test = Event(msg)
    # First write uses the configured socket path ...
    event_to_test._write(message_new)
    # ... then again with none, covering the fallback branch.
    event_to_test.socket_path = None
    event_to_test._write(message_new)