def run(self):
    """Check that the volume named in the parameters exists.

    Emits an informational event, then tries to load the Volume object
    from the central store. Returns True when it loads, False (after a
    warning event) when the key is missing.
    """
    volname = self.parameters['Volume.volname']
    job_id = self.parameters['job_id']
    flow_id = self.parameters['flow_id']
    cluster_id = NS.tendrl_context.integration_id
    Event(
        Message(
            priority="info",
            publisher=NS.publisher_id,
            payload={"message": "Checking if volume %s exists" % volname},
            job_id=job_id,
            flow_id=flow_id,
            cluster_id=cluster_id,
        ))
    try:
        # Loading raises EtcdKeyNotFound when the volume key is absent.
        Volume(vol_id=self.parameters['Volume.vol_id']).load()
    except etcd.EtcdKeyNotFound:
        Event(
            Message(
                priority="warning",
                publisher=NS.publisher_id,
                payload={"message": "Volume %s doesnt exist" % volname},
                job_id=job_id,
                flow_id=flow_id,
                cluster_id=cluster_id,
            ))
        return False
    return True
def gluster_provision_bricks(self, brick_dictionary, disk_type=None,
                             disk_count=None, stripe_count=None):
    """Provision bricks through gluster_brick_provision.

    Returns True when provisioning succeeds (rc == 0 and no stderr),
    otherwise emits an error event and returns False.
    """
    out, err, rc = gluster_brick_provision.provision_disks(
        brick_dictionary, disk_type, disk_count, stripe_count)
    if rc == 0 and err == "":
        # Bug fix: the success message was emitted with priority="error";
        # a successful provisioning is informational.
        Event(
            Message(
                priority="info",
                publisher=NS.publisher_id,
                payload={"message": "Bricks Provisioned successfully"},
                cluster_id=NS.tendrl_context.integration_id,
            ))
    else:
        Event(
            Message(
                priority="error",
                publisher=NS.publisher_id,
                payload={
                    "message": "Bricks Provisioning Failed. Error %s"
                               % (str(out))
                },
                cluster_id=NS.tendrl_context.integration_id,
            ))
        return False
    return True
def main():
    """Daemon entry point for the tendrl gluster integration service.

    Registers namespaces, wires up the central-store and sync threads,
    verifies this node belongs to an sds cluster, starts the integration
    manager, then blocks until SIGTERM/SIGINT.
    """
    # Register the gluster and tendrl namespaces on the global NS object.
    gluster_integration.GlusterIntegrationNS()
    TendrlNS()
    NS.type = "sds"
    NS.publisher_id = "gluster_integration"
    NS.central_store_thread = central_store.GlusterIntegrationEtcdCentralStore(
    )
    NS.state_sync_thread = sds_sync.GlusterIntegrationSdsSyncStateThread()
    NS.node_context.save()
    try:
        # tendrl_context must already exist in the store; its presence
        # proves the node was imported into / created as an sds cluster.
        NS.tendrl_context = NS.tendrl_context.load()
        Event(
            Message(priority="info",
                    publisher=NS.publisher_id,
                    payload={
                        "message": "Integration %s is part of sds cluster"
                                   % NS.tendrl_context.integration_id
                    }))
    except etcd.EtcdKeyNotFound:
        # No context in etcd: refuse to start with an explanatory error.
        Event(
            Message(priority="error",
                    publisher=NS.publisher_id,
                    payload={
                        "message": "Node %s is not part of any sds cluster"
                                   % NS.node_context.node_id
                    }))
        raise Exception("Integration cannot be started,"
                        " please Import or Create sds cluster"
                        " in Tendrl and include Node %s"
                        % NS.node_context.node_id)
    NS.tendrl_context.save()
    NS.gluster.definitions.save()
    NS.gluster.config.save()

    # Provisioning operations are delegated to the gdeploy plugin.
    pm = ProvisioningManager("GdeployPlugin")
    NS.gdeploy_plugin = pm.get_plugin()

    m = GlusterIntegrationManager()
    m.start()

    complete = gevent.event.Event()

    def shutdown():
        # Signal handler: log and flag the wait loop below to exit.
        Event(
            Message(priority="info",
                    publisher=NS.publisher_id,
                    payload={"message": "Signal handler: stopping"}))
        complete.set()

    gevent.signal(signal.SIGTERM, shutdown)
    gevent.signal(signal.SIGINT, shutdown)

    # Wake every second so the gevent hub can dispatch pending signals.
    while not complete.is_set():
        complete.wait(timeout=1)
def test_validate():
    """Message.validate(): requires a priority, a publisher, a dict
    payload, and a flow_id whenever a job_id is supplied."""
    init()
    invalid_kwargs = [
        dict(priority=None, publisher="node_context",
             payload={"message": "Test Message"}),
        dict(priority="info", publisher=None,
             payload={"message": "Test Message"}),
        dict(priority="info", publisher="node_context",
             payload='{"message":"Test Message"}'),
        dict(priority="info", publisher="node_context",
             payload={"message": "Test Message"}, job_id="test_job"),
    ]
    for kwargs in invalid_kwargs:
        assert Message(**kwargs).validate() is False
    # A fully specified message validates.
    msg = Message(priority="info",
                  publisher="node_context",
                  payload={"message": "Test Message"},
                  job_id="test_job",
                  flow_id="test_flow")
    assert msg.validate() is True
def on_map(self, sync_type, osd_map):
    """Completion hook for an awaited osd map version.

    Calls self.complete() once the observed osd map version has reached
    the awaited one; otherwise just logs that the check is pending.
    """
    assert sync_type == OsdMap
    assert self._await_version is not None
    seen, wanted = osd_map.version, self._await_version
    if seen >= wanted:
        Event(
            Message(
                priority="debug",
                publisher=NS.publisher_id,
                payload={
                    "message": "check passed (%s >= %s)" % (seen, wanted)
                }
            )
        )
        self.complete()
    else:
        Event(
            Message(
                priority="debug",
                publisher=NS.publisher_id,
                payload={
                    "message": "check pending (%s < %s)" % (seen, wanted)
                }
            )
        )
def shrink_gluster_cluster(self, host):
    """Remove ``host`` from the gluster cluster this node belongs to.

    Returns True on success, False (after emitting an error event)
    when the removal fails.
    """
    self._reload_modules()
    current_host = NS.node_context.fqdn
    out, err, rc = remove_host.remove_host([current_host, host])
    if rc == 0:
        Event(
            Message(
                priority="info",
                publisher=NS.publisher_id,
                payload={
                    "message": "gluster cluster shrinked successfully"
                },
                cluster_id=NS.tendrl_context.integration_id,
            ))
    else:
        # Bug fix: the failure was logged with priority="debug", hiding it
        # at normal log levels; failures are "error" (as in stop_volume).
        Event(
            Message(
                priority="error",
                publisher=NS.publisher_id,
                payload={
                    "message": "Error while shrinking gluster cluster"
                               ". Details: %s" % str(out)
                },
                cluster_id=NS.tendrl_context.integration_id,
            ))
        return False
    return True
def run(self):
    """Stop the gluster volume named in the parameters via the CLI.

    Emits an event before and after the stop and always returns True
    (the subprocess exit code is not inspected, as in the original).
    """
    volname = self.parameters['Volume.volname']

    def _notify(text):
        # Every event in this flow shares the same routing metadata.
        Event(
            Message(
                priority="info",
                publisher=NS.publisher_id,
                payload={"message": text},
                job_id=self.parameters["job_id"],
                flow_id=self.parameters["flow_id"],
                cluster_id=NS.tendrl_context.integration_id,
            ))

    _notify("Stopping the volume %s" % volname)
    subprocess.call([
        'gluster', 'volume', 'stop',
        self.parameters.get('Volume.volname'), '--mode=script'
    ])
    _notify("Successfully stopped the volume %s" % volname)
    return True
def create_gluster_cluster(self, hosts):
    """Create a gluster trusted storage pool out of ``hosts``.

    Returns True on success, False (after emitting an error event)
    when cluster creation fails.
    """
    self._reload_modules()
    out, err, rc = create_cluster.create_cluster(hosts)
    if rc == 0:
        Event(
            Message(
                priority="info",
                publisher=NS.publisher_id,
                payload={
                    "message": "gluster cluster created successfully"
                },
                cluster_id=NS.tendrl_context.integration_id,
            ))
    else:
        # Bug fix: the failure was logged with priority="debug", hiding it
        # at normal log levels; failures are "error" (as in stop_volume).
        Event(
            Message(
                priority="error",
                publisher=NS.publisher_id,
                payload={
                    "message": "Error while creating gluster cluster"
                               ". Details: %s" % str(out)
                },
                cluster_id=NS.tendrl_context.integration_id,
            ))
        return False
    return True
def stop_volume(self, volume_name, host=None, force=None):
    """Stop ``volume_name``, optionally on a given host and/or forcibly.

    Returns True when the underlying stop command exits 0; otherwise
    emits an error event and returns False.
    """
    extra = {}
    if host:
        extra["host"] = host
    if force:
        extra["force"] = force
    out, err, rc = stop_volume.stop_volume(volume_name, **extra)
    if rc != 0:
        Event(
            Message(
                priority="error",
                publisher=NS.publisher_id,
                payload={
                    "message": "Volume stop failed for volume "
                               "%s. Details: %s" % (volume_name, out)
                },
                cluster_id=NS.tendrl_context.integration_id,
            ))
        return False
    Event(
        Message(
            priority="info",
            publisher=NS.publisher_id,
            payload={
                "message": "Volume %s stopped successfully" % volume_name
            },
            cluster_id=NS.tendrl_context.integration_id,
        ))
    return True
def run(self):
    """Succeed only when the volume is absent from the central store.

    Returns True when reading the volume key raises EtcdKeyNotFound,
    False when the key exists.
    """
    volname = self.parameters['Volume.volname']
    meta = dict(
        job_id=self.parameters["job_id"],
        flow_id=self.parameters["flow_id"],
        cluster_id=NS.tendrl_context.integration_id,
    )
    Event(
        Message(
            priority="info",
            publisher=NS.publisher_id,
            payload={
                "message": "Checking if volume %s doesnt exist" % volname
            },
            **meta))
    key = 'clusters/%s/Volumes/%s' % (NS.tendrl_context.integration_id,
                                      self.parameters['Volume.vol_id'])
    try:
        NS._int.client.read(key)
    except etcd.EtcdKeyNotFound:
        # A missing key is the desired outcome for this check.
        Event(
            Message(
                priority="warning",
                publisher=NS.publisher_id,
                payload={
                    "message": "Volume %s doesnt exist" % volname
                },
                **meta))
        return True
    return False
def run(self):
    """Validate pool update parameters: a new pg_num must be strictly
    greater than the pool's current pg_num.

    Raises AtomExecutionFailedError on violation; returns True otherwise.
    """
    Event(
        Message(
            priority="info",
            publisher=NS.publisher_id,
            payload={"message": "Checking if update parameters are valid"},
            job_id=self.parameters['job_id'],
            flow_id=self.parameters['flow_id'],
            cluster_id=NS.tendrl_context.integration_id,
        ))
    if 'Pool.pg_num' not in self.parameters:
        return True
    current = Pool(pool_id=self.parameters['Pool.pool_id']).load()
    if self.parameters['Pool.pg_num'] > int(current.pg_num):
        return True
    # pg_num can only grow; reject a shrink or no-op with an error.
    Event(
        Message(
            priority="error",
            publisher=NS.publisher_id,
            payload={
                "message": "New pg-num cannot be less than "
                           "existing value"
            },
            job_id=self.parameters['job_id'],
            flow_id=self.parameters['flow_id'],
            cluster_id=NS.tendrl_context.integration_id,
        ))
    raise AtomExecutionFailedError(
        "New pg-num cannot be less than existing value")
def run(self):
    """Stop the volume through the gdeploy plugin.

    Returns True when the plugin reports success, False (after an error
    event) otherwise.
    """
    volname = self.parameters['Volume.volname']
    stopped = NS.gdeploy_plugin.stop_volume(
        self.parameters.get('Volume.volname'))
    if not stopped:
        Event(
            Message(
                priority="error",
                publisher=NS.publisher_id,
                payload={
                    "message": "Failed to stop the volume %s" % volname
                },
                job_id=self.parameters["job_id"],
                flow_id=self.parameters["flow_id"],
                cluster_id=NS.tendrl_context.integration_id,
            ))
        return False
    Event(
        Message(
            priority="info",
            publisher=NS.publisher_id,
            payload={
                "message": "Stopped the volume %s successfully" % volname
            },
            job_id=self.parameters["job_id"],
            flow_id=self.parameters["flow_id"],
            cluster_id=NS.tendrl_context.integration_id,
        ))
    return True
def run(self):
    """Delete an rbd from a pool and purge its entry from the store.

    Returns True on success; on a failed ceph request emits an event
    and returns False.
    """
    pool_id = self.parameters['Rbd.pool_id']
    rbd_name = self.parameters['Rbd.name']
    meta = dict(
        job_id=self.parameters['job_id'],
        flow_id=self.parameters['flow_id'],
        cluster_id=NS.tendrl_context.integration_id,
    )
    Event(
        Message(
            priority="info",
            publisher=NS.publisher_id,
            payload={
                "message": "Deleting rbd %s on pool %s" %
                           (self.parameters['Rbd.name'],
                            self.parameters['Rbd.pool_id'])
            },
            **meta))
    crud = RbdCrud()
    resp = crud.delete_rbd(pool_id, rbd_name)
    try:
        crud.sync_request_status(resp['request'])
    except RequestStateError as ex:
        Event(
            Message(
                priority="info",
                publisher=NS.publisher_id,
                payload={
                    "message": "Failed to delete rbd %s."
                               " Error: %s" % (self.parameters['Rbd.name'],
                                               ex)
                },
                **meta))
        return False
    # Remove the rbd entry from the central store as well.
    NS._int.wclient.delete(
        "clusters/%s/Pools/%s/Rbds/%s" % (
            NS.tendrl_context.integration_id,
            self.parameters['Rbd.pool_id'],
            self.parameters['Rbd.name']),
        recursive=True)
    Event(
        Message(
            priority="info",
            publisher=NS.publisher_id,
            payload={
                "message": "Deleted rbd %s on pool-id %s" %
                           (self.parameters['Rbd.name'],
                            self.parameters['Rbd.pool_id'])
            },
            **meta))
    return True
def add_osds(parameters):
    """Create OSDs on every node listed in Cluster.node_configuration.

    Emits an event before and after creation; when no provisioning IPs
    are configured this is a no-op.
    """
    # Get the list of existing mons
    created_mons = ceph_help.existing_mons(parameters)
    # Only the per-node config is used; the node name was an unused loop
    # variable in the original append loop.
    osd_ips = [
        config["provisioning_ip"]
        for config in parameters["Cluster.node_configuration"].itervalues()
    ]
    # If osds passed create and add them
    if len(osd_ips) > 0:
        Event(
            Message(
                job_id=parameters['job_id'],
                flow_id=parameters['flow_id'],
                priority="info",
                publisher=NS.publisher_id,
                payload={
                    "message": "Creating OSDs on nodes: %s of cluster: %s" %
                    (str(osd_ips),
                     parameters['TendrlContext.integration_id'])
                }))
        create_ceph_help.create_osds(parameters, created_mons)
        Event(
            Message(
                job_id=parameters['job_id'],
                flow_id=parameters['flow_id'],
                priority="info",
                publisher=NS.publisher_id,
                payload={
                    "message": "Created OSDs on nodes: %s of cluster: %s" %
                    (str(osd_ips),
                     parameters['TendrlContext.integration_id'])
                }))
def test_constructor_Messsage():
    """Exercise Message construction: defaults, explicit id/timestamp,
    explicit caller dict, and node_id passthrough."""
    init()
    msg = Message(priority="info",
                  publisher="node_context",
                  payload={"message": "Test Message"})
    assert msg.priority == "info"
    assert msg.publisher == "node_context"
    assert msg.caller is not None
    assert msg.message_id is not None
    # Positional priority/publisher with explicit id and timestamp.
    msg = Message("info",
                  "node_context",
                  message_id=1,
                  timestamp=now(),
                  payload={"message": "Test Message"})
    assert msg.message_id == 1
    assert isinstance(msg.timestamp, datetime.datetime)
    # Build a caller dict from the current stack frame and pass it in.
    frame = getframeinfo(stack()[1][0])
    obj_caller = {
        "filename": frame.filename,
        "line_no": frame.lineno,
        "function": frame.function
    }
    msg = Message("info",
                  "node_context",
                  payload={"message": "Test Message"},
                  caller=obj_caller)
    msg = Message(priority="info",
                  publisher="node_context",
                  payload={"message": "Test Message"},
                  node_id="Test id")
    assert msg.node_id == "Test id"
def __init__(self, module_path, publisher_id=None, node_id=None,
             socket_path=None, **kwargs):
    """Wrap an ansible module located under the bundled modules package.

    Args:
        module_path: module file path relative to the modules package.
        publisher_id, node_id, socket_path: default to the NS-wide values.
        kwargs: arguments forwarded to the ansible module; must be
            non-empty.

    Raises:
        AnsibleModuleNotFound: when the module file does not exist.
        ValueError: when no module arguments were supplied.
    """
    self.module_path = modules.__path__[0] + "/" + module_path
    self.socket_path = socket_path or NS.config.data['logging_socket_path']
    self.publisher_id = publisher_id or NS.publisher_id
    self.node_id = node_id or NS.node_context.node_id
    if not os.path.isfile(self.module_path):
        Event(Message(priority="debug",
                      publisher=self.publisher_id,
                      payload={
                          "message": "Module path: %s does not exist" %
                                     self.module_path
                      },
                      node_id=self.node_id),
              socket_path=self.socket_path)
        raise AnsibleModuleNotFound(module_path=self.module_path)
    if not kwargs:
        # Idiom fix: "kwargs == {}" -> "not kwargs"; also raise with a
        # message instead of a bare ValueError so callers can see why.
        Event(Message(priority="debug",
                      publisher=self.publisher_id,
                      payload={"message": "Empty argument dictionary"},
                      node_id=self.node_id),
              socket_path=self.socket_path)
        raise ValueError("empty argument dictionary for ansible module %s"
                         % self.module_path)
    self.argument_dict = kwargs
    # Required by ansible's internal module protocol.
    self.argument_dict['_ansible_selinux_special_fs'] = \
        ['nfs', 'vboxsf', 'fuse', 'ramfs']
def run(self):
    """Pre-flight check: succeed only when no rebalance is running.

    Returns True when the volume's rebal_status is absent/None or one of
    "not applicable"/"not_started"/"completed"; False when a rebalance is
    in progress, the status is unexpected, or the volume key is missing.
    """
    Event(
        Message(
            priority="info",
            publisher=NS.publisher_id,
            payload={
                "message": "Checking if rebalance is not running"
            },
            job_id=self.parameters["job_id"],
            flow_id=self.parameters["flow_id"],
            cluster_id=NS.tendrl_context.integration_id,
        )
    )
    try:
        rebal_status = NS._int.client.read(
            'clusters/%s/Volumes/%s/rebal_status' % (
                NS.tendrl_context.integration_id,
                self.parameters['Volume.vol_id']
            )
        ).value
    except etcd.EtcdKeyNotFound:
        # (unused "as ex" binding removed)
        Event(
            Message(
                priority="error",
                publisher=NS.publisher_id,
                payload={
                    "message": "Volume %s not found" %
                               self.parameters['Volume.volname']
                },
                job_id=self.parameters["job_id"],
                flow_id=self.parameters["flow_id"],
                cluster_id=NS.tendrl_context.integration_id,
            )
        )
        return False
    if rebal_status is None:
        return True
    if rebal_status in ("not applicable", "not_started", "completed"):
        return True
    if rebal_status in ("in progress", "in_progress"):
        # Bug fix: the companion "is rebalance running" check recognizes
        # both "in progress" and "in_progress"; treat both as running
        # here too instead of only "in progress".
        return False
    # Any other status is unexpected: log it and fail the check.
    Event(
        Message(
            priority="info",
            publisher=NS.publisher_id,
            payload={
                "message": "Volume rebalance status is %s" % rebal_status
            },
            job_id=self.parameters["job_id"],
            flow_id=self.parameters["flow_id"],
            cluster_id=NS.tendrl_context.integration_id,
        )
    )
    return False
def _run(self): Event( Message( priority="info", publisher=NS.publisher_id, payload={"message": "%s running" % self.__class__.__name__})) # Check if monitor key exists, if not sync try: NS._int.client.read("clusters/%s/_mon_key" % NS.tendrl_context.integration_id) except etcd.EtcdKeyNotFound: out, err, rc = cmd_utils.Command( "ceph auth get mon. --cluster %s" % NS.tendrl_context.cluster_name).run() if rc != 0: Event( Message(priority="debug", publisher=NS.publisher_id, payload={ "message": "Couldn't get monitor key. Error:%s" % err })) else: if out and out != "": mon_sec = out.split('\n')[1].strip().split( ' = ')[1].strip() NS._int.wclient.write( "clusters/%s/_mon_key" % NS.tendrl_context.integration_id, mon_sec) while not self._complete.is_set(): gevent.sleep(int(NS.config.data.get("sync_interval", 10))) try: NS._int.wclient.write("clusters/%s/sync_status" % NS.tendrl_context.integration_id, "in_progress", prevExist=False) except (etcd.EtcdAlreadyExist, etcd.EtcdCompareFailed) as ex: pass cluster_data = ceph.heartbeat(NS.tendrl_context.cluster_id) self.on_heartbeat(cluster_data) _cluster = NS.tendrl.objects.Cluster( integration_id=NS.tendrl_context.integration_id) if _cluster.exists(): _cluster.sync_status = "done" _cluster.last_sync = str(now()) _cluster.save() Event( Message( priority="info", publisher=NS.publisher_id, payload={"message": "%s complete" % self.__class__.__name__}))
def run(self):
    """Resize an rbd via the ceph request API.

    Returns True on success; on a failed request emits an event and
    returns False.
    """
    rbd_name = self.parameters['Rbd.name']
    pool_id = self.parameters['Rbd.pool_id']
    new_size = self.parameters['Rbd.size']
    meta = dict(
        job_id=self.parameters['job_id'],
        flow_id=self.parameters['flow_id'],
        cluster_id=NS.tendrl_context.integration_id,
    )
    Event(
        Message(
            priority="info",
            publisher=NS.publisher_id,
            payload={
                "message": "Re-sizing rbd %s on pool %s to %s" %
                           (rbd_name, pool_id, new_size)
            },
            **meta))
    crud = Crud()
    resp = crud.update("rbd", rbd_name,
                       dict(pool_id=pool_id, size=str(new_size)))
    try:
        crud.sync_request_status(resp['request'])
    except RequestStateError as ex:
        Event(
            Message(
                priority="info",
                publisher=NS.publisher_id,
                payload={
                    "message": "Failed to resize rbd %s."
                               " Error: %s" % (rbd_name, ex)
                },
                **meta))
        return False
    Event(
        Message(
            priority="info",
            publisher=NS.publisher_id,
            payload={
                "message": "Successfully re-sized rbd %s on pool-id %s to "
                           "%s" % (rbd_name, pool_id, new_size)
            },
            **meta))
    return True
def run(self):
    """Delete a ceph pool and purge its entry from the central store.

    Returns True on success; on failure emits an error event and
    returns False.
    """
    pool_id = self.parameters['Pool.pool_id']
    Event(
        Message(
            priority="info",
            publisher=NS.publisher_id,
            payload={
                "message": "Deleting pool-id %s" %
                self.parameters['Pool.pool_id'],
            },
            job_id=self.parameters['job_id'],
            flow_id=self.parameters['flow_id'],
            cluster_id=NS.tendrl_context.integration_id,
        ))
    crud = Crud()
    ret_val = crud.delete("pool", pool_id)
    if ret_val['response'] is not None and \
            ret_val['response']['error'] is True:
        # Bug fix: the failure event was logged with priority="info";
        # the request-based variant of this flow logs the identical
        # message as "error".
        Event(
            Message(
                priority="error",
                publisher=NS.publisher_id,
                payload={
                    "message": "Failed to delete pool %s."
                               " Error: %s" %
                               (self.parameters['Pool.poolname'],
                                ret_val['error_status'])
                },
                job_id=self.parameters['job_id'],
                flow_id=self.parameters['flow_id'],
                cluster_id=NS.tendrl_context.integration_id,
            ))
        return False
    # TODO(shtripat) Use namespace tree and not etcd orm later
    NS.etcd_orm.client.delete("clusters/%s/Pools/%s" %
                              (NS.tendrl_context.integration_id,
                               self.parameters['Pool.pool_id']),
                              recursive=True)
    Event(
        Message(
            priority="info",
            publisher=NS.publisher_id,
            payload={
                "message": "Deleted pool-id %s" %
                self.parameters['Pool.pool_id'],
            },
            job_id=self.parameters['job_id'],
            flow_id=self.parameters['flow_id'],
            cluster_id=NS.tendrl_context.integration_id,
        ))
    return True
def run(self):
    """Delete a ceph pool via the request API and purge it from the store.

    Returns True on success; on a failed request emits an error event
    and returns False.
    """
    pool_id = self.parameters['Pool.pool_id']
    meta = dict(
        job_id=self.parameters['job_id'],
        flow_id=self.parameters['flow_id'],
        cluster_id=NS.tendrl_context.integration_id,
    )
    Event(
        Message(
            priority="info",
            publisher=NS.publisher_id,
            payload={
                "message": "Deleting pool-id %s" %
                self.parameters['Pool.pool_id'],
            },
            **meta))
    crud = Crud()
    resp = crud.delete("pool", pool_id)
    try:
        crud.sync_request_status(resp['request'])
    except RequestStateError as ex:
        Event(
            Message(
                priority="error",
                publisher=NS.publisher_id,
                payload={
                    "message": "Failed to delete pool %s."
                               " Error: %s" %
                               (self.parameters['Pool.poolname'], ex)
                },
                **meta))
        return False
    # TODO(shtripat) Use namespace tree and not etcd orm later
    NS._int.wclient.delete("clusters/%s/Pools/%s" %
                           (NS.tendrl_context.integration_id,
                            self.parameters['Pool.pool_id']),
                           recursive=True)
    Event(
        Message(
            priority="info",
            publisher=NS.publisher_id,
            payload={
                "message": "Deleted pool-id %s" %
                self.parameters['Pool.pool_id'],
            },
            **meta))
    return True
def run(self):
    """Resize an rbd using the synchronous crud.update() call.

    Returns True on success; when the response reports an error, emits
    an event and returns False.
    """
    rbd_name = self.parameters['Rbd.name']
    pool_id = self.parameters['Rbd.pool_id']
    new_size = self.parameters['Rbd.size']
    meta = dict(
        job_id=self.parameters['job_id'],
        flow_id=self.parameters['flow_id'],
        cluster_id=NS.tendrl_context.integration_id,
    )
    Event(
        Message(
            priority="info",
            publisher=NS.publisher_id,
            payload={
                "message": "Re-sizing rbd %s on pool %s to %s" %
                           (rbd_name, pool_id, new_size)
            },
            **meta))
    crud = Crud()
    ret_val = crud.update("rbd", rbd_name,
                          dict(pool_id=pool_id, size=str(new_size)))
    if ret_val['response'] is not None and \
            ret_val['response']['error'] is True:
        Event(
            Message(
                priority="info",
                publisher=NS.publisher_id,
                payload={
                    "message": "Failed to resize rbd %s."
                               " Error: %s" % (rbd_name,
                                               ret_val['error_status'])
                },
                **meta))
        return False
    Event(
        Message(
            priority="info",
            publisher=NS.publisher_id,
            payload={
                "message": "Successfully re-sized rbd %s on pool-id %s to "
                           "%s" % (rbd_name, pool_id, new_size)
            },
            **meta))
    return True
def run(self):
    """Stop, delete and purge the gluster volume named in the parameters.

    Always returns True (the CLI exit codes are not inspected, matching
    the original behavior).
    """
    vol_id = self.parameters['Volume.vol_id']
    volname = self.parameters['Volume.volname']

    def _notify(text):
        Event(
            Message(
                priority="info",
                publisher=NS.publisher_id,
                payload={"message": text},
                job_id=self.parameters["job_id"],
                flow_id=self.parameters["flow_id"],
                cluster_id=NS.tendrl_context.integration_id,
            ))

    _notify("Stopping the volume %s before delete" % volname)
    subprocess.call([
        'gluster', 'volume', 'stop',
        self.parameters.get('Volume.volname'), '--mode=script'
    ])
    _notify("Deleting the volume %s" % volname)
    subprocess.call([
        'gluster', 'volume', 'delete',
        self.parameters.get('Volume.volname'), '--mode=script'
    ])
    # Drop the volume's record from the central store.
    NS.etcd_orm.client.delete("clusters/%s/Volumes/%s" %
                              (NS.tendrl_context.integration_id,
                               self.parameters['Volume.vol_id']),
                              recursive=True)
    _notify("Deleted the volume %s" % volname)
    return True
def expand_gluster(parameters):
    """Set up gluster on the new nodes and add them to the cluster.

    Raises FlowExecutionFailedError when node setup fails or when any
    node cannot be added to the cluster.
    """
    node_ips = get_node_ips(parameters)
    plugin = NS.gluster_provisioner.get_plugin()

    def _info(text):
        Event(
            Message(job_id=parameters['job_id'],
                    flow_id=parameters['flow_id'],
                    priority="info",
                    publisher=NS.publisher_id,
                    payload={"message": text}))

    _info("Setting up gluster nodes %s" %
          parameters['TendrlContext.integration_id'])
    ret_val = plugin.setup_gluster_node(
        node_ips, repo=NS.config.data.get('glusterfs_repo', None))
    if ret_val is not True:
        raise FlowExecutionFailedError("Error setting up gluster node")
    _info("Expanding gluster cluster %s" %
          parameters['TendrlContext.integration_id'])
    # Attempt every node; collect the ones that failed to join.
    failed_nodes = [node for node in node_ips
                    if not plugin.expand_gluster_cluster(node)]
    if failed_nodes:
        raise FlowExecutionFailedError(
            "Error expanding gluster cluster. Following nodes failed: %s" %
            ",".join(failed_nodes))
    _info("Expanded Gluster Cluster %s."
          " New nodes are: %s" % (parameters['TendrlContext.integration_id'],
                                  ",".join(node_ips)))
def run(self):
    """Delete an erasure-coded profile and purge it from the store.

    Returns True on success; on a failed request emits an error event
    and returns False.
    """
    profile_name = self.parameters['ECProfile.name']
    meta = dict(
        job_id=self.parameters['job_id'],
        flow_id=self.parameters['flow_id'],
        cluster_id=NS.tendrl_context.integration_id,
    )
    Event(
        Message(
            priority="info",
            publisher=NS.publisher_id,
            payload={
                "message": "Deleting ec-profile %s" %
                self.parameters['ECProfile.name'],
            },
            **meta))
    crud = Crud()
    resp = crud.delete("ec_profile", profile_name)
    try:
        crud.sync_request_status(resp['request'])
    except RequestStateError as ex:
        Event(
            Message(
                priority="error",
                publisher=NS.publisher_id,
                payload={
                    "message": "Failed to delete ec-profile %s."
                               " Error: %s" %
                               (self.parameters['ECProfile.name'], ex)
                },
                **meta))
        return False
    NS._int.wclient.delete("clusters/%s/ECProfiles/%s" %
                           (NS.tendrl_context.integration_id,
                            self.parameters['ECProfile.name']),
                           recursive=True)
    Event(
        Message(
            priority="info",
            publisher=NS.publisher_id,
            payload={
                "message": "Deleted ec-profile %s" %
                self.parameters['ECProfile.name'],
            },
            **meta))
    return True
def run(self):
    """Validate pool update parameters.

    Rejects (1) renaming a pool in the same request as any other update
    and (2) a pg_num that does not grow. Raises AtomExecutionFailedError
    on violation; returns True otherwise.
    """
    Event(
        Message(
            priority="info",
            publisher=NS.publisher_id,
            payload={"message": "Checking if update parameters are valid"},
            job_id=self.parameters['job_id'],
            flow_id=self.parameters['flow_id'],
            cluster_id=NS.tendrl_context.integration_id,
        ))
    # Bug fix: the original condition listed 'Pool.pg_num' twice;
    # deduplicated into a single tuple.
    other_update_keys = ('Pool.pg_num', 'Pool.size', 'Pool.min_size',
                         'Pool.quota_enabled')
    if 'Pool.poolname' in self.parameters and any(
            key in self.parameters for key in other_update_keys):
        # Fixed priority ("info" -> "error") to match the sibling
        # validation atom's error reporting.
        Event(
            Message(
                priority="error",
                publisher=NS.publisher_id,
                payload={
                    "message":
                    "Invalid combination of pool update parameters. "
                    "Pool name shouldnt be updated with other parameters."
                },
                job_id=self.parameters['job_id'],
                flow_id=self.parameters['flow_id'],
                cluster_id=NS.tendrl_context.integration_id,
            ))
        # Typo fix in the raised message ("shoulnt be update").
        raise AtomExecutionFailedError(
            "Invalid combination of pool update parameters. "
            "Pool name shouldnt be updated with other parameters.")
    if 'Pool.pg_num' in self.parameters:
        fetched_pool = Pool(pool_id=self.parameters['Pool.pool_id']).load()
        # Bug fix: compare as int — the sibling validator casts
        # fetched_pool.pg_num to int; store values may not be numeric.
        if self.parameters['Pool.pg_num'] <= int(fetched_pool.pg_num):
            Event(
                Message(
                    priority="error",
                    publisher=NS.publisher_id,
                    payload={
                        "message": "New pg-num cannot be less than "
                                   "existing value"
                    },
                    job_id=self.parameters['job_id'],
                    flow_id=self.parameters['flow_id'],
                    cluster_id=NS.tendrl_context.integration_id,
                ))
            raise AtomExecutionFailedError(
                "New pg-num cannot be less than existing value")
    return True
def run(self):
    """Delete an erasure-coded profile and purge it from the store.

    Returns True on success; on failure emits an error event and
    returns False.
    """
    Event(
        Message(
            priority="info",
            publisher=NS.publisher_id,
            payload={
                "message": "Deleting ec-profile %s" %
                self.parameters['ECProfile.name'],
            },
            job_id=self.parameters['job_id'],
            flow_id=self.parameters['flow_id'],
            cluster_id=NS.tendrl_context.integration_id,
        ))
    crud = Crud()
    ret_val = crud.delete("ec_profile", self.parameters['ECProfile.name'])
    if ret_val['response'] is not None and \
            ret_val['response']['error'] is True:
        # Bug fix: the failure event was logged with priority="info";
        # the request-based variant of this flow logs the identical
        # message as "error".
        Event(
            Message(
                priority="error",
                publisher=NS.publisher_id,
                payload={
                    "message": "Failed to delete ec-profile %s."
                               " Error: %s" %
                               (self.parameters['ECProfile.name'],
                                ret_val['error_status'])
                },
                job_id=self.parameters['job_id'],
                flow_id=self.parameters['flow_id'],
                cluster_id=NS.tendrl_context.integration_id,
            ))
        return False
    NS.etcd_orm.client.delete("clusters/%s/ECProfiles/%s" %
                              (NS.tendrl_context.integration_id,
                               self.parameters['ECProfile.name']),
                              recursive=True)
    Event(
        Message(
            priority="info",
            publisher=NS.publisher_id,
            payload={
                "message": "Deleted ec-profile %s" %
                self.parameters['ECProfile.name'],
            },
            job_id=self.parameters['job_id'],
            flow_id=self.parameters['flow_id'],
            cluster_id=NS.tendrl_context.integration_id,
        ))
    return True
def discover_storage_system(self): ret_val = {} # get the gluster version details cmd = subprocess.Popen("ceph --version", shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) out, err = cmd.communicate() if err and 'command not found' in err: Event( Message(priority="debug", publisher=NS.publisher_id, payload={"message": "ceph not installed on host"})) return ret_val if out: details = out.split() ret_val['pkg_version'] = details[2] ret_val['pkg_name'] = details[0] # get the cluster_id details os_name = NS.platform.os cfg_file = "" cluster_name = None if os_name in ['CentOS Linux', 'Red Hat Enterprise Linux Server']: cfg_file = '/etc/sysconfig/ceph' # TODO(shtripat) handle the case of ubuntu if cfg_file != "": if not os.path.exists(cfg_file): Event( Message(priority="debug", publisher=NS.publisher_id, payload={ "message": "config file: %s not found" % cfg_file })) return ret_val with open(cfg_file) as f: for line in f: if line.startswith("CLUSTER="): cluster_name = line.split('\n')[0].split('=')[1] if cluster_name: raw_data = ini2json.ini_to_dict("/etc/ceph/%s.conf" % cluster_name) if "global" in raw_data: ret_val['detected_cluster_id'] = raw_data['global']['fsid'] ret_val['detected_cluster_name'] = cluster_name return ret_val
def run(self):
    """Check whether the service named in Service.name is running on the
    node identified by the 'fqdn' parameter.

    Returns True when ServiceStatus reports the service up, False
    otherwise.
    """
    service_name = self.parameters.get("Service.name")
    fqdn = self.parameters.get("fqdn")

    def _emit(priority, text):
        Event(
            Message(
                priority=priority,
                publisher=NS.publisher_id,
                payload={"message": text},
                job_id=self.parameters['job_id'],
                flow_id=self.parameters['flow_id'],
            )
        )

    _emit("info",
          "Checking status of service %s on node %s" % (service_name, fqdn))
    response = ServiceStatus(service_name).status()
    # and then check the response...
    if response:
        _emit("info",
              "Service %s running on node %s" % (service_name, fqdn))
        return True
    _emit("error",
          "Failed to check status of service %s on "
          "node %s" % (service_name, fqdn))
    return False
def run(self):
    """Succeed only when a rebalance is currently running on the volume.

    Returns True for status "in progress"/"in_progress"; False when the
    status is anything else, absent, or the volume key is missing.
    """
    volname = self.parameters['Volume.volname']
    meta = dict(
        job_id=self.parameters["job_id"],
        flow_id=self.parameters["flow_id"],
        cluster_id=NS.tendrl_context.integration_id,
    )
    Event(
        Message(
            priority="info",
            publisher=NS.publisher_id,
            payload={"message": "Checking if rebalance is running"},
            **meta))
    try:
        rebal_status = NS._int.client.read(
            'clusters/%s/Volumes/%s/rebal_status' %
            (NS.tendrl_context.integration_id,
             self.parameters['Volume.vol_id'])).value
    except etcd.EtcdKeyNotFound:
        Event(
            Message(
                priority="info",
                publisher=NS.publisher_id,
                payload={
                    "message": "Volume %s not found" % volname
                },
                **meta))
        return False
    if rebal_status is None:
        return False
    if rebal_status in ["in progress", "in_progress"]:
        return True
    Event(
        Message(
            priority="info",
            publisher=NS.publisher_id,
            payload={
                "message": "No rebalance running for"
                           " volume %s" % volname
            },
            **meta))
    return False