def set_authorized_key(self, user_name):
    """Install the user's own public key into their authorized_keys file.

    Ensures /home/<user_name>/.ssh/authorized_keys exists, then appends the
    user's id_rsa.pub to it so key-based ssh login is possible.

    :param user_name: system user whose key is installed.
    :return: True when both shell commands succeeded, False otherwise.
    """
    path = '/home/{}/.ssh/'.format(user_name)
    # Make sure the authorized_keys file exists before appending to it.
    cmd = 'touch {}authorized_keys'.format(path)
    ret, out, err = exec_command_ex(cmd)
    # BUG FIX: the original ignored both return codes, so failures
    # (missing home dir, permissions) passed silently.
    if ret != 0:
        logger.error('Error creating authorized_keys, cmd: ' + cmd)
        return False
    cmd2 = 'cat {}id_rsa.pub >> {}authorized_keys'.format(path, path)
    ret2, out2, err2 = exec_command_ex(cmd2)
    if ret2 != 0:
        logger.error('Error appending public key, cmd: ' + cmd2)
        return False
    return True
def create_vlan(self, eth, vlan_id):
    """Create VLAN sub-interface <eth>.<vlan_id> and bring it up.

    :param eth: parent interface name.
    :param vlan_id: VLAN id (string).
    :return: True on success, False when either ip command fails.
    """
    vlan_iface = "{}.{}".format(eth, vlan_id)
    commands = [
        'ip link add link {} name {} type vlan id {}'.format(
            eth, vlan_iface, vlan_id),
        'ip link set dev {} up'.format(vlan_iface),
    ]
    # run both steps in order; abort on the first failure
    for command in commands:
        ret, stdout, stderr = exec_command_ex(command)
        if ret != 0:
            return False
    return True
def add_replication_sys_user(self, user_name, user_id):
    """Create the replication group (when absent) and a system user in it.

    :param user_name: name of the system user to create.
    :param user_id: numeric uid for the new user.
    :raises ReplicationException: when the system user already exists.
    """
    # Create the group only when getent cannot find it.
    ret, out, err = exec_command_ex("getent group {}".format(self.group))
    if ret != 0:
        grp_ret, grp_out, grp_err = exec_command_ex(
            "groupadd -g {} {}".format(self.gid, self.group))
        if grp_ret != 0 and grp_err:
            logger.error("Error Creating the {} group ".format(self.group))
    add_ret, add_out, add_err = exec_command_ex(
        "useradd {} -s /bin/bash -md /home/{} -g {} -u {}".format(
            user_name, user_name, self.group, user_id))
    if add_ret != 0 and add_err and 'already exists' in add_err:
        raise ReplicationException(ReplicationException.SYSTEM_USER_EXIST,
                                   'ThisSystemUserAlreadyExists:')
def send(self, server_ip):
    """Send buffered metrics to <server_ip>:2003 by piping echo into nc.

    Metrics are sent in batches of at most self.max_limit entries; each
    successfully sent batch is removed from self.metrics_list.

    :param server_ip: carbon/graphite server ip; ignored when None/empty.
    :raises Exception: when the echo/nc command writes to stderr; the
        pending metric list is cleared first so bad data is not re-sent.
    """
    if server_ip is None or len(server_ip) == 0:
        return
    while len(self.metrics_list) > 0:
        cmd = "echo "
        end_index = min(self.max_limit, len(self.metrics_list))
        metrics_sublist = self.metrics_list[0:end_index]
        for idx, metric in enumerate(metrics_sublist):
            # Every metric after the first starts on its own line.
            # BUG FIX: use the position instead of the original
            # "metric is not metrics_sublist[0]" identity test, which
            # misbehaves when duplicate strings share one object.
            if idx > 0 and metric.startswith("\"PetaSAN."):
                metric = metric.replace("\"PetaSAN.", "\"\nPetaSAN.")
            cmd += metric
        cmd += " | nc -q0 {server_ip} 2003".format(server_ip=server_ip)
        ret, stdout, stderr = exec_command_ex(cmd)
        if stderr is not None and len(stderr) > 0:
            self.clear_metrics_list()
            raise Exception("Error running echo command :" + cmd)
        # drop the batch that was just sent
        self.metrics_list = self.metrics_list[end_index:]
def run(self):
    """Thread body: query 'ceph pg ls-by-pool' for self.pool and record
    the active PG/OSD counts on this instance.

    :raises CephException: CONNECTION_TIMEOUT on connection problems,
        GENERAL_EXCEPTION on any other ceph failure.
    """
    # Get which ceph user is using this function & get his keyring file path
    ceph_auth = CephAuthenticator()
    cmd = 'ceph pg ls-by-pool {} --format json-pretty {} --cluster {}'.format(
        self.pool, ceph_auth.get_authentication_string(), self.cluster_name)
    ret, stdout, stderr = exec_command_ex(cmd)
    if ret != 0:
        connection_error = stderr and ('Connection timed out' in stderr
                                       or 'error connecting' in stderr)
        if connection_error:
            logger.error('Error in Ceph Connection cmd:' + cmd)
            raise CephException(CephException.CONNECTION_TIMEOUT,
                                'ConnectionTimeError')
        logger.error('General error in Ceph cmd:' + cmd)
        raise CephException(CephException.GENERAL_EXCEPTION,
                            'GeneralCephException')
    parser = PGDumpParser()
    parser.parse(stdout)
    self.active_pgs_num = parser.active_pgs
    self.active_osds_num = parser.active_osds
    return
def add_ceph_user(self, user_name, pool_list):
    """Create a ceph client user with rbd caps on the given pools and
    append its keyring to /etc/ceph/<cluster>.client.<user>.keyring.

    An empty pool_list grants the generic 'profile rbd' osd cap instead.

    :raises CephException: CONNECTION_TIMEOUT or GENERAL_EXCEPTION.
    """
    cluster_name = configuration().get_cluster_name()
    if len(pool_list) > 0:
        # one quoted cap per pool, comma separated
        pool_string = ",".join(
            "\'profile rbd pool=" + pool + "\'" for pool in pool_list)
    else:
        pool_string = "\'profile rbd\'"
    cmd = "ceph auth get-or-create client.{} mgr 'allow r' mon 'profile rbd' osd {} >> /etc/ceph/{}.client.{}.keyring " \
          "--cluster {} ".format(user_name, pool_string, cluster_name,
                                 user_name, cluster_name)
    ret, stdout, stderr = exec_command_ex(cmd)
    if ret == 0:
        return
    if stderr and ('Connection timed out' in stderr
                   or 'error connecting' in stderr):
        logger.error('Error in Ceph Connection cmd:' + cmd)
        raise CephException(CephException.CONNECTION_TIMEOUT,
                            'ConnectionTimeError')
    logger.error('General error in Ceph cmd:' + cmd)
    raise CephException(CephException.GENERAL_EXCEPTION,
                        'GeneralCephException')
def unmount(
    path,
    do_rm=True,
):
    """
    Unmount and removes the given mount point.

    :param path: mount point path to unmount.
    :param do_rm: when True (default) also remove the mount point dir.
    :raises Exception: when the umount command fails.
    """
    logger.debug('Unmounting %s', path)
    # BUG FIX: exec_command_ex reports failure through its return code (it
    # is used that way everywhere else in this file) and never raises
    # CalledProcessError, so the original try/except was dead code and
    # umount failures passed silently into os.rmdir.
    ret, stdout, stderr = exec_command_ex('/bin/umount -- ' + path)
    if ret != 0:
        raise Exception('Error unmonting disk.', stderr)
    if not do_rm:
        return
    os.rmdir(path)
def delete_ceph_user(self, user_name):
    """Delete ceph auth entry client.<user_name> and its local keyring.

    :raises ReplicationException: when the ceph user does not exist.
    :raises CephException: on connection timeout or other ceph errors.
    :return: True on success.
    """
    cluster_name = configuration().get_cluster_name()
    cmd = "ceph auth del client.{} --cluster {}".format(user_name,
                                                        cluster_name)
    ret, stdout, stderr = exec_command_ex(cmd)
    if ret != 0:
        if stderr:
            if 'does not exist' in stderr:
                logger.error('Error in Ceph Connection cmd:' + cmd)
                raise ReplicationException(
                    ReplicationException.CEPH_USER_DOES_NOT_EXIST,
                    'UserNotExist')
            if ('Connection timed out' in stderr
                    or 'error connecting' in stderr):
                logger.error('Error in Ceph Connection cmd:' + cmd)
                raise CephException(CephException.CONNECTION_TIMEOUT,
                                    'ConnectionTimeError')
        logger.error('General error in Ceph cmd:' + cmd)
        raise CephException(CephException.GENERAL_EXCEPTION,
                            'GeneralCephException')
    # remove the keyring file left behind for this client, if any
    keyring_file = "/etc/ceph/" + cluster_name + ".client." + user_name \
                   + ".keyring"
    if os.path.exists(keyring_file):
        os.remove(keyring_file)
    return True
def get_auth_pools(self, user_name):
    """Return the pool names the ceph client user has rbd caps on.

    Parses the osd caps string from 'ceph auth get client.<user>'; when
    the caps are the generic 'profile rbd' form an empty list is returned.

    :raises CephException: CONNECTION_TIMEOUT or GENERAL_EXCEPTION.
    """
    cluster_name = configuration().get_cluster_name()
    cmd = "ceph auth get client.{} --format json --cluster {}".format(
        user_name, cluster_name)
    ret, stdout, stderr = exec_command_ex(cmd)
    if ret != 0:
        if stderr and ('Connection timed out' in stderr
                       or 'error connecting' in stderr):
            logger.error('Error in Ceph Connection cmd:' + cmd)
            raise CephException(CephException.CONNECTION_TIMEOUT,
                                'ConnectionTimeError')
        logger.error('General error in Ceph cmd:' + cmd)
        raise CephException(CephException.GENERAL_EXCEPTION,
                            'GeneralCephException')
    pools_info = json.loads(stdout)[0]['caps']['osd']
    auth_pools = []
    if 'profile rbd pool=' in pools_info:
        stripped = pools_info.replace('profile rbd pool=', "")
        if "," in stripped:
            auth_pools = stripped.split(',')
        else:
            auth_pools.append(stripped)
    return auth_pools
def is_system_user_exist(self, user_name):
    """Return True when a passwd entry exists for user_name.

    Uses 'getent passwd'; an empty output means the user is absent.
    """
    ret, out, err = exec_command_ex("getent passwd {}".format(user_name))
    return len(out) > 0
def vlan_exists(self, eth, vlan_id):
    """Return True when the VLAN sub-interface <eth>.<vlan_id> exists.

    Uses 'ip -d link show'; a non-zero exit or a "does not exist" message
    on stderr means the interface is absent.
    """
    cmd = 'ip -d link show {}.{}'.format(eth, vlan_id)
    ret, stdout, stderr = exec_command_ex(cmd)
    if ret != 0:
        return False
    # BUG FIX: guard against stderr being None before the substring test;
    # the original unguarded '"does not exist" in stderr' could raise
    # TypeError.
    if stderr and "does not exist" in stderr:
        return False
    return True
def mount(
    dev,
    fstype,
    options,
):
    """
    Mounts a device with given filessystem type and mount options to a
    tempfile path under /var/lib/ceph/tmp.

    :param dev: device path to mount (must not be None).
    :param fstype: filesystem type (must not be None).
    :param options: mount options; defaults to "noatime" when None.
    :return: the temporary mount point path.
    :raises ValueError: when dev or fstype is None.
    """
    # sanity check: none of the arguments are None
    if dev is None:
        raise ValueError('dev may not be None')
    if fstype is None:
        raise ValueError('fstype may not be None')
    # pick best-of-breed mount options based on fs type
    if options is None:
        options = "noatime"
    myTemp = STATEDIR + '/tmp'
    # mkdtemp expect 'dir' to be existing on the system
    # Let's be sure it's always the case
    if not os.path.exists(myTemp):
        os.makedirs(myTemp)
    # mount
    path = tempfile.mkdtemp(
        prefix='mnt.',
        dir=myTemp,
    )
    try:
        logger.debug('Mounting %s on %s with options %s', dev, path,
                     options)
        # BUG FIX: the original concatenated "'mount -t ' + fstype + '-o '"
        # with no space before '-o', producing e.g. "mount -t xfs-o noatime"
        # which is an invalid fs type.
        cmd = 'mount -t ' + fstype + ' -o ' + options + ' -- ' + dev \
              + ' ' + path
        exec_command_ex(cmd)
        if which('restorecon'):
            cmd = 'restorecon ' + path
            exec_command_ex(cmd)
    except subprocess.CalledProcessError as e:
        # best-effort cleanup of the temp mount point before re-raising
        try:
            os.rmdir(path)
        except (OSError, IOError):
            pass
        raise Exception('Error Mounting disk.', e)
    return path
def get_eth_name(self, ip):
    """Return the interface name that carries the given ip, or None.

    Greps 'ip addr' output for the ip; the interface name is the last
    whitespace-separated token of the matching output.
    """
    ret, stdout, stderr = exec_command_ex('ip addr | grep {}'.format(ip))
    if ret != 0:
        return None
    if not stdout:
        return None
    return stdout.split()[-1]
def set_disk_metadata(args):
    """Store PetaSAN disk metadata as an xattr on the image's rbd header.

    Metadata is read from args.file when given, otherwise from stdin, and
    written under the configured image-meta key on rbd_header.<image_id>
    of args.pool/args.image. Exits 0 on success, -1 on any error.
    """
    io_ctx = None
    cluster = None
    try:
        ceph_api = CephAPI()
        cluster = ceph_api.connect()
        io_ctx = cluster.open_ioctx(args.pool)
        # Get which ceph user is using this function & get his keyring file path
        ceph_auth = CephAuthenticator()
        config = configuration()
        cluster_name = config.get_cluster_name()
        if args.file:
            with open(str(args.file), 'r') as file:
                disk_metadata_str = file.read()
        else:
            disk_metadata = sys.stdin.readlines()
            # converting list to string
            disk_metadata_str = ''.join(str(line) for line in disk_metadata)
        # read object meta :
        cmd = ("rbd info " + args.pool + "/" + str(args.image) + " " +
               ceph_auth.get_authentication_string() + " --cluster " +
               cluster_name + " | grep rbd_data")
        ret, stdout, stderr = exec_command_ex(cmd)
        if ret != 0:
            if stderr:
                cluster.shutdown()
                print("Cannot get image meta object from rbd header.")
                # BUG FIX: the original fell through and parsed empty
                # output here; exit explicitly like read_disks_metadata.
                sys.exit(-1)
        rbd_data = stdout.rstrip().strip()
        dot_indx = rbd_data.rfind(".")
        image_id = rbd_data[(dot_indx + 1):]
        meta_object = "rbd_header." + image_id
        io_ctx.set_xattr(str(meta_object),
                         str(ConfigAPI().get_image_meta_key()),
                         disk_metadata_str)
        io_ctx.close()
        cluster.shutdown()
        sys.exit(0)
    except Exception as e:
        # BUG FIX: e.message does not exist on Python 3 exceptions.
        print("Error in executing script function : set_disk_metadata , " +
              str(e))
        # io_ctx / cluster may still be None when connect() itself failed.
        if io_ctx is not None:
            io_ctx.close()
        if cluster is not None:
            cluster.shutdown()
        sys.exit(-1)
def get_current_system_user(self):
    """Return the current system user name as reported by 'id -u -n'.

    The raw command output is returned (it may include a trailing
    newline); errors are logged and None is returned implicitly.
    """
    try:
        ret, out, err = exec_command_ex("id -u -n ")
        return out
    except Exception as e:
        logger.error(e)
def read_disks_metadata(args):
    """Print the PetaSAN metadata xattr stored on an image's rbd header.

    Looks up rbd_header.<image_id> for args.pool/args.image and prints the
    value stored under meta_key. Exits 0 on success; exits -1 on error or
    when the image carries no metadata (a non-PetaSAN disk).
    """
    io_ctx = None
    cluster = None
    try:
        ceph_api = CephAPI()
        cluster = ceph_api.connect()
        io_ctx = cluster.open_ioctx(args.pool)
        # Get which ceph user is using this function & get his keyring file path
        ceph_auth = CephAuthenticator()
        config = configuration()
        cluster_name = config.get_cluster_name()
        cmd = ("rbd info " + args.pool + "/" + str(args.image) + " " +
               ceph_auth.get_authentication_string() + " --cluster " +
               cluster_name + " | grep rbd_data")
        ret, stdout, stderr = exec_command_ex(cmd)
        if ret != 0:
            if stderr:
                cluster.shutdown()
                print("Cannot get image meta object from rbd header.")
                sys.exit(-1)
        rbd_data = stdout.rstrip().strip()
        dot_indx = rbd_data.rfind(".")
        image_id = rbd_data[(dot_indx + 1):]
        rbd_header_object = "rbd_header." + image_id
        try:
            ret = io_ctx.get_xattr(rbd_header_object, meta_key)
        except Exception:
            # fall back to the header name without its last character
            ret = io_ctx.get_xattr(rbd_header_object[:-1], meta_key)
        io_ctx.close()
        cluster.shutdown()
        if ret:
            print(ret)
            sys.stdout.flush()
            sys.exit(0)
        else:
            # Non-PetaSAN Disk :
            sys.exit(-1)
    except Exception as e:
        # BUG FIX: e.message does not exist on Python 3 exceptions.
        print("Error in executing script function : read_disks_metadata , " +
              str(e))
        # io_ctx / cluster may be None when connect() itself failed.
        if io_ctx is not None:
            io_ctx.close()
        if cluster is not None:
            cluster.shutdown()
        sys.exit(-1)
def is_ceph_user_exist(self, user_name):
    """Return False when 'ceph auth get client.<user>' reports an Error
    on stderr, True otherwise."""
    ret, stdout, stderr = exec_command_ex(
        "ceph auth get client.{}".format(user_name))
    if ret != 0 and stderr and "Error" in stderr:
        return False
    return True
def rollback_to_snapshot(self, pool_name, image_name, snap_name):
    """Roll an rbd image back to the given snapshot.

    :return: True on success.
    :raises CephException: when the rbd command fails.
    """
    # Get which ceph user is using this function & get his keyring file path
    ceph_auth = CephAuthenticator()
    cluster_name = configuration().get_cluster_name()
    cmd = 'rbd snap rollback {}/{}@{} {} --cluster {}'.format(
        pool_name, image_name, snap_name,
        ceph_auth.get_authentication_string(), cluster_name)
    ret, stdout, stderr = exec_command_ex(cmd)
    if ret != 0:
        logger.error('General error in Ceph cmd : ' + cmd)
        raise CephException(CephException.GENERAL_EXCEPTION,
                            'GeneralCephException')
    return True
def get_replication_sys_users(self):
    """Return names of system users whose group list equals self.group.

    Parses lines of 'groups' output shaped like 'user : group'; returns
    an empty list when the command fails.
    """
    users_list = []
    cmd = 'cut -d: -f1 /etc/passwd | xargs groups | grep {}'.format(
        self.group)
    ret, stdout, stderr = exec_command_ex(cmd)
    if ret != 0:
        return users_list
    lines = stdout.split('\n')
    # drop the empty trailing entry produced by the final newline
    if len(lines[-1]) == 0:
        lines = lines[:-1]
    for line in lines:
        group_part = line.split(':')[1].replace(" ", "")
        if group_part == self.group:
            users_list.append(line.split(':')[0].replace(" ", ""))
    return users_list
def _write_file_lines(self):
    """Dump the in-memory crush map sections to a temp text file, compile
    it with crushtool and apply it with 'ceph osd setcrushmap'.

    :raises CrushException: when crushtool fails to compile the map.
    :raises CephException: when setcrushmap fails.
    """
    rand = self._get_rand_string(6)
    bin_file = self.CRUSH_SAVE_PATH + 'crushmap-tmp-' + rand + '.bin'
    txt_file = self.CRUSH_SAVE_PATH + 'crushmap-tmp-' + rand + '.txt'
    # the crush map text file is the five sections in this fixed order,
    # each followed by a blank line
    sections = [self.lines_tunables, self.lines_devices,
                self.lines_types, self.lines_buckets, self.lines_rules]
    with open(txt_file, 'w') as f:
        for section in sections:
            for line in section:
                f.writelines(line + '\n')
            f.writelines('\n')
    if not call_cmd('crushtool -c ' + txt_file + ' -o ' + bin_file):
        raise CrushException(CrushException.COMPILE, 'Crush Compile Error')
    cluster_name = configuration().get_cluster_name()
    cmd = 'ceph osd setcrushmap -i ' + bin_file + ' --cluster ' \
          + cluster_name
    ret, stdout, stderr = exec_command_ex(cmd)
    if ret != 0:
        if stderr and ('Connection timed out' in stderr
                       or 'error connecting' in stderr):
            logger.error('Error in Ceph Connection cmd:' + cmd)
            raise CephException(CephException.CONNECTION_TIMEOUT,
                                'Connection Timeout Error')
        logger.error('General error in Ceph cmd:' + cmd + ' error:'
                     + stderr)
        raise CephException(CephException.GENERAL_EXCEPTION,
                            'General Ceph Error')
    # clean up the temp files on success
    call_cmd('rm ' + txt_file)
    call_cmd('rm ' + bin_file)
def get_ceph_users(self):
    """Return the cluster auth entries ('ceph auth ls' auth_dump list).

    :raises CephException: CONNECTION_TIMEOUT or GENERAL_EXCEPTION.
    """
    cluster_name = configuration().get_cluster_name()
    cmd = "ceph auth ls --format json --cluster {}".format(cluster_name)
    ret, stdout, stderr = exec_command_ex(cmd)
    if ret != 0:
        if stderr and ('Connection timed out' in stderr
                       or 'error connecting' in stderr):
            logger.error('Error in Ceph Connection cmd:' + cmd)
            raise CephException(CephException.CONNECTION_TIMEOUT,
                                'ConnectionTimeError')
        logger.error('General error in Ceph cmd:' + cmd)
        raise CephException(CephException.GENERAL_EXCEPTION,
                            'GeneralCephException')
    return json.loads(stdout)['auth_dump']
def get_all_images(self, pool_name):
    """Return the list of rbd image names in the given pool.

    :raises CephException: when the rbd ls command fails.
    """
    # Get which ceph user is using this function & get his keyring file path
    ceph_auth = CephAuthenticator()
    cluster_name = configuration().get_cluster_name()
    cmd = 'rbd ls {} {} --cluster {}'.format(
        pool_name, ceph_auth.get_authentication_string(), cluster_name)
    ret, stdout, stderr = exec_command_ex(cmd)
    if ret != 0:
        logger.error('General error in Ceph cmd : ' + cmd)
        raise CephException(CephException.GENERAL_EXCEPTION,
                            'GeneralCephException')
    # one image name per output line
    return stdout.splitlines()
def get_pools_used_space(self):
    """Return {pool_name: used_space_percent} parsed from 'ceph df'.

    Percentages are rounded to one decimal place. Returns an empty dict
    when the ceph command fails.
    """
    pools_used_space = {}
    cluster_name = configuration().get_cluster_name()
    cmd = "ceph df --format json-pretty --cluster {}".format(cluster_name)
    ret, stdout, stderr = exec_command_ex(cmd)
    if ret != 0:
        if stderr:
            logger.error("Cannot run cmd : {}".format(cmd))
        # BUG FIX: the original fell through and called json.loads on the
        # (empty) output of the failed command, crashing the caller.
        return pools_used_space
    stdout_data = json.loads(stdout)
    for pool in stdout_data["pools"]:
        pool_name = pool["name"]
        pool_stats = pool["stats"]
        pool_used_space = round(float(pool_stats["percent_used"]), 1)
        pools_used_space[pool_name] = pool_used_space
    return pools_used_space
def get_conf(cluster, variable):
    """
    Get the value of the given configuration variable from the cluster.

    :raises: Exception if call to ceph-conf fails.
    :return: The variable value or None.
    """
    try:
        cmd = 'ceph-conf --cluster=' + cluster + ' --name=osd. --lookup ' \
              + variable
        ret, out, err = exec_command_ex(cmd)
    except OSError as e:
        # BUG FIX: the original did "raise logger.error(...)", which tries
        # to raise the logger's None return value (a TypeError) and also
        # referenced the unbound name 'err'. Log, then re-raise the OSError.
        logger.error('error executing ceph-conf: %s', e)
        raise
    if ret == 1:
        # config entry not found
        return None
    elif ret != 0:
        # BUG FIX: same "raise logger.error(...)" pattern as above.
        logger.error('getting variable from configuration failed')
        raise Exception('getting variable from configuration failed')
    value = out.split('\n', 1)[0]
    # don't differentiate between "var=" and no var set
    if not value:
        return None
    return value
def readImageMetaData(ioctx, image, pool):
    """Return the PetaSAN metadata xattr for pool/image, or None.

    Finds the image id via 'rbd info | grep rbd_data' and reads meta_key
    from rbd_header.<image_id>, falling back to the header name with its
    last character dropped. Any failure yields None.
    """
    result = None
    # Get which ceph user is using this function & get his keyring file path
    ceph_auth = CephAuthenticator()
    cluster_name = configuration().get_cluster_name()
    try:
        cmd = ("rbd info " + pool + "/" + str(image) + " " +
               ceph_auth.get_authentication_string() + " --cluster " +
               cluster_name + " | grep rbd_data")
        ret, stdout, stderr = exec_command_ex(cmd)
        if ret != 0:
            if stderr:
                logger.error("Cannot get image meta object from rbd header.")
            return None
        # the image id is the suffix after the last '.' of the grep match
        image_id = stdout.rstrip().strip().rsplit(".", 1)[-1]
        rbd_header_object = "rbd_header." + image_id
        try:
            result = ioctx.get_xattr(rbd_header_object, meta_key)
        except:
            result = ioctx.get_xattr(rbd_header_object[:-1], meta_key)
    except:
        return None
    return result
def update_auth_pools(self, user_name, pool_list):
    """Replace the ceph client user's osd caps with the given pool list.

    An empty pool_list grants the generic 'profile rbd' cap.

    :return: True on success, False when the ceph command fails.
    """
    cluster_name = configuration().get_cluster_name()
    if len(pool_list) > 0:
        # one quoted cap per pool, comma separated
        pool_string = ",".join(
            "\'profile rbd pool=" + pool + "\'" for pool in pool_list)
    else:
        pool_string = "\'profile rbd\'"
    cmd = "ceph auth caps client.{} mgr 'allow r' mon 'profile rbd' osd {} --cluster {}".format(
        user_name, pool_string, cluster_name)
    ret, stdout, stderr = exec_command_ex(cmd)
    if ret != 0:
        if stderr:
            logger.error('failed to run cmd ' + cmd)
        return False
    return True
def run(self):
    """Progress-monitor thread for a replication job.

    Loops, reading three progress files written by the transfer pipeline:
      * self.uncompressed_file_path - json with raw transferred bytes/rate,
      * self.compressed_file_path   - json with compressed bytes/rate,
      * self.progress_file_path     - rbd import log (percentage complete),
    copying the values into self.progress and the consul active-job
    record, then sleeping 6s between rounds. The loop ends when the
    uncompressed progress file is absent.
    """
    consul = ConsulAPI()
    # active job ids look like "<job_id>-..."; consul is keyed by prefix
    job_id = self.active_job_id.split('-')[0]
    active_job = consul.get_replication_active_job(job_id)
    while True:
        # ---- uncompressed transfer counters ----
        if len(self.uncompressed_file_path) > 0 and os.path.exists(
                self.uncompressed_file_path):
            # the writer may be mid-update; retry a few times on error
            for i in range(5):
                try:
                    if os.stat(self.uncompressed_file_path).st_size > 0:
                        with open(self.uncompressed_file_path,
                                  'r') as outfile:
                            uncompressed_data_json = outfile.read()
                        if len(uncompressed_data_json) > 0:
                            uncompressed_data = json.loads(
                                uncompressed_data_json)
                            self.progress['uncompressed_transferred_bytes'] = \
                                uncompressed_data['transferred_bytes']
                            self.progress['uncompressed_transferred_rate'] = \
                                uncompressed_data['transfer_rate']
                            active_job.uncompressed_transferred_bytes = \
                                self.progress['uncompressed_transferred_bytes']
                            active_job.uncompressed_transferred_rate = \
                                self.progress['uncompressed_transferred_rate']
                            # elapsed = now - job start, truncated to
                            # whole seconds, stored as "H:MM"
                            start_time = datetime.datetime.strptime(
                                active_job.start_time,
                                "%Y-%m-%d %H:%M:%S")
                            now_string_format = str(
                                datetime.datetime.now()).split('.')[0]
                            now = datetime.datetime.strptime(
                                now_string_format, "%Y-%m-%d %H:%M:%S")
                            elapsed_time = now - start_time
                            active_job.elapsed_time = str(
                                elapsed_time).split(':')[0] + ':' + str(
                                elapsed_time).split(':')[1]
                    break
                except Exception as e:
                    logger.error(e)
                    logger.error(
                        "Error in open uncompressed progress file")
                    time.sleep(2)
                    continue
        else:
            # no uncompressed progress file -> job done or not started
            break
        # ---- compressed transfer counters + compression ratio ----
        if len(self.compressed_file_path) > 0 and os.path.exists(
                self.compressed_file_path):
            for j in range(5):
                try:
                    if os.stat(self.compressed_file_path).st_size > 0:
                        with open(self.compressed_file_path,
                                  'r') as outfile:
                            compressed_data_json = outfile.read()
                        if len(compressed_data_json) > 0:
                            compressed_data = json.loads(
                                compressed_data_json)
                            self.progress['compressed_transferred_bytes'] = \
                                compressed_data['transferred_bytes']
                            self.progress['compressed_transferred_rate'] = \
                                compressed_data['transfer_rate']
                            uncompressed = self.progress[
                                'uncompressed_transferred_bytes'].strip()
                            compressed = self.progress[
                                'compressed_transferred_bytes'].strip()
                            if len(compressed) > 0 and len(
                                    uncompressed) > 0:
                                # counters carry units; pull out the
                                # leading numeric value for the ratio
                                uncompressed_val = float(
                                    re.findall(r'-?\d+\.?\d*',
                                               uncompressed)[0])
                                compressed_val = float(
                                    re.findall(r'-?\d+\.?\d*',
                                               compressed)[0])
                                if compressed_val > 0:
                                    self.progress['ratio'] = str(
                                        round(
                                            uncompressed_val /
                                            compressed_val, 2))
                            active_job.compressed_transferred_bytes = \
                                self.progress['compressed_transferred_bytes']
                            active_job.compressed_transferred_rate = \
                                self.progress['compressed_transferred_rate']
                            active_job.compression_ratio = self.progress[
                                "ratio"]
                    break
                except Exception as e:
                    logger.error(e)
                    time.sleep(2)
                    continue
        # ---- rbd import percentage from the transfer log ----
        if len(self.progress_file_path) > 0 and os.path.exists(
                self.progress_file_path):
            for j in range(5):
                try:
                    if os.stat(self.progress_file_path).st_size > 0:
                        # last "Importing" line holds the newest value
                        cmd = "tac {} | grep Importing -m 1".format(
                            self.progress_file_path)
                        ret, stdout, stderr = exec_command_ex(cmd)
                        if ret != 0:
                            if stderr:
                                logger.error(stderr)
                        if stdout and "complete" in stdout:
                            output = stdout.split(':')
                            i = len(output) - 1
                            # scan backwards for "NN% complete..." field
                            while i >= 0:
                                if "complete..." in output[i]:
                                    progress_element = output[i]
                                    self.progress[
                                        'percentage'] = progress_element.split(
                                        "complete...")[0].strip()
                                    break
                                i -= 1
                            active_job.progress = self.progress[
                                'percentage']
                    break
                except Exception as e:
                    logger.error(e)
                    time.sleep(2)
                    continue
        # publish the updated record and wait for the next round
        confirm = consul.update_replication_active_job(active_job)
        time.sleep(6)
    return
def delete_tmp_file(self, file_name):
    """Best-effort removal of a temp file via 'rm -f'.

    Failures are logged but never raised.

    :param file_name: path of the file to delete.
    :return: True always (even when the rm command failed).
    """
    cmd = "rm -f {}".format(file_name)
    ret, stdout, stderr = exec_command_ex(cmd)
    if ret != 0:
        logger.error('Manage Tmp File | Error , delete tmp file : ' + cmd)
    return True
# Script: start the configured consul client agent and raise its open-file
# limit.
import argparse
# BUG FIX: subprocess is used below but was never imported.
import subprocess

from PetaSAN.core.common.log import logger
from PetaSAN.core.common.cmd import exec_command_ex, call_cmd
# BUG FIX: configuration() is used below but was not imported; this is the
# standard PetaSAN location -- NOTE(review): verify the module path.
from PetaSAN.core.cluster.configuration import configuration

MAX_OPEN_FILES = 102400

parser = argparse.ArgumentParser(description='This is a script that will start up the configured consul client.')

# Build the consul retry-join arguments from the other cluster nodes.
join = ''
for node in configuration().get_remote_nodes_config(""):
    join = join + " -retry-join {} ".format(node.backend_1_ip)
logger.info("consul start up string {}".format(join))

str_start_command = "consul agent -config-dir /opt/petasan/config/etc/consul.d/client "
str_start_command = str(str_start_command) + join + ' >/dev/null 2>&1 &'
subprocess.Popen(str_start_command, shell=True)

# Increase max open files for Consul process :
# ============================================
# BUG FIX: exclude the grep processes themselves from the ps output,
# otherwise the first returned line may be a grep command, not consul.
pid_cmd = "ps aux | grep consul | grep agent | grep -v grep"
ret, stdout, stderr = exec_command_ex(pid_cmd)
line_1 = stdout.splitlines()[0]
pid = line_1.split()[1]
ulimit_cmd = "prlimit -n" + str(MAX_OPEN_FILES) + " -p " + pid
call_cmd(ulimit_cmd)
def get_dest_cluster_fsid(self, dest_cluster):
    """Fetch the ceph fsid of a replication destination cluster over ssh.

    Writes the destination's private ssh key to a temp file, runs the
    replication script remotely with the "cluster-fsid" parser key, then
    deletes the key file and returns the remote script's json output.

    :param dest_cluster: object with user_name, remote_ip and
        ssh_private_key of the destination cluster.
    :return: json-decoded stdout of the remote script.
    :raises ReplicationException: CONNECTION_TIMEOUT / CONNECTION_REFUSED
        / PERMISSION_DENIED / GENERAL_EXCEPTION.
    :raises Exception: when the remote script printed 'Error' and the ssh
        command itself failed.
    """
    mng_file = ManageTmpFile()
    # Get destination cluster info
    dest_user_name = dest_cluster.user_name
    dest_cluster_ip = dest_cluster.remote_ip
    decrypted_key = dest_cluster.ssh_private_key
    # Save private key in a text file under the replication tmp dir
    config_api = ConfigAPI()
    directory_path = config_api.get_replication_tmp_file_path()
    if not os.path.exists(directory_path):
        os.makedirs(directory_path)
    sshkey_path = config_api.get_replication_sshkey_file_path()
    mng_file.create_tmp_file(sshkey_path, decrypted_key)
    # chmod 600: ssh refuses keys readable by others
    cmd = "chmod 600 {}".format(sshkey_path)
    ret, out, err = exec_command_ex(cmd)
    # Run the replication script remotely at the destination cluster
    script_file = ConfigAPI().get_replication_script_path()
    parser_key = "cluster-fsid"  # Define parser key
    # Define cmd command
    cmd = 'ssh -o StrictHostKeyChecking=no -i {} {}@{} "{} {}"'.format(
        sshkey_path, dest_user_name, dest_cluster_ip, script_file,
        parser_key)
    ret, stdout, stderr = exec_command_ex(cmd)
    # Delete the temporary private key file regardless of outcome
    mng_file.delete_tmp_file(sshkey_path)
    if ret != 0:
        # classify the ssh failure by its stderr text
        if stderr and ('Connection timed out' in stderr):
            logger.error(
                'Manage Destination Cluster | Connection timed out , Error = '
                + str(stderr))
            raise ReplicationException(
                ReplicationException.CONNECTION_TIMEOUT,
                'Connection timed out')
        elif stderr and ('Connection refused' in stderr):
            logger.error(
                'Manage Destination Cluster | Connection refused , Error = '
                + str(stderr))
            raise ReplicationException(
                ReplicationException.CONNECTION_REFUSED,
                'Connection refused')
        elif stderr and ('Permission denied' in stderr):
            logger.error(
                'Manage Destination Cluster | Permission denied , Error = '
                + str(stderr))
            raise ReplicationException(
                ReplicationException.PERMISSION_DENIED,
                'Permission denied')
        elif stderr and ("warning" not in stderr.lower()):
            # anything else on stderr except warnings is fatal
            logger.error(
                'Manage Destination Cluster | Cannot get destination cluster fsid , Error = '
                + str(stderr))
            raise ReplicationException(
                ReplicationException.GENERAL_EXCEPTION,
                'Cannot get destination cluster fsid')
        elif stdout and ('Error' in stdout):
            logger.error(
                'Manage Destination Cluster | Cannot get destination cluster fsid.'
            )
            raise Exception(str(stdout))
        else:
            logger.error(
                'Manage Destination Cluster | Cannot get destination cluster fsid.'
            )
            raise ReplicationException(
                ReplicationException.GENERAL_EXCEPTION,
                'Cannot get destination cluster fsid')
    # ssh succeeded -- still reject Error output or unexpected stderr
    if stdout and ('Error' in stdout):
        logger.error(
            'Manage Destination Cluster | Cannot get destination cluster fsid.'
        )
        raise ReplicationException(ReplicationException.GENERAL_EXCEPTION,
                                   'Cannot get destination cluster fsid')
    if stderr and ("warning" not in stderr.lower()):
        logger.error(
            'Manage Destination Cluster | Cannot get destination cluster fsid , Error = '
            + str(stderr))
        raise ReplicationException(ReplicationException.GENERAL_EXCEPTION,
                                   'Cannot get destination cluster fsid')
    stdout_json = json.loads(str(stdout))
    return stdout_json