def check_ceph():
    """Check ceph cluster health and the status of every OSD.

    Runs only on fuel, controller or ceph-osd nodes; on a fuel node the
    check is delegated to every relevant node via check_all_nodes().
    """
    # node role check
    if not NODE_ROLE.is_fuel():
        if not NODE_ROLE.is_controller():
            if not NODE_ROLE.is_ceph_osd():
                LOG.warn('This command can only run on fuel or controller or ceph-osd node !')
                return
    if NODE_ROLE.is_fuel():
        check_all_nodes('ceph')
        return
    # get cluster status
    LOG.info('%s%s Checking ceph cluster status' % ('=' * 5, '>'))
    ceph_check_health()
    # check osd status
    LOG.info('%s%s Checking ceph osd status' % ('=' * 5, '>'))
    check_success = True
    osd_status = get_ceph_osd_status()
    if not osd_status:
        LOG.error('Can not get ceph osd status !')
        check_success = False
    else:
        for line in osd_status.split('\n'):
            # skip header rows; only parse lines that describe an osd
            if 'id' not in line and 'weigh' not in line and 'osd.' in line:
                fields = line.split()
                # guard against short/malformed rows to avoid IndexError
                if len(fields) < 4:
                    continue
                osd = fields[2]
                status = fields[3]
                if status != 'up':
                    LOG.error('%s status is not correct, please check it !' % osd)
                    check_success = False
    if check_success:
        LOG.info('Ceph osd status check successfully !')
def check_mysql():
    """Check that every controller node is running in the mysql cluster.

    Runs on fuel (delegates to all nodes) or controller nodes only.
    """
    # node role check
    if not NODE_ROLE.is_fuel():
        if not NODE_ROLE.is_controller():
            LOG.warn('This command can only run on fuel or controller node !')
            return
    if NODE_ROLE.is_fuel():
        check_all_nodes('mysql')
        return
    LOG.info('%s%s Checking mysql cluster status' % ('=' * 5, '>'))
    # get running node list for mysql cluster
    running_nodes = get_mysql_nodes()
    if running_nodes is None:
        LOG.error('Can not get the running node list for mysql cluster !')
        return
    # get all controller node hostname
    controllers = get_controllers_hostname()
    if controllers is None:
        LOG.error('Can not get the controllers node list !')
        return
    # every controller must appear in the mysql running-node list
    error_nodes = [node for node in controllers if node not in running_nodes]
    if error_nodes:
        LOG.error('Node %s is not running in mysql cluster !' % error_nodes)
        # fixed message typo: 'faild' -> 'failed'
        LOG.error('Mysql cluster check failed !')
    else:
        LOG.info('Mysql cluster check successfully !')
def check_rabbitmq():
    """Check that every controller node has joined the rabbitmq cluster.

    Runs on fuel (delegates to all nodes) or controller nodes only.
    """
    # node role check
    if not NODE_ROLE.is_fuel():
        if not NODE_ROLE.is_controller():
            LOG.warn('This command can only run on fuel or controller node !')
            return
    if NODE_ROLE.is_fuel():
        check_all_nodes('rabbitmq')
        return
    LOG.info('%s%s Checking rabbitmq cluster status' % ('=' * 5, '>'))
    # get all controller node hostname
    controllers = get_controllers_hostname()
    if controllers is None:
        LOG.error('Can not get the controllers node list !')
        return
    # get masters & slaves node list
    running_nodes = get_rabbitmq_nodes()
    if running_nodes is None:
        LOG.error('Can not get the running node list for rabbitmq cluster !')
        return
    # rabbitmq reports short hostnames, so compare the short form
    error_nodes = [node for node in controllers
                   if node.split('.')[0] not in running_nodes]
    if error_nodes:
        LOG.error('Node %s not in rabbitmq cluster !' % error_nodes)
        # fixed message typo: 'faild' -> 'failed'
        LOG.error('Rabbitmq cluster check failed !')
    else:
        LOG.info('Rabbitmq cluster check successfully !')
def ami(parser):
    """Register an AMI image from kernel, initrd and image files.

    Requires KERNEL_FILE, INITRD_FILE and IMAGE_FILE; when NAME is not
    given, the image file's basename is used as the AMI name.
    Controller-node only.
    """
    if not NODE_ROLE.is_controller():
        LOG.warn('This command can only run on controller node !')
        return
    if not parser.KERNEL_FILE and not parser.INITRD_FILE and not parser.IMAGE_FILE:
        # fixed message typo: 'infomation' -> 'information'
        LOG.error('Lack of arguments, you can use --help to get help information\n')
    elif not parser.KERNEL_FILE:
        LOG.error('Please specify the kernel file\n')
    elif not parser.INITRD_FILE:
        LOG.error('Please specify the initrd file\n')
    elif not parser.IMAGE_FILE:
        LOG.error('Please specify the image file\n')
    else:
        # split the path and filename (shared by both name branches)
        kernel_file_name = os.path.basename(parser.KERNEL_FILE)
        initrd_file_name = os.path.basename(parser.INITRD_FILE)
        # if no image name was specified, use IMAGE_FILE basename as AMI name
        ami_image_name = parser.NAME or os.path.basename(parser.IMAGE_FILE)
        ami_image_upload(parser.KERNEL_FILE, kernel_file_name,
                         parser.INITRD_FILE, initrd_file_name,
                         parser.IMAGE_FILE, ami_image_name)
def check_ceph():
    """Verify ceph cluster health and that every osd reports 'up'."""
    # only fuel / controller / ceph-osd nodes may run this check
    if not (NODE_ROLE.is_fuel() or NODE_ROLE.is_controller()
            or NODE_ROLE.is_ceph_osd()):
        LOG.warn(
            'This command can only run on fuel or controller or ceph-osd node !'
        )
        return
    # fuel delegates the check to all relevant nodes
    if NODE_ROLE.is_fuel():
        check_all_nodes('ceph')
        return
    # cluster-wide health first
    LOG.info('=====> Checking ceph cluster status')
    ceph_check_health()
    # then per-osd state
    LOG.info('=====> Checking ceph osd status')
    all_up = True
    status_output = get_ceph_osd_status()
    if not status_output:
        LOG.error('Can not get ceph osd status !')
        all_up = False
    else:
        for row in status_output.split('\n'):
            # only rows naming an osd, skipping the header lines
            if 'osd.' not in row or 'id' in row or 'weigh' in row:
                continue
            columns = row.split()
            osd_name, osd_state = columns[2], columns[3]
            if osd_state != 'up':
                LOG.error('%s status is not correct, please check it !' % osd_name)
                all_up = False
    if all_up:
        LOG.info('Ceph osd status check successfully !')
def ami(parser):
    """Upload kernel/initrd/image files as an AMI glance image (controller only)."""
    if not NODE_ROLE.is_controller():
        LOG.warn('This command can only run on controller node !')
        return
    # "if controller leave to last"
    if not (parser.KERNEL_FILE or parser.INITRD_FILE or parser.IMAGE_FILE):
        LOG.error(
            'Lack of arguments, you can use --help to get help infomation\n'
        )
        return
    if not parser.KERNEL_FILE:
        LOG.error('Please specify the kernel file\n')
        return
    if not parser.INITRD_FILE:
        LOG.error('Please specify the initrd file\n')
        return
    if not parser.IMAGE_FILE:
        LOG.error('Please specify the image file\n')
        return
    # split the path and filename
    kernel_name = os.path.basename(r'%s' % parser.KERNEL_FILE)
    initrd_name = os.path.basename(r'%s' % parser.INITRD_FILE)
    if parser.NAME:
        image_name = parser.NAME
    else:
        # if not specify image name, use IMAGE_FILE as AMI name
        image_name = os.path.basename(r'%s' % parser.IMAGE_FILE)
    ami_image_upload(parser.KERNEL_FILE, kernel_name,
                     parser.INITRD_FILE, initrd_name,
                     parser.IMAGE_FILE, image_name)
def orphan(parser):
    """Delete orphaned resources across the openstack services (controller only)."""
    # mute INFO-level output while the cleanup threads run
    logging.disable(logging.INFO)
    if not NODE_ROLE.is_controller():
        LOG.warn('This command can only run on controller node !')
        return
    # servers must be removed first, so run the nova thread on its own
    server_thread = RunNovaThread()
    server_thread.start()
    server_thread.join()
    # the remaining resource cleanups are independent: run them in parallel
    workers = [RunCinderThread(), RunGlanceThread(), RunNetBaseThread(),
               RunFirewallThread(), RunSecgroupThread(), RunVPNThread(),
               RunLBThread(), RunQoSThread()]
    for worker in workers:
        worker.start()
    for worker in workers:
        worker.join()
    # restore normal logging
    logging.disable(logging.NOTSET)
def check_rabbitmq():
    """Verify that every controller participates in the rabbitmq cluster."""
    # runnable on fuel or controller nodes only
    if not NODE_ROLE.is_fuel() and not NODE_ROLE.is_controller():
        LOG.warn('This command can only run on fuel or controller node !')
        return
    if NODE_ROLE.is_fuel():
        check_all_nodes('rabbitmq')
        return
    LOG.info('=====> Checking rabbitmq cluster status')
    # expected controller hostnames
    controllers = get_controllers_hostname()
    if controllers is None:
        LOG.error('Can not get the controllers node list !')
        return
    # nodes currently reported by rabbitmq (masters + slaves)
    running_nodes = get_rabbitmq_nodes()
    if running_nodes is None:
        LOG.error('Can not get the running node list for rabbitmq cluster !')
        return
    # rabbitmq lists short hostnames, so compare the short form
    missing = [host for host in controllers
               if host.split('.')[0] not in running_nodes]
    if missing:
        LOG.error('Node %s not in rabbitmq cluster !' % missing)
        LOG.error('Rabbitmq cluster check faild !')
    else:
        LOG.info('Rabbitmq cluster check successfully !')
def check_mysql():
    """Verify that all controllers are active members of the mysql cluster."""
    # runnable on fuel or controller nodes only
    if not (NODE_ROLE.is_fuel() or NODE_ROLE.is_controller()):
        LOG.warn('This command can only run on fuel or controller node !')
        return
    if NODE_ROLE.is_fuel():
        check_all_nodes('mysql')
        return
    LOG.info('=====> Checking mysql cluster status')
    # nodes currently active in the mysql cluster
    running_nodes = get_mysql_nodes()
    if running_nodes is None:
        LOG.error('Can not get the running node list for mysql cluster !')
        return
    # expected controller hostnames
    controllers = get_controllers_hostname()
    if controllers is None:
        LOG.error('Can not get the controllers node list !')
        return
    # every controller must appear in the running list
    error_nodes = []
    for hostname in controllers:
        if hostname not in running_nodes:
            error_nodes.append(hostname)
    if not error_nodes:
        LOG.info('Mysql cluster check successfully !')
    else:
        LOG.error('Node %s is not running in mysql cluster !' % error_nodes)
        LOG.error('Mysql cluster check faild !')
def stack(parser):
    """Check openstack profiles and/or services, scoped by the parser flags.

    CONTROLLER / COMPUTE / MONGO select node roles; PROFILE, SERVICE and
    CHECK_ALL select what is checked.
    """
    # if node role is "unknow", go back
    if NODE_ROLE.is_unknown():
        LOG.error('Can not confirm the node role!')
        return
    if not NODE_ROLE.is_fuel():
        # on a non-fuel node each requested role flag must match the
        # actual role of this node, otherwise warn and abort
        if parser.CONTROLLER:
            if not NODE_ROLE.is_controller():
                cmd_warn('controller')
                return
        if parser.COMPUTE:
            if not NODE_ROLE.is_compute():
                cmd_warn('compute')
                return
        if parser.MONGO:
            if not NODE_ROLE.is_mongo():
                cmd_warn('mongo')
                return
        if parser.CONTROLLER or parser.COMPUTE or parser.MONGO:
            # --profile alone: check profiles only
            if parser.PROFILE and not parser.SERVICE and not parser.CHECK_ALL:
                if parser.CONTROLLER:
                    check('controller', 'profile')
                if parser.COMPUTE:
                    check('compute', 'profile')
                if parser.MONGO:
                    check('mongo', 'profile')
            # --service alone: check services only
            if parser.SERVICE and not parser.PROFILE and not parser.CHECK_ALL:
                if parser.CONTROLLER:
                    check('controller', 'service')
                if parser.COMPUTE:
                    check('compute', 'service')
                if parser.MONGO:
                    check('mongo', 'service')
            # both flags, --check-all, or neither flag: check everything.
            # Python precedence makes this condition read as:
            # (SERVICE and PROFILE) or CHECK_ALL or (not PROFILE and not SERVICE)
            if parser.SERVICE and parser.PROFILE or parser.CHECK_ALL or not parser.PROFILE and not parser.SERVICE:
                if parser.CONTROLLER:
                    check('controller', 'all')
                if parser.COMPUTE:
                    check('compute', 'all')
                if parser.MONGO:
                    check('mongo', 'all')
            return
    # check all
    if parser.CHECK_ALL and parser.PROFILE and parser.SERVICE:
        check_all()
        return
    elif parser.CHECK_ALL and parser.PROFILE:
        check_all_profile()
        return
    elif parser.CHECK_ALL and parser.SERVICE:
        check_all_service()
        return
    elif parser.CHECK_ALL:
        check_all()
        return
    # check profile or service
    if parser.PROFILE:
        check_all_profile()
    if parser.SERVICE:
        check_all_service()
def stack(parser):
    """Check openstack profiles and/or services according to the flags.

    Role flags (CONTROLLER / COMPUTE / MONGO) restrict the check to the
    current node; PROFILE, SERVICE and CHECK_ALL choose the check type.
    """
    # if node role is "unknow", go back
    if NODE_ROLE.is_unknown():
        LOG.error('Can not confirm the node role!')
        return
    if not NODE_ROLE.is_fuel():
        # reject a role flag that does not match this node's real role
        if parser.CONTROLLER:
            if not NODE_ROLE.is_controller():
                cmd_warn('controller')
                return
        if parser.COMPUTE:
            if not NODE_ROLE.is_compute():
                cmd_warn('compute')
                return
        if parser.MONGO:
            if not NODE_ROLE.is_mongo():
                cmd_warn('mongo')
                return
        if parser.CONTROLLER or parser.COMPUTE or parser.MONGO:
            # profile-only request
            if parser.PROFILE and not parser.SERVICE and not parser.CHECK_ALL:
                if parser.CONTROLLER:
                    check('controller', 'profile')
                if parser.COMPUTE:
                    check('compute', 'profile')
                if parser.MONGO:
                    check('mongo', 'profile')
            # service-only request
            if parser.SERVICE and not parser.PROFILE and not parser.CHECK_ALL:
                if parser.CONTROLLER:
                    check('controller', 'service')
                if parser.COMPUTE:
                    check('compute', 'service')
                if parser.MONGO:
                    check('mongo', 'service')
            # everything else — note Python's operator precedence:
            # (SERVICE and PROFILE) or CHECK_ALL or (not PROFILE and not SERVICE)
            if parser.SERVICE and parser.PROFILE or parser.CHECK_ALL or not parser.PROFILE and not parser.SERVICE:
                if parser.CONTROLLER:
                    check('controller', 'all')
                if parser.COMPUTE:
                    check('compute', 'all')
                if parser.MONGO:
                    check('mongo', 'all')
            return
    # check all
    if parser.CHECK_ALL and parser.PROFILE and parser.SERVICE:
        check_all()
        return
    elif parser.CHECK_ALL and parser.PROFILE:
        check_all_profile()
        return
    elif parser.CHECK_ALL and parser.SERVICE:
        check_all_service()
        return
    elif parser.CHECK_ALL:
        check_all()
        return
    # check profile or service
    if parser.PROFILE:
        check_all_profile()
    if parser.SERVICE:
        check_all_service()
def volume(parser):
    """Handle the volume subcommand: destroy a volume by id (controller only)."""
    if not NODE_ROLE.is_controller():
        LOG.warn('This command can only run on controller node !')
        return
    # nothing to do unless destruction was requested
    if not parser.DESTROY_VOLUME:
        return
    if parser.ID:
        destroy_volume(parser.ID)
    else:
        LOG.error('Please use [--id ID] to specify the volume ID !')
def volume(parser):
    """Destroy a cinder volume when DESTROY_VOLUME is requested (controller only)."""
    if not NODE_ROLE.is_controller():
        LOG.warn('This command can only run on controller node !')
        return
    if parser.DESTROY_VOLUME:
        target_id = parser.ID
        # an id is mandatory for destruction
        if not target_id:
            LOG.error('Please use [--id ID] to specify the volume ID !')
        else:
            destroy_volume(target_id)
def _network_check_remote(remote_inf):
    """Ping the remote addresses of every peer node in remote_inf."""
    def _probe(peer, address_key):
        # announce which address class of which peer is being probed
        LOG.debug("=====> start ping %s of %s(%s):"
                  % (address_key, peer["host"], peer["role"]))
        ping(peer[address_key])

    for peer in remote_inf:
        _probe(peer, "internal_address")
        # skip the storage network when either side is a mongo node
        if not NODE_ROLE.is_mongo() and not peer["role"].endswith("mongo"):
            _probe(peer, "storage_address")
        # public address is probed only between controllers
        if NODE_ROLE.is_controller() and peer["role"] == "controller":
            _probe(peer, "public_address")
def _network_check_remote(remote_inf):
    """Ping each peer's addresses on the networks shared with this node."""
    def _do_ping(entry, key):
        LOG.debug('=====> start ping %s of %s(%s):'
                  % (key, entry['host'], entry['role']))
        ping(entry[key])

    for entry in remote_inf:
        # the internal network is always shared
        _do_ping(entry, 'internal_address')
        # storage network: skipped when either end is a mongo node
        if not (NODE_ROLE.is_mongo() or entry['role'].endswith('mongo')):
            _do_ping(entry, 'storage_address')
        # public network: controller-to-controller only
        if NODE_ROLE.is_controller() and entry['role'] == 'controller':
            _do_ping(entry, 'public_address')
def _network_check_remote(remote_inf):
    """Ping the remote addresses of each peer node in remote_inf.

    The internal address is always pinged; the storage address only when
    neither this node nor the peer is a mongo node, and the public
    address only between controllers (presumably reflecting which
    networks each role is attached to — TODO confirm).
    """
    def _ping(peer_inf, role):
        # 'role' here is the address key, e.g. 'internal_address'
        LOG.debug('=====> start ping %s of %s(%s):' % (role, peer_inf['host'], peer_inf['role']))
        ping(peer_inf[role])
    for inf in remote_inf:
        _ping(inf, 'internal_address')
        # skip storage ping when either side is a mongo node
        if (not NODE_ROLE.is_mongo()) and (not inf['role'].endswith('mongo')):
            _ping(inf, 'storage_address')
        # public ping only between controllers
        if NODE_ROLE.is_controller() and inf['role'] == 'controller':
            _ping(inf, 'public_address')
def instance(parser):
    """Delete a nova instance, optionally together with its disk (controller only)."""
    if not NODE_ROLE.is_controller():
        LOG.warn('This command can only run on controller node !')
        return
    # NOTE: 'DELETE_INTANCE' is the attribute name defined by the argument
    # parser elsewhere — it must keep this spelling to match.
    if not parser.DELETE_INTANCE:
        return
    if not parser.ID:
        LOG.error('Please use [--id ID] to specify the instance ID !')
        return
    if parser.DELETE_DISK:
        delete_instance(parser.ID, delete_disk=True)
    else:
        delete_instance(parser.ID)
def check_all():
    """Run every cluster check (rabbitmq, mysql, haproxy, ceph)."""
    # only fuel or controller nodes may run the full cluster check
    if not NODE_ROLE.is_fuel() and not NODE_ROLE.is_controller():
        LOG.warn('This command can only run on fuel or controller node !')
        return
    if NODE_ROLE.is_fuel():
        # fuel delegates the check to every node
        check_all_nodes('all')
        return
    # controller node: run each cluster check locally
    check_rabbitmq()
    check_mysql()
    check_haproxy()
    check_ceph()
def check_all():
    """Run the doctor check command locally or via a controller node.

    On a fuel node the command is executed remotely on the first
    controller; on a controller it runs directly.
    """
    check_cmd = get_check_cmd()
    if NODE_ROLE.is_fuel():
        controllers = get_controllers_hostname()
        if controllers:
            # delegate to the first controller in the list
            controller_node = controllers[0]
            result = run_doctor_cmd_on_node('controller', controller_node, check_cmd)
            # presumably re-enables logging disabled during the remote
            # run — TODO confirm against run_doctor_cmd_on_node
            logging.disable(logging.NOTSET)
            LOG.info(result, remote=True)
        else:
            LOG.error('Can not get controller node list')
    elif NODE_ROLE.is_controller():
        # Python 2 print statement: emit the local command output
        print run_command(check_cmd)
def create_parser():
    """Build the top-level 'eayunstack' argument parser with its subcommands."""
    parser = argparse.ArgumentParser(
        prog='eayunstack',
        formatter_class=argparse.RawDescriptionHelpFormatter,
        description="""EayunStack Management Tools.\n\n""",
    )
    # global options
    parser.add_argument('-o', '--output', dest='FILENAME',
                        help='Local File To Save Output Info')
    parser.add_argument('-d', '--debug', action='store_true', dest='DEBUG',
                        default=False, help='Log debug message or not')
    parser.add_argument('-e', '--email', dest='EMAIL',
                        help='email address which send error log to(use commas to separate multiple email address)')
    sub = parser.add_subparsers(title='Commands', metavar='COMMAND',
                                help='DESCRIPTION')
    # register every command advertised through the 'command' entry-point group
    for entry in pkg_resources.iter_entry_points('command'):
        fn = entry.load()
        # manage/cleanup are only offered on controller nodes
        if entry.name in ['manage', 'cleanup'] and not NODE_ROLE.is_controller():
            continue
        cmd_parser = sub.add_parser(entry.name, description=fn.__doc__,
                                    help=fn.__doc__)
        fn(cmd_parser)
    return parser
def check_rabbitmqrestart():
    """Detect whether rabbitmq has been restarted since the last check.

    Compares the process start time with the one recorded in the log
    file and refreshes the recorded value when they differ.
    """
    if not NODE_ROLE.is_controller():
        return
    log_path = '/.eayunstack/rabbitmq_start_time'
    start_time = _get_from_ps()
    if not os.path.exists(log_path):
        # first run: no recorded start time yet, record it now
        LOG.debug('the log file is not found')
        _log_time(log_path, start_time)
        return
    if _get_from_log(log_path) == start_time:
        LOG.debug('service rabbitmq has never been restart')
    else:
        LOG.warn('service rabbitmq has been restart at %s' % start_time)
        _log_time(log_path, start_time)
def get_node_role():
    """Return the list of role names this node holds (empty when unknown)."""
    if NODE_ROLE.is_unknown():
        return []
    # probe each role predicate and collect the matching names
    role_checks = (
        ('fuel', NODE_ROLE.is_fuel),
        ('controller', NODE_ROLE.is_controller),
        ('compute', NODE_ROLE.is_compute),
        ('ceph_osd', NODE_ROLE.is_ceph_osd),
        ('mongo', NODE_ROLE.is_mongo),
    )
    return [name for (name, is_role) in role_checks if is_role()]
def get_node_role():
    """Return the list of role names detected for this node.

    Returns an empty list when the role cannot be determined; otherwise
    any of 'fuel', 'controller', 'compute', 'ceph_osd', 'mongo' — a node
    may hold several roles at once.
    """
    node_roles = []
    if NODE_ROLE.is_unknown():
        return node_roles
    if NODE_ROLE.is_fuel():
        node_roles.append('fuel')
    if NODE_ROLE.is_controller():
        node_roles.append('controller')
    if NODE_ROLE.is_compute():
        node_roles.append('compute')
    if NODE_ROLE.is_ceph_osd():
        node_roles.append('ceph_osd')
    if NODE_ROLE.is_mongo():
        node_roles.append('mongo')
    return node_roles
def check_all():
    """Run every cluster check: rabbitmq, mysql, haproxy, ceph, pacemaker, ceph space."""
    # only fuel or controller nodes may run the full check
    if not (NODE_ROLE.is_fuel() or NODE_ROLE.is_controller()):
        LOG.warn('This command can only run on fuel or controller node !')
        return
    if NODE_ROLE.is_fuel():
        # fuel delegates the check to every node
        check_all_nodes('all')
        return
    # controller node: run each individual cluster check in order
    for check_fn in (check_rabbitmq, check_mysql, check_haproxy,
                     check_ceph, check_pacemaker, check_cephspace):
        check_fn()
def vrouter_check(parser):
    """Check virtual routers; intended to run on a network (controller) node."""
    if NODE_ROLE.is_fuel():
        # TODO: run on fuel node on future
        LOG.error('This check can be run only on network node')
        return
        # NOTE(review): everything below this return is unreachable dead
        # code, apparently staged for the TODO above — confirm intent.
        # pick one controller to run
        controller_node = None
        for node in NODE_ROLE.nodes:
            if node['roles'] == 'controller':
                controller_node = node['host']
                cmd = 'source /root/openrc;%s' % (' '.join(sys.argv))
                ssh_connect2(controller_node, cmd)
    elif NODE_ROLE.is_controller():
        _vrouter_check(parser)
    else:
        LOG.error('This check can be run only on network node')
def _network_check_remote(remote_inf):
    """Ping each peer node on every network this node shares with it."""
    def _probe(peer, address_key):
        LOG.debug('=====> start ping %s of %s(%s):'
                  % (address_key, peer['host'], peer['role']))
        # derive the network role ('internal', 'storage', ...) and the
        # peer's short hostname for the ping helper
        net_role = address_key.split('_')[0]
        short_host = peer['host'].split(".")[0]
        ping(peer[address_key], short_host, net_role)

    for peer in remote_inf:
        _probe(peer, 'internal_address')
        # storage network: skipped when either side is a mongo node
        if not NODE_ROLE.is_mongo() and not peer['role'].endswith('mongo'):
            _probe(peer, 'storage_address')
        # public network: controller-to-controller only
        if NODE_ROLE.is_controller() and peer['role'] == 'controller':
            _probe(peer, 'public_address')
        # ceph cluster network: between ceph-osd nodes only
        if NODE_ROLE.is_ceph_osd() and peer['role'] == 'ceph-osd':
            _probe(peer, 'ceph_cluster_address')
def check_cephspace():
    """Check ceph cluster space usage and log at a matching severity.

    Below 83% usage is informational, 83-92% warns, 93%+ is an error;
    a negative value from get_ceph_space() signals the query failed.
    Controller-node only (silently does nothing elsewhere).
    """
    # node role check
    if not NODE_ROLE.is_controller():
        return
    LOG.info('%s%s Checking ceph space' % ('=' * 5, '>'))
    ceph_space = get_ceph_space()
    warn_limit = 83
    error_limit = 93
    if 0 <= ceph_space < warn_limit:
        LOG.info('The ceph space is used: %s%%' % ceph_space)
    elif warn_limit <= ceph_space < error_limit:
        LOG.warn('The ceph space is used: %s%%' % ceph_space)
    elif ceph_space < 0:
        # get_ceph_space() returns -1 when it cannot determine usage
        # fixed message typo: 'Faild' -> 'Failed'
        LOG.error('The ceph space check error: Get ceph space Failed')
    else:
        LOG.error('The ceph space is used: %s%%' % ceph_space)
def check_cephspace():
    """Report ceph space usage: info below 83%, warn below 93%, error otherwise."""
    # controller nodes only; silently skip elsewhere
    if not NODE_ROLE.is_controller():
        return
    LOG.info('=====> Checking ceph space')
    used = get_ceph_space()
    if used < 0:
        # a negative value means the space query itself failed
        LOG.error('The ceph space check error: Get ceph space Faild')
    elif used < 83:
        LOG.info('The ceph space is used: %s%%' % used)
    elif used < 93:
        LOG.warn('The ceph space is used: %s%%' % used)
    else:
        LOG.error('The ceph space is used: %s%%' % used)
def vrouter_check(parser):
    """Check virtual routers; intended to run on a network (controller) node."""
    if NODE_ROLE.is_fuel():
        # TODO: run on fuel node on future
        LOG.error("This check can be run only on network node")
        return
        # NOTE(review): the code below is unreachable after the return
        # above; it looks like preparation for the TODO — confirm intent.
        # pick one controller to run
        controller_node = None
        for node in NODE_ROLE.nodes:
            if node["roles"] == "controller":
                controller_node = node["host"]
                cmd = "source /root/openrc;%s" % (" ".join(sys.argv))
                ssh_connect2(controller_node, cmd)
    elif NODE_ROLE.is_controller():
        _vrouter_check(parser)
    else:
        LOG.error("This check can be run only on network node")
def vrouter_check(parser):
    """Check virtual routers; intended to run on a network (controller) node."""
    if NODE_ROLE.is_fuel():
        # TODO: run on fuel node on future
        LOG.error('This check can be run only on network node')
        return
        # NOTE(review): unreachable dead code below (after the return),
        # apparently staged for the TODO above — confirm intent.
        # pick one controller to run
        controller_node = None
        for node in NODE_ROLE.nodes:
            if node['roles'] == 'controller':
                controller_node = node['host']
                cmd = 'source /root/openrc;%s' % (' '.join(sys.argv))
                ssh_connect2(controller_node, cmd)
    elif NODE_ROLE.is_controller():
        _vrouter_check(parser)
    else:
        LOG.error('This check can be run only on network node')
def create_parser():
    """Build the top-level 'eayunstack' argument parser.

    Registers the -o/-d/-e global options and one subcommand for each
    entry point in the 'command' group; the 'manage' and 'cleanup'
    commands are skipped on non-controller nodes.
    """
    parser = argparse.ArgumentParser(
        prog='eayunstack',
        formatter_class=argparse.RawDescriptionHelpFormatter,
        description="""EayunStack Management Tools.\n\n""",
    )
    parser.add_argument(
        '-o', '--output',
        dest='FILENAME',
        help='Local File To Save Output Info',
    )
    parser.add_argument(
        '-d', '--debug',
        action='store_true',
        dest='DEBUG',
        default=False,
        help='Log debug message or not',
    )
    parser.add_argument(
        '-e', '--email',
        dest='EMAIL',
        help='email address which send error log to(use commas to separate multiple email address)',
    )
    sub = parser.add_subparsers(
        title='Commands',
        metavar='COMMAND',
        help='DESCRIPTION',
    )
    # load every registered command plugin as a (name, callable) pair
    entry_points = [
        (e.name, e.load()) for e in pkg_resources.iter_entry_points('command')
    ]
    for (name, fn) in entry_points:
        # controller-only commands are hidden on other node roles
        if name in ['manage', 'cleanup'] and not NODE_ROLE.is_controller():
            continue
        p = sub.add_parser(
            name,
            description=fn.__doc__,
            help=fn.__doc__,
        )
        fn(p)
    return parser
def check_haproxyresource():
    """Check the status of every HAProxy frontend/backend resource.

    Fetches the HAProxy monitor CSV and logs each row: healthy rows at
    debug level, unhealthy ones at error level. Controller-node only.
    """
    if not NODE_ROLE.is_controller():
        LOG.warn('This command can only run on controller node !')
        return
    LOG.info('%s%s Checking HAProxy resource status' % ('=' * 5, '>'))
    monitor_url = get_haproxy_monitor_url()
    if not monitor_url:
        LOG.error('Can not get public vip in /etc/astute.yaml!')
        return
    monitor_content = get_haproxy_monitor_content(monitor_url)
    if not monitor_content:
        return
    resource_list = csv2dict(monitor_content)

    def _print_status(log_level='debug'):
        # getattr instead of eval() to pick the log method safely
        log = getattr(LOG, log_level)
        if check_status:
            log('%s on %s status is %s, check_status is %s.'
                % (pxname, svname, status, check_status))
        else:
            log('%s on %s status is %s.' % (pxname, svname, status))

    for resource in resource_list:
        pxname = resource['pxname']
        svname = resource['svname']
        status = resource['status']
        check_status = resource['check_status']
        # FRONTEND rows report OPEN when healthy; all other rows report UP
        expected = 'OPEN' if svname == 'FRONTEND' else 'UP'
        if status == expected:
            _print_status()
        else:
            _print_status('error')
def check_haproxyresource():
    """Log the health of each resource listed on the HAProxy monitor page."""
    if not NODE_ROLE.is_controller():
        LOG.warn('This command can only run on controller node !')
        return
    LOG.info('=====> Checking HAProxy resource status')
    url = get_haproxy_monitor_url()
    if not url:
        LOG.error('Can not get public vip in /etc/astute.yaml!')
        return
    content = get_haproxy_monitor_content(url)
    if not content:
        return

    def _report(pxname, svname, status, check_status, log_level='debug'):
        # resolve the LOG method named by log_level
        writer = eval('LOG.%s' % log_level)
        if check_status:
            writer('%s on %s status is %s, check_status is %s.'
                   % (pxname, svname, status, check_status))
        else:
            writer('%s on %s status is %s.' % (pxname, svname, status))

    for entry in csv2dict(content):
        # a FRONTEND row is healthy when OPEN, any other row when UP
        healthy = (entry['status'] == 'OPEN') if entry['svname'] == 'FRONTEND' \
            else (entry['status'] == 'UP')
        _report(entry['pxname'], entry['svname'], entry['status'],
                entry['check_status'], 'debug' if healthy else 'error')
def check_pacemaker():
    """Check the status of pacemaker-managed resources (controller only)."""
    if NODE_ROLE.is_controller():
        LOG.info('=====> Checking pacemaker resource status')
        check_crm_resource_status()
    else:
        LOG.warn('This command can only run on controller node !')
def check_pacemaker():
    """Check the status of pacemaker-managed resources.

    Only controller nodes run the crm resource check; elsewhere a
    warning is logged and nothing happens.
    """
    if not NODE_ROLE.is_controller():
        LOG.warn('This command can only run on controller node !')
        return
    LOG.info('%s%s Checking pacemaker resource status' % ('='*5, '>'))
    check_crm_resource_status()