def check_mysql():
    """Check that every controller node is running in the MySQL cluster.

    Runs only on fuel (fans out to all nodes) or controller nodes.
    """
    # node role check: only fuel or controller nodes may run this
    if not NODE_ROLE.is_fuel():
        if not NODE_ROLE.is_controller():
            LOG.warn('This command can only run on fuel or controller node !')
            return
    if NODE_ROLE.is_fuel():
        check_all_nodes('mysql')
        return
    LOG.info('%s%s Checking mysql cluster status' % ('=' * 5, '>'))
    # get running node list for mysql cluster
    running_nodes = get_mysql_nodes()
    if running_nodes is None:
        LOG.error('Can not get the running node list for mysql cluster !')
        return
    # get all controller node hostname
    controllers = get_controllers_hostname()
    if controllers is None:
        LOG.error('Can not get the controllers node list !')
        return
    # every controller must appear in the mysql cluster's running node list
    error_nodes = [node for node in controllers if node not in running_nodes]
    if error_nodes:
        LOG.error('Node %s is not running in mysql cluster !' % error_nodes)
        LOG.error('Mysql cluster check failed !')  # fixed typo: "faild"
    else:
        LOG.info('Mysql cluster check successfully !')
def check_ceph():
    """Check ceph cluster health and the status of every OSD."""
    # role gate: allowed on fuel, controller or ceph-osd nodes only
    if not NODE_ROLE.is_fuel():
        if not NODE_ROLE.is_controller():
            if not NODE_ROLE.is_ceph_osd():
                LOG.warn('This command can only run on fuel or controller or ceph-osd node !')
                return
    if NODE_ROLE.is_fuel():
        check_all_nodes('ceph')
        return
    # overall cluster health
    LOG.info('%s%s Checking ceph cluster status' % ('=' * 5, '>'))
    ceph_check_health()
    # per-OSD status
    LOG.info('%s%s Checking ceph osd status' % ('=' * 5, '>'))
    all_up = True
    osd_status = get_ceph_osd_status()
    if not osd_status:
        LOG.error('Can not get ceph osd status !')
        all_up = False
    else:
        for line in osd_status.split('\n'):
            # skip header rows; only parse lines describing an osd.N entry
            if 'id' not in line and 'weigh' not in line and 'osd.' in line:
                fields = line.split()
                osd = fields[2]
                status = fields[3]
                if status != 'up':
                    LOG.error('%s status is not correct, please check it !' % osd)
                    all_up = False
    if all_up:
        LOG.info('Ceph osd status check successfully !')
def check_rabbitmq():
    """Check that every controller node has joined the RabbitMQ cluster.

    Runs only on fuel (fans out to all nodes) or controller nodes.
    """
    # node role check
    if not NODE_ROLE.is_fuel():
        if not NODE_ROLE.is_controller():
            LOG.warn('This command can only run on fuel or controller node !')
            return
    if NODE_ROLE.is_fuel():
        check_all_nodes('rabbitmq')
        return
    LOG.info('%s%s Checking rabbitmq cluster status' % ('=' * 5, '>'))
    # get all controller node hostname
    controllers = get_controllers_hostname()
    if controllers is None:
        LOG.error('Can not get the controllers node list !')
        return
    # get masters & slaves node list
    running_nodes = get_rabbitmq_nodes()
    if running_nodes is None:
        LOG.error('Can not get the running node list for rabbitmq cluster !')
        return
    # rabbitmq reports short hostnames, so compare against the first label
    error_nodes = [n for n in controllers if n.split('.')[0] not in running_nodes]
    if error_nodes:
        LOG.error('Node %s not in rabbitmq cluster !' % error_nodes)
        LOG.error('Rabbitmq cluster check failed !')  # fixed typo: "faild"
    else:
        LOG.info('Rabbitmq cluster check successfully !')
def check_mysql():
    """Check that every controller node is running in the MySQL cluster.

    Runs only on fuel (fans out to all nodes) or controller nodes.
    """
    # node role check
    if not NODE_ROLE.is_fuel():
        if not NODE_ROLE.is_controller():
            LOG.warn('This command can only run on fuel or controller node !')
            return
    if NODE_ROLE.is_fuel():
        check_all_nodes('mysql')
        return
    LOG.info('%s%s Checking mysql cluster status' % ('=' * 5, '>'))
    # get running node list for mysql cluster
    running_nodes = get_mysql_nodes()
    if running_nodes is None:
        LOG.error('Can not get the running node list for mysql cluster !')
        return
    # get all controller node hostname
    controllers = get_controllers_hostname()
    if controllers is None:
        LOG.error('Can not get the controllers node list !')
        return
    # every controller must appear in the mysql cluster's running node list
    error_nodes = [node for node in controllers if node not in running_nodes]
    if error_nodes:
        LOG.error('Node %s is not running in mysql cluster !' % error_nodes)
        LOG.error('Mysql cluster check failed !')  # fixed typo: "faild"
    else:
        LOG.info('Mysql cluster check successfully !')
def check_rabbitmq():
    """Check that every controller node has joined the RabbitMQ cluster.

    Runs only on fuel (fans out to all nodes) or controller nodes.
    """
    # node role check
    if not NODE_ROLE.is_fuel():
        if not NODE_ROLE.is_controller():
            LOG.warn('This command can only run on fuel or controller node !')
            return
    if NODE_ROLE.is_fuel():
        check_all_nodes('rabbitmq')
        return
    LOG.info('%s%s Checking rabbitmq cluster status' % ('=' * 5, '>'))
    # get all controller node hostname
    controllers = get_controllers_hostname()
    if controllers is None:
        LOG.error('Can not get the controllers node list !')
        return
    # get masters & slaves node list
    running_nodes = get_rabbitmq_nodes()
    if running_nodes is None:
        LOG.error('Can not get the running node list for rabbitmq cluster !')
        return
    # rabbitmq reports short hostnames, so compare against the first label
    error_nodes = [n for n in controllers if n.split('.')[0] not in running_nodes]
    if error_nodes:
        LOG.error('Node %s not in rabbitmq cluster !' % error_nodes)
        LOG.error('Rabbitmq cluster check failed !')  # fixed typo: "faild"
    else:
        LOG.info('Rabbitmq cluster check successfully !')
def check_ceph():
    """Check ceph cluster health and the status of every OSD."""
    # role gate: allowed on fuel, controller or ceph-osd nodes only
    if not NODE_ROLE.is_fuel():
        if not NODE_ROLE.is_controller():
            if not NODE_ROLE.is_ceph_osd():
                LOG.warn(
                    'This command can only run on fuel or controller or ceph-osd node !'
                )
                return
    if NODE_ROLE.is_fuel():
        check_all_nodes('ceph')
        return
    # overall cluster health
    LOG.info('%s%s Checking ceph cluster status' % ('=' * 5, '>'))
    ceph_check_health()
    # per-OSD status
    LOG.info('%s%s Checking ceph osd status' % ('=' * 5, '>'))
    all_up = True
    osd_status = get_ceph_osd_status()
    if not osd_status:
        LOG.error('Can not get ceph osd status !')
        all_up = False
    else:
        for line in osd_status.split('\n'):
            # skip header rows; only parse lines describing an osd.N entry
            if 'id' not in line and 'weigh' not in line and 'osd.' in line:
                fields = line.split()
                osd = fields[2]
                status = fields[3]
                if status != 'up':
                    LOG.error('%s status is not correct, please check it !' % osd)
                    all_up = False
    if all_up:
        LOG.info('Ceph osd status check successfully !')
def stack(parser):
    """Dispatch stack checks based on the node role and the parsed CLI flags."""
    # if node role is "unknow", go back
    if NODE_ROLE.is_unknown():
        LOG.error('Can not confirm the node role!')
        return
    role_flags = (
        (parser.CONTROLLER, 'controller'),
        (parser.COMPUTE, 'compute'),
        (parser.MONGO, 'mongo'),
    )
    if not NODE_ROLE.is_fuel():
        # a role-specific flag is only valid on a node holding that role
        for flag, role in role_flags:
            if flag and not eval('NODE_ROLE.is_%s' % role)():
                cmd_warn(role)
                return
        if parser.CONTROLLER or parser.COMPUTE or parser.MONGO:
            if parser.PROFILE and not parser.SERVICE and not parser.CHECK_ALL:
                for flag, role in role_flags:
                    if flag:
                        check(role, 'profile')
            if parser.SERVICE and not parser.PROFILE and not parser.CHECK_ALL:
                for flag, role in role_flags:
                    if flag:
                        check(role, 'service')
            # 'all' when both flags were given, -a was given, or neither flag was given
            if (parser.SERVICE and parser.PROFILE) or parser.CHECK_ALL \
                    or (not parser.PROFILE and not parser.SERVICE):
                for flag, role in role_flags:
                    if flag:
                        check(role, 'all')
            return
    # check all
    if parser.CHECK_ALL and parser.PROFILE and parser.SERVICE:
        check_all()
        return
    elif parser.CHECK_ALL and parser.PROFILE:
        check_all_profile()
        return
    elif parser.CHECK_ALL and parser.SERVICE:
        check_all_service()
        return
    elif parser.CHECK_ALL:
        check_all()
        return
    # check profile or service
    if parser.PROFILE:
        check_all_profile()
    if parser.SERVICE:
        check_all_service()
def init(parser):
    """Initialise the node list and node role files (fuel node only)."""
    if NODE_ROLE.is_unknown():
        # nothing below can work without a known role; return early instead of
        # falling through to a misleading "fuel only" warning (bug fix)
        LOG.error("Can not confirm the node role!")
        return
    if not NODE_ROLE.is_fuel():
        LOG.warn("This command can only run on fuel node !")
        return
    init_node_list_file()
    init_node_role_file()
def init(parser):
    """Initialise the node list and node role files (fuel node only)."""
    if NODE_ROLE.is_unknown():
        # nothing below can work without a known role; return early instead of
        # falling through to a misleading "fuel only" warning (bug fix)
        LOG.error('Can not confirm the node role!')
        return
    if not NODE_ROLE.is_fuel():
        LOG.warn('This command can only run on fuel node !')
        return
    init_node_list_file()
    init_node_role_file()
def _network_check_remote(remote_inf):
    """Ping the relevant addresses of every remote interface entry."""
    def _ping(peer_inf, role):
        # announce which address class of which peer is being probed
        LOG.debug("=====> start ping %s of %s(%s):" % (role, peer_inf["host"], peer_inf["role"]))
        ping(peer_inf[role])

    for inf in remote_inf:
        _ping(inf, "internal_address")
        # mongo nodes have no storage network, skip it on both ends
        if not NODE_ROLE.is_mongo() and not inf["role"].endswith("mongo"):
            _ping(inf, "storage_address")
        # public network is only checked between controllers
        if NODE_ROLE.is_controller() and inf["role"] == "controller":
            _ping(inf, "public_address")
def init(parser):
    """Initialise (or update) the eayunstack environment on the fuel node."""
    if NODE_ROLE.is_unknown():
        # nothing below can work without a known role; return early instead of
        # falling through to a misleading "fuel only" warning (bug fix)
        LOG.error('Can not confirm the node role!')
        return
    if not NODE_ROLE.is_fuel():
        LOG.warn('This command can only run on fuel node !')
        return
    # --update only refreshes the tool, skipping full initialisation
    if parser.UPDATE:
        update()
        return
    init_env()
    init_node_list_file()
    init_node_role_file()
def _network_check_remote(remote_inf):
    """Ping the relevant addresses of every remote interface entry."""
    def _ping(peer_inf, role):
        # announce which address class of which peer is being probed
        LOG.debug('=====> start ping %s of %s(%s):' % (role, peer_inf['host'], peer_inf['role']))
        ping(peer_inf[role])

    for inf in remote_inf:
        _ping(inf, 'internal_address')
        # mongo nodes have no storage network, skip it on both ends
        if not NODE_ROLE.is_mongo() and not inf['role'].endswith('mongo'):
            _ping(inf, 'storage_address')
        # public network is only checked between controllers
        if NODE_ROLE.is_controller() and inf['role'] == 'controller':
            _ping(inf, 'public_address')
def check_all(): check_cmd = get_check_cmd() if NODE_ROLE.is_fuel(): controllers = get_controllers_hostname() if controllers: controller_node = controllers[0] result = run_doctor_cmd_on_node('controller', controller_node, check_cmd) logging.disable(logging.NOTSET) LOG.info(result, remote=True) else: LOG.error('Can not get controller node list') elif NODE_ROLE.is_controller(): print run_command(check_cmd)
def check_all():
    '''Check All Cluster'''
    # node role gate: fuel fans out to all nodes, a controller runs locally
    if not (NODE_ROLE.is_fuel() or NODE_ROLE.is_controller()):
        LOG.warn('This command can only run on fuel or controller node !')
        return
    if NODE_ROLE.is_fuel():
        check_all_nodes('all')
    else:
        check_rabbitmq()
        check_mysql()
        check_haproxy()
        check_ceph()
def vrouter_check(parser):
    """Run the virtual-router check; only controller (network) nodes may run it."""
    if NODE_ROLE.is_fuel():
        # TODO: run on fuel node on future
        LOG.error('This check can be run only on network node')
        return
        # pick one controller to run
        # NOTE(review): unreachable scaffolding kept for the TODO above
        controller_node = None
        for node in NODE_ROLE.nodes:
            if node['roles'] == 'controller':
                controller_node = node['host']
        cmd = 'source /root/openrc;%s' % (' '.join(sys.argv))
        ssh_connect2(controller_node, cmd)
    elif NODE_ROLE.is_controller():
        _vrouter_check(parser)
    else:
        LOG.error('This check can be run only on network node')
def setup(parser):
    """Set things up for the upgrade operation."""
    # upgrade setup is orchestrated exclusively from the fuel node
    if not NODE_ROLE.is_fuel():
        LOG.error('This command can only be run on the fuel node.')
        return
    setup_rsyncd_config()
    setup_nodes(parser.MYIP)
def orphan(parser):
    """Delete orphaned resources: servers first, then the rest in parallel."""
    logging.disable(logging.INFO)
    if not NODE_ROLE.is_controller():
        LOG.warn('This command can only run on controller node !')
        return
    # servers must be removed before their dependent resources
    nova_thread = RunNovaThread()
    nova_thread.start()
    nova_thread.join()
    # the remaining cleanups are independent of each other; run them in parallel
    workers = [
        RunCinderThread(),
        RunGlanceThread(),
        RunNetBaseThread(),
        RunFirewallThread(),
        RunSecgroupThread(),
        RunVPNThread(),
        RunLBThread(),
        RunQoSThread(),
    ]
    for worker in workers:
        worker.start()
    for worker in workers:
        worker.join()
    logging.disable(logging.NOTSET)
def ami(parser):
    """Upload an AMI image assembled from kernel, initrd and image files.

    Controller node only; all three file arguments are mandatory.
    """
    if not NODE_ROLE.is_controller():
        LOG.warn('This command can only run on controller node !')
        return
    if not parser.KERNEL_FILE and not parser.INITRD_FILE and not parser.IMAGE_FILE:
        # fixed typo: "infomation"
        LOG.error('Lack of arguments, you can use --help to get help information\n')
    elif not parser.KERNEL_FILE:
        LOG.error('Please specify the kernel file\n')
    elif not parser.INITRD_FILE:
        LOG.error('Please specify the initrd file\n')
    elif not parser.IMAGE_FILE:
        LOG.error('Please specify the image file\n')
    else:
        # strip directories; the upload only needs the base names
        kernel_file_name = os.path.basename(parser.KERNEL_FILE)
        initrd_file_name = os.path.basename(parser.INITRD_FILE)
        # if no name was specified, default the AMI name to the image file name
        ami_image_name = parser.NAME or os.path.basename(parser.IMAGE_FILE)
        ami_image_upload(parser.KERNEL_FILE, kernel_file_name,
                         parser.INITRD_FILE, initrd_file_name,
                         parser.IMAGE_FILE, ami_image_name)
def _network_check_remote(remote_inf):
    """Ping the relevant addresses of every remote interface entry."""
    def _ping(peer_inf, role):
        # announce which address class of which peer is being probed
        LOG.debug('=====> start ping %s of %s(%s):' % (role, peer_inf['host'], peer_inf['role']))
        network_role = role.split('_')[0]
        hostname = peer_inf['host'].split(".")[0]
        ping(peer_inf[role], hostname, network_role)

    for inf in remote_inf:
        _ping(inf, 'internal_address')
        # mongo nodes have no storage network, skip it on both ends
        if not NODE_ROLE.is_mongo() and not inf['role'].endswith('mongo'):
            _ping(inf, 'storage_address')
        # public network is only checked between controllers
        if NODE_ROLE.is_controller() and inf['role'] == 'controller':
            _ping(inf, 'public_address')
        # ceph cluster network is only checked between ceph-osd nodes
        if NODE_ROLE.is_ceph_osd() and inf['role'] == 'ceph-osd':
            _ping(inf, 'ceph_cluster_address')
def ami(parser):
    """Upload an AMI image assembled from kernel, initrd and image files.

    Controller node only; all three file arguments are mandatory.
    """
    if not NODE_ROLE.is_controller():
        LOG.warn('This command can only run on controller node !')
        return
    if not parser.KERNEL_FILE and not parser.INITRD_FILE and not parser.IMAGE_FILE:
        # fixed typo: "infomation"
        LOG.error('Lack of arguments, you can use --help to get help information\n')
    elif not parser.KERNEL_FILE:
        LOG.error('Please specify the kernel file\n')
    elif not parser.INITRD_FILE:
        LOG.error('Please specify the initrd file\n')
    elif not parser.IMAGE_FILE:
        LOG.error('Please specify the image file\n')
    else:
        # strip directories; the upload only needs the base names
        kernel_file_name = os.path.basename(parser.KERNEL_FILE)
        initrd_file_name = os.path.basename(parser.INITRD_FILE)
        # if no name was specified, default the AMI name to the image file name
        ami_image_name = parser.NAME or os.path.basename(parser.IMAGE_FILE)
        ami_image_upload(parser.KERNEL_FILE, kernel_file_name,
                         parser.INITRD_FILE, initrd_file_name,
                         parser.IMAGE_FILE, ami_image_name)
def _network_check_local(local_inf, nic_status):
    """Verify that every physical NIC required by the local bridges is linked."""
    # 1) check if nic we need link is ok
    if NODE_ROLE.is_mongo():
        # mongo nodes do not use the storage or private bridges
        local_inf = [i for i in local_inf if i["name"] not in ["br-storage", "br-prv"]]
    if NODE_ROLE.is_ceph_osd():
        # ceph-osd nodes do not use the private bridge
        local_inf = [i for i in local_inf if i["name"] != "br-prv"]
    nic_need = [i["phy_port"] for i in local_inf]
    for nic in set(nic_need):
        # if two network roles use same nic, e.g. br-mgmt and br-fw-admin
        # use eno1, we can ignore it since we just want physic network nic
        owner = [i for i in local_inf if i["phy_port"] == nic][0]
        if nic_status[nic].lower() != "yes":
            LOG.error("Network card %s(%s) is not connected" % (nic, owner["name"]))
        else:
            LOG.debug("Network card %s(%s) connected" % (nic, owner["name"]))
def check_all():
    '''Check All Cluster'''
    # node role gate: fuel fans out to all nodes, a controller runs locally
    if not (NODE_ROLE.is_fuel() or NODE_ROLE.is_controller()):
        LOG.warn('This command can only run on fuel or controller node !')
        return
    if NODE_ROLE.is_fuel():
        check_all_nodes('all')
    else:
        check_rabbitmq()
        check_mysql()
        check_haproxy()
        check_ceph()
        check_pacemaker()
        check_cephspace()
def vrouter_check(parser):
    """Run the virtual-router check; only controller (network) nodes may run it."""
    if NODE_ROLE.is_fuel():
        # TODO: run on fuel node on future
        LOG.error("This check can be run only on network node")
        return
        # pick one controller to run
        # NOTE(review): unreachable scaffolding kept for the TODO above
        controller_node = None
        for node in NODE_ROLE.nodes:
            if node["roles"] == "controller":
                controller_node = node["host"]
        cmd = "source /root/openrc;%s" % (" ".join(sys.argv))
        ssh_connect2(controller_node, cmd)
    elif NODE_ROLE.is_controller():
        _vrouter_check(parser)
    else:
        LOG.error("This check can be run only on network node")
def deployment_monitor_plugins(parser):
    """Deploy the monitoring plugins selected on the command line (fuel only)."""
    if not NODE_ROLE.is_fuel():
        LOG.warn('This command can only run on fuel node !')
        return
    if parser.INFLUXDB:
        deployment_influxdb_grafana(parser.ENV)
    if parser.LMA_COLLECTOR:
        deployment_lma_collector(parser.ENV)
def check_all():
    '''Check All OpenStack Component'''
    if NODE_ROLE.is_fuel():
        # fuel drives the full check on every role remotely
        for role in all_roles:
            check_nodes(role, 'all')
    else:
        check_all_profile()
        check_all_service()
def check_all_service():
    """Check services for every role (from fuel) or for each local role."""
    if NODE_ROLE.is_fuel():
        for role in all_roles:
            # the node-side role name uses a dash, not an underscore
            target = 'ceph-osd' if role == 'ceph_osd' else role
            check_nodes(target, 'service', multi_role=True)
    else:
        for node_role in node_roles:
            eval('check_%s_service' % node_role)()
def check_all_service():
    """Check services for every role (fuel) or each local role except ceph_osd."""
    if NODE_ROLE.is_fuel():
        for role in all_roles:
            check_nodes(role, 'service', multi_role=True)
    else:
        for node_role in node_roles:
            # ceph-osd nodes have no per-node service checker
            if node_role == 'ceph_osd':
                continue
            eval('check_%s_service' % node_role)()
def go(parser):
    """Upgrade"""
    # the upgrade is orchestrated exclusively from the fuel node
    if not NODE_ROLE.is_fuel():
        LOG.error('This command can only be run on the fuel node.')
        return
    if parser.CHECK_ONLY:
        # only report the progress of a running upgrade
        check_upgrade_process()
    else:
        go_upgrade(parser.MYIP)
def check_all_profile():
    """Check profiles for every role (from fuel) or for each local role."""
    if NODE_ROLE.is_fuel():
        for role in all_roles:
            # the node-side role name uses a dash, not an underscore
            target = 'ceph-osd' if role == 'ceph_osd' else role
            check_nodes(target, 'profile', multi_role=True)
    else:
        for node_role in node_roles:
            eval('check_%s_profile' % node_role)()
def check_all_profile():
    """Check profiles for every role (fuel) or each local role except ceph_osd."""
    if NODE_ROLE.is_fuel():
        for role in all_roles:
            check_nodes(role, 'profile', multi_role=True)
    else:
        for node_role in node_roles:
            # ceph-osd nodes have no per-node profile checker
            if node_role == 'ceph_osd':
                continue
            eval('check_%s_profile' % node_role)()
def volume(parser):
    """Destroy a cinder volume by ID (controller node only)."""
    if not NODE_ROLE.is_controller():
        LOG.warn('This command can only run on controller node !')
        return
    if parser.DESTROY_VOLUME:
        if parser.ID:
            destroy_volume(parser.ID)
        else:
            # the volume ID is mandatory for a destroy
            LOG.error('Please use [--id ID] to specify the volume ID !')
def _network_check_local(local_inf, nic_status):
    """Verify that every physical NIC backing the local bridges is linked."""
    # 1) check if nic we need link is ok
    if NODE_ROLE.is_mongo():
        # mongo nodes do not use the storage or private bridges
        local_inf = [i for i in local_inf if i['name'] not in ['br-storage', 'br-prv']]
    if NODE_ROLE.is_ceph_osd():
        # ceph-osd nodes do not use the private bridge
        local_inf = [i for i in local_inf if i['name'] != 'br-prv']
    nic_need = []
    for inf in local_inf:
        nic_need.extend(inf['phy_port'])
    for nic in set(nic_need):
        # if two network roles use same nic, e.g. br-mgmt and br-fw-admin
        # use eno1, we can ignore it since we just want physic network nic
        owner = [i for i in local_inf if nic in i['phy_port']][0]
        if nic_status[nic].lower() != 'yes':
            LOG.error('Network card %s(%s) is not connected' % (nic, owner['name']))
        else:
            LOG.debug('Network card %s(%s) connected' % (nic, owner['name']))
def check(role, obj):
    """Run profile/service checks for a role, locally or across nodes from fuel."""
    if NODE_ROLE.is_fuel():
        check_nodes(role, obj)
        return
    if not eval('NODE_ROLE.is_%s' % role)():
        LOG.warn('This command can only run on fuel or %s node !' % role)
        return
    # 'all' expands to the profile check followed by the service check
    targets = ['profile', 'service'] if obj == 'all' else [obj]
    for target in targets:
        eval('check_%s_%s' % (role, target))()
def _network_check_local(local_inf, nic_status):
    """Verify that every physical NIC backing the local bridges is linked."""
    # 1) check if nic we need link is ok
    if NODE_ROLE.is_mongo():
        # mongo nodes do not use the storage or private bridges
        local_inf = [i for i in local_inf
                     if i['name'] not in ['br-storage', 'br-prv']]
    if NODE_ROLE.is_ceph_osd():
        # ceph-osd nodes do not use the private bridge
        local_inf = [i for i in local_inf if i['name'] != 'br-prv']
    nic_need = []
    for inf in local_inf:
        nic_need.extend(inf['phy_port'])
    for nic in set(nic_need):
        # if two network roles use same nic, e.g. br-mgmt and br-fw-admin
        # use eno1, we can ignore it since we just want physic network nic
        owner = [i for i in local_inf if nic in i['phy_port']][0]
        if nic_status[nic].lower() != 'yes':
            LOG.error('Network card %s(%s) is not connected' % (nic, owner['name']))
        else:
            LOG.debug('Network card %s(%s) connected' % (nic, owner['name']))
def check_all():
    '''Check All Environement Object'''
    if NODE_ROLE.is_fuel():
        check_cmd = get_check_cmd('all')
        # run the doctor command on every node of every role, waiting per role
        for role in ['controller', 'compute', 'mongo', 'ceph-osd']:
            node_list = get_node_list(role)
            proc_list = run_doctor_on_nodes(role, node_list, check_cmd)
            for proc in proc_list:
                proc.join()
    else:
        # run every registered local check
        for check_name in register.all:
            eval(check_name)()
def check_all():
    '''Check All Environement Object'''
    if NODE_ROLE.is_fuel():
        check_cmd = get_check_cmd('all')
        # run the doctor command on every node of every role, waiting per role
        for role in ['controller', 'compute', 'mongo', 'ceph-osd']:
            node_list = get_node_list(role)
            (proc_list, pipe) = run_doctor_on_nodes(role, node_list, check_cmd)
            for proc in proc_list:
                proc.join()
            # relay the collected remote output once all workers finished
            LOG.info(pipe.recv(), remote=True)
    else:
        # run every registered local check
        for check_name in register.all:
            eval(check_name)()
def instance(parser):
    """Delete a nova instance by ID (controller node only)."""
    if not NODE_ROLE.is_controller():
        LOG.warn('This command can only run on controller node !')
        return
    # NOTE(review): DELETE_INTANCE spelling comes from the CLI flag definition
    if parser.DELETE_INTANCE:
        if not parser.ID:
            LOG.error('Please use [--id ID] to specify the instance ID !')
        elif parser.DELETE_DISK:
            # remove the instance together with its disk
            delete_instance(parser.ID, delete_disk=True)
        else:
            delete_instance(parser.ID)
def check_mongodb():
    """Check the mongod service and its stats on mongo nodes; no-op elsewhere."""
    if not NODE_ROLE.is_mongo():
        return
    role = NODE_ROLE.role
    # NOTE(review): a truthy search_service('mongod') is treated as "not found";
    # presumably it returns an error indicator — confirm against its definition
    if search_service('mongod'):
        LOG.error('mongod service was not found on %s node,please fix it' \
            % role )
        return
    yaml_path = '/etc/astute.yaml'
    check_service('mongod')
    mongodb_stats = _get_mongodb_stats(yaml_path)
    # a dict result means stats were collected successfully
    if isinstance(mongodb_stats, dict):
        LOG.debug("mongod service is ok:%s" % mongodb_stats)
    else:
        LOG.error('mongod service is wrong:%s' % mongodb_stats)
def create_parser():
    """Build the top-level eayunstack argument parser with all plugin subcommands."""
    parser = argparse.ArgumentParser(
        prog='eayunstack',
        formatter_class=argparse.RawDescriptionHelpFormatter,
        description="""EayunStack Management Tools.\n\n""",
    )
    parser.add_argument('-o', '--output', dest='FILENAME',
                        help='Local File To Save Output Info')
    parser.add_argument('-d', '--debug', action='store_true', dest='DEBUG',
                        default=False, help='Log debug message or not')
    parser.add_argument('-e', '--email', dest='EMAIL',
                        help='email address which send error log to(use commas to separate multiple email address)')
    sub = parser.add_subparsers(title='Commands', metavar='COMMAND',
                                help='DESCRIPTION')
    # register every plugin advertised through the 'command' entry point
    for entry in pkg_resources.iter_entry_points('command'):
        name = entry.name
        fn = entry.load()
        # manage/cleanup only make sense on a controller node
        if name in ['manage', 'cleanup'] and not NODE_ROLE.is_controller():
            continue
        p = sub.add_parser(name, description=fn.__doc__, help=fn.__doc__)
        fn(p)
    return parser
def env(parser):
    """Entry point for the env checks: run everything or one named object."""
    if NODE_ROLE.is_unknown():
        LOG.error('Can not confirm the node role!')
        return
    if parser.CHECK_ALL:
        # -a alone checks everything; with an object name it checks all its nodes
        if parser.OBJECT_NAME:
            check_nodes(parser.OBJECT_NAME)
        else:
            check_all()
    elif parser.OBJECT_NAME:
        # dispatch to the matching check_<object> function
        eval('check_%s' % parser.OBJECT_NAME)()
    else:
        # TODO: print usage
        pass
def check_rabbitmqrestart():
    """Warn when rabbitmq has restarted since the last recorded start time."""
    if not NODE_ROLE.is_controller():
        return
    log_path = '/.eayunstack/rabbitmq_start_time'
    start_time = _get_from_ps()
    if not os.path.exists(log_path):
        # first run on this node: just record the current start time
        LOG.debug('the log file is not found')
        _log_time(log_path, start_time)
        return
    if _get_from_log(log_path) == start_time:
        LOG.debug('service rabbitmq has never been restart')
    else:
        LOG.warn('service rabbitmq has been restart at %s' % start_time)
        _log_time(log_path, start_time)
def check_all():
    '''Check All Environement Object'''
    if NODE_ROLE.is_fuel():
        # ssh to every node of every role and run the doctor there
        for role in ['controller', 'compute', 'mongo', 'ceph-osd']:
            node_list = get_node_list(role)
            for node in node_list:
                LOG.info('%s Role: %-10s Node: %-13s %s'
                         % ('*' * 15, role, node, '*' * 15))
                # propagate --debug to the remote doctor run
                if LOG.enable_debug:
                    out, err = ssh_connect2(node, 'sudo eayunstack --debug doctor env -a')
                else:
                    out, err = ssh_connect2(node, 'sudo eayunstack doctor env -a')
                if err:
                    LOG.error('Check failed !')
    else:
        # run every registered local check
        for check_name in register.all:
            eval(check_name)()
def check_network():
    """Check local NIC link status and connectivity to the other nodes."""
    nic_status = _network_get_nic_status()
    if NODE_ROLE.is_fuel():
        # the fuel node only reports its NIC information
        LOG.debug('Network card information:')
        for name in nic_status.keys():
            LOG.debug('%s: %s' % (name, nic_status[name]))
        return
    # NOTE(review): yaml.load without an explicit Loader trusts this local file
    cfg = yaml.load(file('/etc/astute.yaml'))
    # check node's nic status
    local_inf = _network_local_network_inf(cfg)
    _network_check_local(local_inf, nic_status)
    # check if node can connect to other node
    remote_inf = _network_remote_network_inf(cfg)
    _network_check_remote(remote_inf)
def check_cephspace():
    """Report ceph space usage: info below 83%, warn below 93%, error above."""
    # node role check: only controllers can query ceph
    if not NODE_ROLE.is_controller():
        return
    LOG.info('%s%s Checking ceph space' % ('=' * 5, '>'))
    ceph_space = get_ceph_space()
    limit_warn = 83   # percent used above which we warn
    limit_error = 93  # percent used above which we error
    if 0 <= ceph_space < limit_warn:
        LOG.info('The ceph space is used: %s%%' % ceph_space)
    elif limit_warn <= ceph_space < limit_error:
        LOG.warn('The ceph space is used: %s%%' % ceph_space)
    elif ceph_space < 0:
        # get_ceph_space() returns -1 on failure; fixed typo: "Faild"
        LOG.error('The ceph space check error: Get ceph space Failed')
    else:
        LOG.error('The ceph space is used: %s%%' % ceph_space)
def check_all():
    """Check All Environement Object"""
    if NODE_ROLE.is_fuel():
        # ssh to every node of every role and run the doctor there
        for role in ["controller", "compute", "mongo", "ceph-osd"]:
            node_list = get_node_list(role)
            for node in node_list:
                LOG.info("%s Role: %-10s Node: %-13s %s"
                         % ("*" * 15, role, node, "*" * 15))
                # propagate --debug to the remote doctor run
                if LOG.enable_debug:
                    out, err = ssh_connect2(node, "sudo eayunstack --debug doctor env -a")
                else:
                    out, err = ssh_connect2(node, "sudo eayunstack doctor env -a")
                if err:
                    LOG.error("Check failed !")
    else:
        # run every registered local check
        for check_name in register.all:
            eval(check_name)()
def check_cephspace():
    """Report ceph space usage: info below 83%, warn below 93%, error above."""
    # node role check: only controllers can query ceph
    if not NODE_ROLE.is_controller():
        return
    LOG.info('%s%s Checking ceph space' % ('=' * 5, '>'))
    ceph_space = get_ceph_space()
    limit_warn = 83   # percent used above which we warn
    limit_error = 93  # percent used above which we error
    if 0 <= ceph_space < limit_warn:
        LOG.info('The ceph space is used: %s%%' % ceph_space)
    elif limit_warn <= ceph_space < limit_error:
        LOG.warn('The ceph space is used: %s%%' % ceph_space)
    elif ceph_space < 0:
        # get_ceph_space() returns -1 on failure; fixed typo: "Faild"
        LOG.error('The ceph space check error: Get ceph space Failed')
    else:
        LOG.error('The ceph space is used: %s%%' % ceph_space)
def create_parser():
    """Build the top-level eayunstack argument parser with all plugin subcommands."""
    parser = argparse.ArgumentParser(
        prog='eayunstack',
        formatter_class=argparse.RawDescriptionHelpFormatter,
        description="""EayunStack Management Tools.\n\n""",
    )
    parser.add_argument('-o', '--output', dest='FILENAME',
                        help='Local File To Save Output Info')
    parser.add_argument('-d', '--debug', action='store_true', dest='DEBUG',
                        default=False, help='Log debug message or not')
    parser.add_argument('-e', '--email', dest='EMAIL',
                        help='email address which send error log to(use commas to separate multiple email address)')
    sub = parser.add_subparsers(title='Commands', metavar='COMMAND',
                                help='DESCRIPTION')
    # register every plugin advertised through the 'command' entry point
    for entry in pkg_resources.iter_entry_points('command'):
        name = entry.name
        fn = entry.load()
        # manage/cleanup only make sense on a controller node
        if name in ['manage', 'cleanup'] and not NODE_ROLE.is_controller():
            continue
        p = sub.add_parser(name, description=fn.__doc__, help=fn.__doc__)
        fn(p)
    return parser
def check_all():
    '''Check All Environement Object'''
    if NODE_ROLE.is_fuel():
        check_cmd = get_check_cmd('all')
        # collect every node of every role, tagging each with its role
        nodes = []
        for role in ['controller', 'compute', 'mongo', 'ceph-osd']:
            for node in get_node_list(role):
                nodes.append({'role': role, 'name': node})
        # run the doctor on all nodes and relay each node's output
        result = run_doctor_on_nodes(nodes, check_cmd)
        for res in result:
            LOG.info(res, remote=True)
    else:
        # run every registered local check
        for check_name in register.all:
            eval(check_name)()
def get_node_role():
    """Return the list of role names held by this node (empty when unknown)."""
    if NODE_ROLE.is_unknown():
        return []
    roles = []
    # probe each role predicate in a fixed order so the result is stable
    for name in ('fuel', 'controller', 'compute', 'ceph_osd', 'mongo'):
        if getattr(NODE_ROLE, 'is_%s' % name)():
            roles.append(name)
    return roles