def check_all_nodes(check_obj):
    # 'is' tests identity, not equality; strings must be compared with '=='
    if check_obj == 'all':
        if LOG.enable_debug:
            check_cmd = 'sudo eayunstack --debug doctor cls --all'
        else:
            check_cmd = 'sudo eayunstack doctor cls --all'
    else:
        if LOG.enable_debug:
            check_cmd = 'sudo eayunstack --debug doctor cls -n %s' % check_obj
        else:
            check_cmd = 'sudo eayunstack doctor cls -n %s' % check_obj
    # get controller node list
    node_list = get_node_list('controller')
    # ssh to every controller node to check the object
    if len(node_list) == 0:
        LOG.warn('Node list is null !')
        return
    if check_obj == 'ceph':
        # only need to check one node for the ceph cluster
        ceph_node = node_list[0]
        LOG.info('%s Role: %-10s Node: %-13s %s'
                 % ('*' * 15, 'controller', ceph_node, '*' * 15))
        ssh_connect2(ceph_node, check_cmd)
    else:
        for node in node_list:
            LOG.info('%s Role: %-10s Node: %-13s %s'
                     % ('*' * 15, 'controller', node, '*' * 15))
            ssh_connect2(node, check_cmd)
def check_nodes(obj_name):
    # node_list = get_node_list('all')
    for role in ['controller', 'compute', 'mongo', 'ceph-osd']:
        node_list = get_node_list(role)
        for node in node_list:
            LOG.info('%s Role: %-10s Node: %-13s %s'
                     % ('*' * 15, role, node, '*' * 15))
            if LOG.enable_debug:
                out, err = ssh_connect2(
                    node, 'sudo eayunstack --debug doctor env -n %s' % obj_name)
            else:
                out, err = ssh_connect2(
                    node, 'sudo eayunstack doctor env -n %s' % obj_name)
            if err:
                LOG.error('Check failed !')
def push_repo_file_to_node(host, plugin_name, src_path, backup=False):
    LOG.debug('Push %s to node %s .' % (src_path, host))
    if backup:
        ssh_connect2(
            host, 'test -e /etc/yum.repos.d/bak || mkdir /etc/yum.repos.d/bak/')
        (out, err) = ssh_connect2(
            host, 'mv /etc/yum.repos.d/*.repo /etc/yum.repos.d/bak/')
        if err == '':
            scp_connect(host, src_path,
                        '/etc/yum.repos.d/%s.repo' % plugin_name)
        else:
            # the original message was missing its '% host' argument
            LOG.error('Can not backup "/etc/yum.repos.d/*.repo" on node %s .'
                      % host)
    else:
        scp_connect(host, src_path, '/etc/yum.repos.d/%s.repo' % plugin_name)
def check_all():
    """Check All Environment Object"""
    if NODE_ROLE.is_fuel():
        for role in ['controller', 'compute', 'mongo', 'ceph-osd']:
            node_list = get_node_list(role)
            for node in node_list:
                LOG.info('%s Role: %-10s Node: %-13s %s'
                         % ('*' * 15, role, node, '*' * 15))
                if LOG.enable_debug:
                    out, err = ssh_connect2(
                        node, 'sudo eayunstack --debug doctor env -a')
                else:
                    out, err = ssh_connect2(
                        node, 'sudo eayunstack doctor env -a')
                if err:
                    LOG.error('Check failed !')
    else:
        # register.all holds check-function names as strings; eval() resolves
        # each name to its callable before invoking it
        for i in register.all:
            eval(i)()
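# Design note (illustrative sketch, not part of the module): check_all()
# above resolves registered check names with eval(). An eval-free
# alternative keeps the callables themselves in a registry; the decorator
# and registry names below are hypothetical, not the project's real API.
CHECK_REGISTRY = {}

def register_check(func):
    # store the callable keyed by its name instead of storing name strings
    CHECK_REGISTRY[func.__name__] = func
    return func

def run_registered_checks():
    for name, check in CHECK_REGISTRY.items():
        check()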
def vrouter_check(parser):
    if NODE_ROLE.is_fuel():
        # TODO: run on fuel node in the future
        LOG.error('This check can be run only on network node')
        return
        # disabled until the TODO above is done:
        # pick one controller to run
        controller_node = None
        for node in NODE_ROLE.nodes:
            if node['roles'] == 'controller':
                controller_node = node['host']
        cmd = 'source /root/openrc;%s' % (' '.join(sys.argv))
        ssh_connect2(controller_node, cmd)
    elif NODE_ROLE.is_controller():
        _vrouter_check(parser)
    else:
        LOG.error('This check can be run only on network node')
def push_yaml_to_node(host, src_path, dst_file_name):
    (out, err) = ssh_connect2(host, 'test -d /etc/hiera || mkdir /etc/hiera')
    if err == '':
        LOG.debug('Push %s to node %s .' % (src_path, host))
        scp_connect(host, src_path, '/etc/hiera/%s' % dst_file_name)
    else:
        LOG.error('Can not create "/etc/hiera/" on node %s .' % host)
def setup_nodes(myip):
    repo_content = """
[eayunstack]
name=eayunstack
baseurl=http://{ip}:8080/eayunstack/repo
gpgcheck=0
"""
    tmp_repo_file = '/tmp/eayunstack.repo'
    target_repo_file = '/etc/yum.repos.d/eayunstack.repo'
    # write in text mode: the content is a formatted str, not bytes
    with open(tmp_repo_file, 'w') as f:
        f.write(repo_content.format(ip=myip))
    setup_command = 'mkdir -p /var/lib/eayunstack/{upgrade,puppet}'
    for node in NODE_ROLE.nodes:
        scp_connect(node['ip'], tmp_repo_file, target_repo_file)
        ssh_connect2(node['ip'], setup_command)
    os.unlink(tmp_repo_file)
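# Usage sketch for setup_nodes() (assumptions: it runs on the Fuel master,
# which serves the eayunstack repo over HTTP on port 8080; '10.20.0.1' is a
# placeholder admin-network address, not a value taken from the project).
# The UDP connect() below only asks the kernel which local address routes
# toward the nodes; no packet is sent.
import socket

def local_ip_towards_nodes(probe_addr='10.20.0.1'):
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        s.connect((probe_addr, 80))
        return s.getsockname()[0]
    finally:
        s.close()

# setup_nodes(local_ip_towards_nodes())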
def check_nodes(node_role, check_obj, multi_role=False):
    if multi_role:
        if LOG.enable_debug:
            check_cmd = 'sudo eayunstack --debug doctor stack --' + check_obj
        else:
            check_cmd = 'sudo eayunstack doctor stack --' + check_obj
    else:
        if LOG.enable_debug:
            check_cmd = ('sudo eayunstack --debug doctor stack --'
                         + check_obj + ' --%s' % node_role)
        else:
            check_cmd = ('sudo eayunstack doctor stack --'
                         + check_obj + ' --%s' % node_role)
    node_list = get_node_list(node_role)
    if len(node_list) == 0:
        LOG.warn('Node list is null !')
        return
    for node in node_list:
        LOG.info('%s Role: %-10s Node: %-13s %s'
                 % ('*' * 15, node_role, node, '*' * 15))
        # ssh to node and run command
        ssh_connect2(node, check_cmd)
def run_doctor_cmd_on_node(role, node, cmd):
    # clear any global logging disable level so the LOG.info below is emitted
    logging.disable(logging.NOTSET)
    LOG.info('%s%s Push check cmd to %-13s (%-10s) %s%s'
             % ('<', '=' * 2, node, role, '=' * 2, '>'))
    out, err = ssh_connect2(node, cmd, check_all=True)
    return out + err
def run_doctor_cmd_on_node(role, node, cmd):
    out, err = ssh_connect2(node, cmd, check_all=True)
    return out + err
def run_doctor_cmd_on_node(role, node, cmd, pipe):
    LOG.info('%s%s Push check cmd to %-13s (%-10s) %s%s'
             % ('<', '=' * 2, node, role, '=' * 2, '>'))
    ssh_connect2(node, cmd, pipe=pipe)
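# Sketch of how the pipe-based variant above could be fanned out across
# nodes with multiprocessing (assumption: ssh_connect2(..., pipe=conn)
# sends exactly one result object back through the pipe; this driver is
# illustrative, not the project's actual scheduler).
from multiprocessing import Pipe, Process

def run_doctor_cmd_on_nodes(role, nodes, cmd):
    workers = []
    for node in nodes:
        parent_conn, child_conn = Pipe()
        p = Process(target=run_doctor_cmd_on_node,
                    args=(role, node, cmd, child_conn))
        p.start()
        workers.append((node, parent_conn, p))
    results = {}
    for node, conn, p in workers:
        results[node] = conn.recv()  # blocks until the child sends its result
        p.join()
    return results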