def get_roles_hiera(self):
    def trim_primary(roles):
        # strip the 'primary-' prefix so e.g. 'primary-controller'
        # is reported simply as 'controller'
        trim_roles = [r for r in roles if not r.startswith('primary-')]
        trim_roles += [r[8:] for r in roles if r.startswith('primary-')]
        return trim_roles

    self.logger.debug('%s: roles not defined, trying hiera' % self.repr)
    cmd = 'hiera roles'
    outs, errs, code = tools.ssh_node(ip=self.ip,
                                      command=cmd,
                                      ssh_opts=self.ssh_opts,
                                      env_vars=self.env_vars,
                                      timeout=self.timeout,
                                      prefix=self.prefix)
    self.check_code(code, 'get_roles_hiera', cmd, errs, [0])
    if code == 0:
        try:
            roles = trim_primary(json.loads(outs))
        except ValueError:
            self.logger.warning("%s: failed to parse '%s' output as JSON"
                                % (self.repr, cmd))
            return self.roles
        self.logger.debug('%s: got roles: %s' % (self.repr, roles))
        if roles is not None:
            return roles
        else:
            return self.roles
    else:
        self.logger.warning("%s: failed to load roles via hiera"
                            % self.repr)
        return self.roles
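# A hypothetical illustration (not executed anywhere in this module) of the
# transformation trim_primary() performs on a `hiera roles` result; the
# sample role names are assumptions, only the prefix-stripping behaviour
# comes from the method above:
#
#   trim_primary(['primary-controller', 'compute'])
#   # -> ['compute', 'controller']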
def get_os(self):
    self.logger.debug('%s: os_platform not defined, trying to determine'
                      % self.repr)
    cmd = 'which lsb_release'
    outs, errs, code = tools.ssh_node(ip=self.ip,
                                      command=cmd,
                                      ssh_opts=self.ssh_opts,
                                      env_vars=self.env_vars,
                                      timeout=self.timeout,
                                      prefix=self.prefix)
    # lsb_release is present on Ubuntu and typically absent on CentOS,
    # so a non-zero exit code is treated as CentOS
    return 'centos' if code else 'ubuntu'
def get_cluster_id(self):
    self.logger.debug('%s: cluster id not defined, trying to determine'
                      % self.repr)
    astute_file = '/etc/astute.yaml'
    cmd = ("python -c 'import yaml; a = yaml.load(open(\"%s\")"
           ".read()); print a[\"cluster\"][\"id\"]'" % astute_file)
    outs, errs, code = tools.ssh_node(ip=self.ip,
                                      command=cmd,
                                      ssh_opts=self.ssh_opts,
                                      env_vars=self.env_vars,
                                      timeout=self.timeout,
                                      prefix=self.prefix)
    return int(outs.rstrip('\n')) if code == 0 else None
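# For reference, a minimal local sketch of what the remote one-liner above
# does, assuming /etc/astute.yaml has a top-level 'cluster' mapping with an
# 'id' key (which is what the command expects):
#
#   import yaml
#   with open('/etc/astute.yaml') as f:
#       cluster_id = int(yaml.safe_load(f)['cluster']['id'])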
def check_access(self):
    self.logger.debug('%s: verifying node access' % self.repr)
    cmd = 'true'
    outs, errs, code = tools.ssh_node(ip=self.ip,
                                      command=cmd,
                                      ssh_opts=self.ssh_opts,
                                      env_vars=self.env_vars,
                                      timeout=self.timeout,
                                      prefix=self.prefix)
    if code == 0:
        return True
    else:
        self.logger.info('%s: not accessible' % self.repr)
        return False
def get_nodes_cli(self):
    self.logger.info('using CLI to get nodes json')
    fuelnode = self.nodes[self.conf['fuel_ip']]
    o_auth = n_auth = ''
    entropy = bool(self.conf['fuel_user']) + bool(self.conf['fuel_pass'])
    if entropy == 2:
        # auth for Fuel up to 8.0
        o_auth = '--user %s --password %s' % (self.conf['fuel_user'],
                                              self.conf['fuel_pass'])
        # Fuel 9.0+
        n_auth = 'OS_USERNAME=%s OS_PASSWORD=%s' % (self.conf['fuel_user'],
                                                    self.conf['fuel_pass'])
    elif entropy == 1:
        self.logger.warning('Must specify both fuel_user and fuel_pass')
    cmd = 'bash -c "%s fuel node --json"' % n_auth
    nodes_json, err, code = tools.ssh_node(ip=fuelnode.ip,
                                           command=cmd,
                                           ssh_opts=fuelnode.ssh_opts,
                                           timeout=fuelnode.timeout,
                                           prefix=fuelnode.prefix)
    if code != 0:
        self.logger.warning('NodeManager: cannot get fuel node list from'
                            ' CLI, will fallback. Error: %s' % err)
        cmd = 'bash -c "fuel %s node --json"' % o_auth
        nodes_json, err, code = tools.ssh_node(ip=fuelnode.ip,
                                               command=cmd,
                                               ssh_opts=fuelnode.ssh_opts,
                                               timeout=fuelnode.timeout,
                                               prefix=fuelnode.prefix)
        if code != 0:
            self.logger.warning('NodeManager: cannot get '
                                'fuel node list from CLI: %s' % err)
            self.nodes_json = None
            return False
    self.nodes_json = json.loads(nodes_json)
    return True
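# Rough shape of self.nodes_json after a successful `fuel node --json` call;
# the field names shown are assumptions for illustration only - the actual
# keys are whatever the Fuel CLI returns:
#
#   [
#       {"id": 1, "cluster": 1, "roles": "controller", "ip": "10.20.0.3"},
#       {"id": 2, "cluster": 1, "roles": "compute", "ip": "10.20.0.4"},
#   ]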
def exec_simple_cmd(self, cmd, timeout=15, infile=None, outfile=None,
                    fake=False, ok_codes=None, input=None):
    self.logger.info('%s, exec: %s' % (self.repr, cmd))
    if not fake:
        outs, errs, code = tools.ssh_node(ip=self.ip,
                                          command=cmd,
                                          ssh_opts=self.ssh_opts,
                                          env_vars=self.env_vars,
                                          timeout=timeout,
                                          outputfile=outfile,
                                          ok_codes=ok_codes,
                                          input=input,
                                          prefix=self.prefix)
        self.check_code(code, 'exec_simple_cmd', cmd, errs, ok_codes)
def get_release(self):
    if self.id == 0:
        # /etc/nailgun/version.yaml only exists on the Fuel master node
        cmd = ("awk -F ':' '/release/ {print $2}' "
               "/etc/nailgun/version.yaml")
    else:
        # other nodes carry fuel_version in /etc/astute.yaml
        cmd = ("awk -F ':' '/fuel_version/ {print $2}' "
               "/etc/astute.yaml")
    release, err, code = tools.ssh_node(ip=self.ip,
                                        command=cmd,
                                        ssh_opts=self.ssh_opts,
                                        timeout=self.timeout,
                                        prefix=self.prefix)
    if code != 0:
        self.logger.warning('%s: could not determine MOS release'
                            % self.repr)
        release = 'n/a'
    else:
        release = release.strip('\n "\'')
    self.logger.info('%s, MOS release: %s' % (self.repr, release))
    return release
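# Illustrative behaviour of the awk extraction above (the sample YAML line
# is an assumption; only the command and the strip() characters come from
# the method):
#
#   echo 'release: "9.0"' | awk -F ':' '/release/ {print $2}'
#   # prints ' "9.0"', which release.strip('\n "\'') reduces to '9.0'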
def logs_populate(self, timeout=5):

    def filter_by_re(item, string):
        return (('include' not in item or not item['include'] or
                 any([re.search(i, string) for i in item['include']])) and
                ('exclude' not in item or not item['exclude'] or not
                 any([re.search(e, string) for e in item['exclude']])))

    for item in self.logs:
        self.log_item_manipulate(item)
        start_str = None
        if 'start' in item or hasattr(self, 'logs_days'):
            if hasattr(self, 'logs_days') and 'start' not in item:
                start = self.logs_days
            else:
                start = item['start']
            if any([type(start) is str and re.match(r'-?\d+', start),
                    type(start) is int]):
                # an integer (or digit string) means "this many days back"
                days = abs(int(str(start)))
                start_str = str(date.today() - timedelta(days=days))
            else:
                # otherwise expect an explicit date or datetime string
                for fmt in ['%Y-%m-%d', '%Y-%m-%d %H:%M:%S']:
                    try:
                        if datetime.strptime(start, fmt):
                            start_str = start
                            break
                    except ValueError:
                        pass
                if not start_str:
                    self.logger.warning('incorrect value of "start"'
                                        ' parameter in "logs": "%s" -'
                                        ' ignoring...' % start)
        if start_str:
            start_param = ' -newermt "$(date -d \'%s\')"' % start_str
        else:
            start_param = ''
        cmd = ("find '%s' -type f%s -exec du -b {} +" %
               (item['path'], start_param))
        self.logger.info('%s: logs du-cmd: %s' % (self.repr, cmd))
        outs, errs, code = tools.ssh_node(ip=self.ip,
                                          command=cmd,
                                          ssh_opts=self.ssh_opts,
                                          env_vars=self.env_vars,
                                          timeout=timeout,
                                          prefix=self.prefix)
        if code == 124:
            # 124 is the exit code used when the command hit the timeout
            self.logger.error("%s: command: %s, "
                              "timeout code: %s, error message: %s" %
                              (self.repr, cmd, code, errs))
            break
        if len(outs):
            item['files'] = {}
            for line in outs.split('\n'):
                if '\t' in line:
                    # du -b output is "<size>\t<path>"
                    size, f = line.split('\t')
                    if filter_by_re(item, f):
                        item['files'][f] = int(size)
                    else:
                        self.logger.debug('log file "%s" excluded' % f)
            self.logger.debug('logs: %s' % (item['files']))
    self.logger.info('%s: total logs size: %dMB' %
                     (self.repr,
                      sum(self.logs_dict().values()) / 1024 / 1024))
    return self.logs
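# Worked example of the 'start' handling above; the path and values are
# illustrative assumptions, only the transformation logic comes from the
# method:
#
#   start = '-3'          ->  days = 3, start_str = str(date.today() -
#                             timedelta(days=3)), i.e. a 'YYYY-MM-DD' string
#   start = '2016-05-10'  ->  matches '%Y-%m-%d' and is used as-is
#
# which yields a find command such as:
#
#   find '/var/log' -type f -newermt "$(date -d '2016-05-10')" -exec du -b {} +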
def exec_pair(self, phase, server_node=None, fake=False):
    sn = server_node
    cl = self.cluster_repr
    if sn:
        self.logger.debug('%s: phase %s: server %s' %
                          (self.repr, phase, sn.repr))
    else:
        self.logger.debug('%s: phase %s' % (self.repr, phase))
    nond_msg = '%s: network specified but network_data not set for %s'
    nonet_msg = '%s: network %s not found in network_data of %s'
    nosrv_msg = '%s: server_node not provided'
    noip_msg = '%s: %s has no IP in network %s'
    for i in self.scripts_all_pairs:
        if phase not in i:
            self.logger.warning('phase %s not defined in config' % phase)
            return self.scripts_all_pairs
        if phase.startswith('client'):
            if not sn:
                self.logger.warning(nosrv_msg % self.repr)
                return self.scripts_all_pairs
            if 'network' in i:
                if not sn.network_data:
                    self.logger.warning(nond_msg % (self.repr, sn.repr))
                    return self.scripts_all_pairs
                nd = sn.network_data
                net_dict = dict((v['name'], v) for v in nd)
                if i['network'] not in net_dict:
                    self.logger.warning(nonet_msg % (self.repr,
                                                     i['network'],
                                                     sn.repr))
                    return self.scripts_all_pairs
                if 'ip' not in net_dict[i['network']]:
                    self.logger.warning(noip_msg % (self.repr, sn.repr,
                                                    i['network']))
                    return self.scripts_all_pairs
                ip = net_dict[i['network']]['ip']
                # the address may carry a CIDR suffix, e.g. '10.20.0.3/24'
                if '/' in ip:
                    server_ip = ip.split('/')[0]
                else:
                    server_ip = ip
            else:
                server_ip = sn.ip
        phase_val = i[phase]
        ddir = os.path.join(self.outdir, 'scripts_all_pairs', cl, phase,
                            self.repr)
        tools.mdir(ddir)
        if type(phase_val) is dict:
            # dict form: {script_name: env_vars_value} - the value
            # replaces the default env_vars for this script
            env_vars = [list(phase_val.values())[0]]
            phase_val = list(phase_val.keys())[0]
        else:
            # copy so the appends below do not mutate the shared list
            env_vars = list(self.env_vars)
        if os.path.sep in phase_val:
            f = phase_val
        else:
            f = os.path.join(self.rqdir, Node.skey, phase_val)
        dfile = os.path.join(ddir, os.path.basename(f))
        if phase.startswith('client'):
            env_vars.append('SERVER_IP=%s' % server_ip)
            dname = os.path.basename(f) + '-%s' % server_ip
            dfile = os.path.join(ddir, dname)
        elif phase == 'server_stop' and 'server_output' in i:
            env_vars.append('SERVER_OUTPUT=%s' % i['server_output'])
        if fake:
            return self.scripts_all_pairs
        outs, errs, code = tools.ssh_node(ip=self.ip,
                                          filename=f,
                                          ssh_opts=self.ssh_opts,
                                          env_vars=env_vars,
                                          timeout=self.timeout,
                                          prefix=self.prefix)
        self.check_code(code, 'exec_pair, phase:%s' % phase, f, errs)
        if phase == 'server_start' and code == 0:
            i['server_output'] = outs.strip()
        with open(dfile, 'a+') as df:
            df.write(outs)
    return self.scripts_all_pairs
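# Hypothetical shape of a single self.scripts_all_pairs entry consumed by
# exec_pair(); the phase keys and the optional 'network'/'server_output'
# keys are the ones referenced in the code above, while the script names
# and network name are made-up examples:
#
#   {'server_start': 'iperf_server_start',
#    'server_stop': 'iperf_server_stop',
#    'client': 'iperf_client',
#    'network': 'management'}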
def exec_cmd(self, fake=False, ok_codes=None):
    cl = self.cluster_repr
    self.logger.debug('%s/%s/%s/%s' %
                      (self.outdir, Node.ckey, cl, self.repr))
    mapcmds = {}
    if self.cmds:
        ddir = os.path.join(self.outdir, Node.ckey, cl, self.repr)
        tools.mdir(ddir)
        self.cmds = sorted(self.cmds)
        for c in self.cmds:
            for cmd in c:
                dfile = os.path.join(ddir, cmd)
                errf = '%s.stderr' % dfile
                if self.outputs_timestamp:
                    dfile += self.outputs_timestamp_str
                self.logger.info('outfile: %s' % dfile)
                mapcmds[cmd] = dfile
                if not fake:
                    bash_cmd = "bash -c '%s'" % c[cmd]
                    outs, errs, code = tools.ssh_node(
                        ip=self.ip,
                        command=bash_cmd,
                        ssh_opts=self.ssh_opts,
                        env_vars=self.env_vars,
                        timeout=self.timeout,
                        prefix=self.prefix)
                    ec = self.check_code(code, 'exec_cmd', c[cmd], errs,
                                         ok_codes)
                    try:
                        with open(dfile, 'w') as df:
                            df.write(outs)
                    except IOError:
                        self.logger.error("can't write to file %s" % dfile)
                    if ec:
                        # command failed - keep its stderr alongside
                        try:
                            with open(errf, 'w') as ef:
                                ef.write('exitcode: %s\n' % code)
                                ef.write(errs)
                        except IOError:
                            self.logger.error("can't write to file %s"
                                              % errf)
    if self.scripts:
        self.generate_mapscr()
        tools.mdir(self.scripts_ddir)
        for scr, param in self.mapscr.items():
            if fake:
                continue
            outs, errs, code = tools.ssh_node(
                ip=self.ip,
                filename=param['script_path'],
                ssh_opts=self.ssh_opts,
                env_vars=param['env_vars'],
                timeout=self.timeout,
                prefix=self.prefix)
            ec = self.check_code(code, 'exec_cmd',
                                 'script %s' % param['script_path'],
                                 errs, ok_codes)
            try:
                with open(param['output_path'], 'w') as df:
                    df.write(outs)
            except IOError:
                self.logger.error("can't write to file %s"
                                  % param['output_path'])
            if ec:
                # script failed - keep its stderr alongside
                try:
                    with open(param['stderr_path'], 'w') as ef:
                        ef.write('exitcode: %s\n' % code)
                        ef.write(errs)
                except IOError:
                    self.logger.error("can't write to file %s"
                                      % param['stderr_path'])
    return mapcmds, self.mapscr