def test_find_all(self):
    # Find supervisord via ps; there must be exactly one, parented by init (ppid 1).
    out = subp.call('ps xao pid,ppid,command | grep "supervisor[d]" | tr -s " "')[0]
    supervisors = [map(int, line.strip().split()[:2]) for line in out if 'supervisord' in line]
    assert_that(supervisors, has_length(1))
    supervisor_pid, supervisor_ppid = supervisors[0]
    assert_that(supervisor_ppid, equal_to(1))

    time.sleep(2)  # give nginx time to come up under supervisord

    # Find the nginx master via ps; it must be a child of supervisord.
    out = subp.call('ps xao pid,ppid,command | grep "nginx[:]" | tr -s " "')[0]
    masters = [map(int, line.strip().split()[:2]) for line in out if 'nginx: master process' in line]
    assert_that(masters, has_length(1))
    master_pid, master_ppid = masters[0]
    assert_that(master_ppid, equal_to(supervisor_pid))

    # Every worker found by ps must be a child of the master.
    worker_pids = []
    workers = [map(int, line.strip().split()[:2]) for line in out if 'nginx: worker process' in line]
    for worker_pid, worker_ppid in workers:
        worker_pids.append(worker_pid)
        assert_that(worker_ppid, equal_to(master_pid))

    # _find_all() must report the same master pid and worker pids that ps showed.
    container = NginxManager()
    nginxes = container._find_all()
    assert_that(nginxes, has_length(1))
    definition, data = nginxes.pop(0)
    assert_that(data, has_key('pid'))
    assert_that(data, has_key('workers'))
    assert_that(master_pid, equal_to(data['pid']))
    assert_that(worker_pids, equal_to(data['workers']))
def uname(meta):
    """
    Collects uname for the container, without a hostname

    :param meta: {} of meta
    """
    lines, _ = subp.call('uname -s -r -v -m -p')
    meta['uname'] = lines[0]
def certificate_subject(filename):
    # Parses the RFC2253-formatted subject of an x509 certificate into a dict
    # keyed by the friendly names in ssl_subject_map.  Returns None when
    # nothing was parsed (e.g. openssl failed; check=False suppresses errors).
    results = {}
    openssl_out, _ = subp.call(
        "openssl x509 -in %s -noout -subject -nameopt RFC2253" % filename,
        check=False
    )
    for line in openssl_out:
        if line:
            output = line[8:]  # trim "subject=" or "Subject:" from output
            factors = output.split(',')  # split output into distinct groups
            prev_factor = None
            for factor in factors:
                if '=' in factor:
                    key, value = factor.split('=', 1)  # only split on the first equal sign
                    key = key.lstrip().upper()  # remove leading spaces (if any) and capitalize (if lowercase)
                    if key in ssl_subject_map:
                        results[ssl_subject_map[key]] = value
                    prev_factor = key
                elif prev_factor in ssl_subject_map:
                    # If there wasn't an '=' in the current factor, go back the previous factor and append the current
                    # factor to the result in order to account for values where a ',' was part of the value.
                    results[ssl_subject_map[prev_factor]] += (',' + factor)
                    # Replace escaped \ (workaround)
                    results[ssl_subject_map[prev_factor]] = results[ssl_subject_map[prev_factor]].replace('\\', '')
    return results or None
def find_packages(self, meta):
    """
    Finds the debian package that owns the running nginx binary and
    records its installed version in meta['packages'].
    """
    package_name = None

    # ask dpkg which package the binary belongs to
    out, err = subp.call('dpkg -S %s' % self.object.bin_path, check=False)
    for line in out:
        match = re.match(self.dpkg_s_re, line)
        if match:
            package_name = match.group(1)
            break

    if err and 'no_path' in err[0]:
        meta['warnings'].append('self-made binary, is not from any nginx package')

    if not package_name:
        return

    # look the package up among installed ones to get its version
    installed = self.installed_nginx_packages()
    if package_name in installed:
        meta['packages'] = {package_name: installed[package_name]}
def alive_interfaces():
    """
    Returns all network interfaces which have UP state
    see ip link show dev eth0

    will always include lo if lo exists
    :return: set of str interface names
    """
    alive_interfaces = set()
    try:
        for interface_name, interface in psutil.net_if_stats().iteritems():
            if interface.isup:
                alive_interfaces.add(interface_name)
    except:
        # fallback for centos6 where psutil.net_if_stats() is unavailable
        for interface_name in netifaces.interfaces():
            ip_link_out, _ = subp.call("ip link show dev %s" % interface_name, check=False)
            if ip_link_out:
                first_line = ip_link_out[0]
                state_match = re.match('.+state\s+(\w+)\s+.*', first_line)
                if state_match:
                    state = state_match.group(1)
                    if interface_name == 'lo' or state == 'UP':
                        alive_interfaces.add(interface_name)
                    elif state == 'UNKNOWN':
                        # If state is 'UNKNOWN' (e.g. venet with OpenVZ) check whether
                        # 'UP' appears in the <...> flag summary.
                        # BUG FIX: use group(1) (the captured flag list) instead of
                        # group(0) (the entire matched line), and guard against the
                        # regex not matching at all (group(0) on None raised).
                        bracket_match = re.match('.+<([\w,\,]+)>.+', first_line)
                        if bracket_match:
                            bracket = bracket_match.group(1)
                            for value in bracket.split(','):
                                if value == 'UP':
                                    alive_interfaces.add(interface_name)
                                    break
    return alive_interfaces
def uname(meta):
    """
    Collects the full uname for the OS

    :param meta: {} of meta
    """
    output, _ = subp.call('uname -a')
    meta['uname'] = output[0]
def network(meta):
    """
    network -- Docker network meta report

    leaves out IP addresses since they will be different per container.
    """
    interfaces = meta['network']['interfaces']

    # collect name and MAC address for every alive interface
    for name in alive_interfaces():
        addrs = netifaces.ifaddresses(name)
        mac = addrs.get(netifaces.AF_LINK, [{}])[0].get('addr')
        interfaces.append({'name': name, 'mac': mac})

    # the default interface is the last column of the default-route line
    netstat_out, _ = subp.call("netstat -nr | egrep -i '^0.0.0.0|default'", check=False)
    if len(netstat_out) and netstat_out[0]:
        default_interface = netstat_out[0].split(' ')[-1]
    elif len(interfaces):
        # no default route -- fall back to the first collected interface
        default_interface = interfaces[0]['name']
    else:
        default_interface = None
    meta['network']['default'] = default_interface
def alive_interfaces():
    """
    Returns all network interfaces which have UP state
    see ip link show dev eth0

    will always include lo if lo exists
    :return: set of str
    """
    found = set()
    try:
        for name, stats in psutil.net_if_stats().iteritems():
            if stats.isup:
                found.add(name)
    except:
        context.log.debug('failed to use psutil.net_if_stats', exc_info=True)

        # fallback for centos6: parse `ip link` output per interface
        for name in netifaces.interfaces():
            output, _ = subp.call("ip link show dev %s" % name, check=False)
            if not output:
                continue
            match = re.match('.+state\s+(\w+)\s+.*', output[0])
            if match and (name == 'lo' or match.group(1) == 'UP'):
                found.add(name)
    return found
def _get_hostname_unix():
    """Returns the fqdn reported by `hostname -f`, or None on any failure."""
    try:
        # fqdn
        output, err = subp.call('/bin/hostname -f')
        return output[0]
    except Exception:
        return None
def certificate_ocsp_uri(filename):
    """
    Returns the OCSP URI of an x509 certificate, or None.

    :param filename: str path to the certificate file
    :return: str OCSP URI, or None if absent
    """
    result = None
    openssl_out, _ = subp.call(
        "openssl x509 -in %s -noout -ocsp_uri" % filename, check=False
    )
    # openssl may produce no output at all (bad file; check=False suppresses
    # the error) -- guard against an empty list before indexing
    if openssl_out and openssl_out[0]:
        result = openssl_out[0]
    return result
def rlimit_nofile(self):
    """
    Returns the process's open-files limit (RLIMIT_NOFILE), or None
    when it cannot be determined.
    """
    if hasattr(self, 'rlimit'):
        return self.rlimit(psutil.RLIMIT_NOFILE)[1]

    # fallback for old systems without rlimit():
    # read the value out of /proc/<pid>/limits instead
    cmd = "cat /proc/%s/limits | grep 'Max open files' | awk '{print $5}'" % self.pid
    limits_out, _ = subp.call(cmd, check=False)
    if limits_out:
        return int(limits_out[0])
def etc_release(self):
    """ FreeBSD has no *-release files. This uses uname -sr instead. """
    output, _ = subp.call('uname -sr')
    name, version = output[0].split(' ', 1)

    release = self.meta['release']
    release['name'] = name
    release['version'] = version
    release['version_id'] = version
def test_find_none(self):
    # Kill running NGINX so that it finds None
    subp.call('pgrep nginx |sudo xargs kill -SIGKILL', check=False)
    self.running = False

    # Setup dummy object
    context.objects.register(DummyRootObject())

    # with nginx dead the manager must discover nothing...
    container = NginxManager()
    nginxes = container._find_all()
    assert_that(nginxes, has_length(0))

    # ...and must have queued exactly one event on the root object
    root_object = context.objects.root_object
    assert_that(root_object.eventd.current, has_length(1))

    # Reset objects...
    context.objects = None
    context._setup_object_tank()
def installed_nginx_packages(self):
    """
    Returns {package_name: version} for every installed nginx* package
    reported by `dpkg -l`.
    """
    packages = {}
    dpkg_out, _ = subp.call("dpkg -l | grep nginx")
    for line in dpkg_out:
        match = re.match(self.dpkg_l_re, line)
        if match and match.group(2).startswith('nginx'):
            packages[match.group(2)] = match.group(3)
    return packages
def nginx_v(path_to_binary):
    """
    call -V and parse results

    :param path_to_binary str - path to binary
    :return {} - see result
    """
    result = {
        'version': None,
        'plus': {'enabled': False, 'release': None},
        'ssl': {'built': None, 'run': None},
        'configure': {}
    }

    # nginx -V writes to stderr
    _, nginx_v_err = subp.call("%s -V" % path_to_binary)
    for line in nginx_v_err:
        # SSL stuff
        if line.lower().startswith('built with') and 'ssl' in line.lower():
            parts = line.split(' ')
            lib_name, lib_version = parts[2:4]
            result['ssl'] = {
                'built': [lib_name, lib_version],
                'run': [lib_name, lib_version],
            }

        if line.lower().startswith('run with') and 'ssl' in line.lower():
            parts = line.split(' ')
            lib_name, lib_version = parts[2:4]
            result['ssl']['run'] = [lib_name, lib_version]

        # BUG FIX: split on the FIRST colon only -- values (e.g. configure
        # arguments containing paths or URLs with ':') made the plain
        # 2-tuple unpack below raise ValueError
        parts = line.split(':', 1)
        if len(parts) < 2:
            continue

        # parse version
        key, value = parts
        if key == 'nginx version':
            # parse major version
            major_parsed = re.match('.*/([\d\w\.]+)', value)
            result['version'] = major_parsed.group(1) if major_parsed else value.lstrip()

            # parse plus version
            if 'plus' in value:
                plus_parsed = re.match('.*\(([\w\-]+)\).*', value)
                if plus_parsed:
                    result['plus']['enabled'] = True
                    result['plus']['release'] = plus_parsed.group(1)

        # parse configure
        if key == 'configure arguments':
            arguments = _parse_arguments(value)
            result['configure'] = arguments

    return result
def linux_name():
    """
    Returns a short lowercase linux distribution name.

    Prefers the ID= field from /etc/*-release, then scans the release text
    for known vendors, and finally falls back to 'linux'.  If the release
    files cannot be read at all, returns the lowercased uname -s.
    """
    try:
        out, err = subp.call('cat /etc/*-release')
    except AmplifySubprocessError:
        out, err = subp.call('uname -s')
        return out[0].lower()

    for line in out:
        if line.startswith('ID='):
            return line[3:].strip('"').lower()

    full_output = '\n'.join(out).lower()
    for needle, name in (('oracle linux', 'rhel'), ('red hat', 'rhel'), ('centos', 'centos')):
        if needle in full_output:
            return name
    return 'linux'
def proc_cpuinfo(self):
    """ cat /proc/cpuinfo -- fills processor model and core count """
    cpuinfo_out, _ = subp.call('cat /proc/cpuinfo')
    processor = self.meta['processor']
    for line in cpuinfo_out:
        match = re.match(self.proc_cpuinfo_re, line)
        if not match:
            continue
        key, value = match.group(1), match.group(2)
        if key.startswith('model name'):
            processor['model'] = value
        elif key.startswith('cpu cores'):
            processor['cores'] = value
def proc_cpuinfo(meta):
    """ cat /proc/cpuinfo -- fills processor model and core count """
    cpuinfo_out, _ = subp.call('cat /proc/cpuinfo')
    for line in cpuinfo_out:
        match = re.match('([\w|\s]+):\s*(.+)', line)
        if not match:
            continue
        key, value = match.group(1), match.group(2)
        if key.startswith('model name'):
            meta['processor']['model'] = value
        elif key.startswith('cpu cores'):
            meta['processor']['cores'] = value
def lscpu(self):
    """ Collects CPU architecture and clock rate via sysctl (BSD stand-in for lscpu) """
    sysctl_out, _ = subp.call('sysctl hw.machine_arch hw.clockrate')
    # sysctl key -> meta['processor'] field
    mapping = {
        'hw.machine_arch': 'architecture',
        'hw.clockrate': 'mhz',
    }
    for line in sysctl_out:
        match = re.match(self.lscpu_re, line)
        if match:
            key, value = match.group(1), match.group(2)
            if key in mapping:
                self.meta['processor'][mapping[key]] = value
def proc_cpuinfo(self):
    """ Collects cpu/core counts via psutil and the CPU model via sysctl hw.model """
    processor = self.meta['processor']
    processor['cpus'] = psutil.cpu_count(logical=False)
    processor['cores'] = psutil.cpu_count()

    sysctl_out, _ = subp.call('sysctl hw.model')
    for line in sysctl_out:
        match = re.match(self.proc_cpuinfo_re, line)
        if match and match.group(1).startswith('hw.model'):
            processor['model'] = match.group(2)
def open_ssl(self, meta):
    """Old nginx uses the standard openssl library - find its version via dpkg"""
    if meta['ssl']:
        return

    dpkg_out, _ = subp.call("dpkg -l | grep openssl")
    for line in dpkg_out:
        match = re.match('([\d\w]+)\s+([\d\w\.\-]+)\s+([\d\w\.\-\+_~]+)\s', line)
        if match and match.group(2).startswith('openssl'):
            meta['ssl'] = {
                'built': [match.group(2), match.group(3)],
                'run': [match.group(2), match.group(3)],
            }
def certificate_issuer(filename):
    """
    Parses `openssl x509 -issuer` output against the ssl_regexs patterns.

    :param filename: str path to the certificate file
    :return: {} of matched groups, or None when nothing matched
    """
    results = {}
    openssl_out, _ = subp.call("openssl x509 -in %s -noout -issuer" % filename, check=False)
    for line in openssl_out:
        if not line:
            continue
        for regex in ssl_regexs:
            matched = regex.match(line)
            if matched:
                results.update(matched.groupdict())
    return results or None
def certificate_purpose(filename):
    """
    Parses `openssl x509 -purpose` output into a {purpose: value} dict.

    :param filename: str path to the certificate file
    :return: {} of purposes, or None when nothing was parsed
    """
    results = {}
    openssl_out, _ = subp.call(
        "openssl x509 -in %s -noout -purpose" % filename, check=False
    )
    for line in openssl_out:
        if line:
            # reuse the first split result instead of splitting the line twice
            split = line.split(' : ')
            if len(split) == 2:
                key, value = split
                results[key] = value
    return results or None
def etc_release(meta):
    """
    Collects release info, additionally handling the centos6 format
    (e.g. "CentOS release 6.7 (Final)") that the base collector misses.
    """
    SystemMetaCollector.etc_release(meta)

    release = meta['release']
    if release['version_id'] is not None or release['version'] is not None:
        return

    centos_out, _ = subp.call('cat /etc/centos-release')
    for line in centos_out:
        match = re.match('(\w+)\s+(\w+)\s+([\d\.]+)\s+([\w\(\)]+)', line)
        if match:
            release['name'] = match.group(1)
            release['version_id'] = match.group(3)
            release['version'] = '%s %s' % (match.group(3), match.group(4))
def test_default_interface(self):
    container = SystemManager()
    container._discover_objects()
    os_obj = container.objects.find_all(types=container.types)[0]

    collector = SystemMetaCollector(object=os_obj)
    collector.collect()

    # extract the default-route interface name from netstat
    # NOTE(review): subp.call(...)[0] yields the stdout line list; unpacking it
    # as "value, _ =" assumes exactly two elements (the sed result plus one
    # trailing entry) -- verify this holds for subp.call's output format
    default_from_netstat, _ = subp.call(
        'netstat -nr | egrep -i "^0.0.0.0|default" | head -1 | sed "s/.*[ ]\([^ ][^ ]*\)$/\\1/"'
    )[0]
    default_interface = os_obj.metad.current['network']['default']
    assert_that(default_interface, equal_to(default_from_netstat))
def certificate_dates(filename):
    """
    Returns {'start': ts, 'end': ts} unix timestamps parsed from the
    certificate's notBefore/notAfter fields, or None when nothing parsed.
    """
    keys = {
        'notBefore': 'start',
        'notAfter': 'end'
    }
    results = {}
    openssl_out, _ = subp.call("openssl x509 -in %s -noout -dates" % filename, check=False)
    for line in openssl_out:
        if not line:
            continue
        key, value = line.split('=')
        if key in keys:
            parsed = datetime.datetime.strptime(value, '%b %d %H:%M:%S %Y %Z')
            results[keys[key]] = int(parsed.strftime('%s'))
    return results or None
def etc_release(self):
    """
    Centos6 has different *-release format.

    For example: CentOS release 6.7 (Final)
    """
    super(CentosSystemMetaCollector, self).etc_release()

    release = self.meta['release']
    if release['version_id'] is not None or release['version'] is not None:
        return

    centos_out, _ = subp.call('cat /etc/centos-release')
    for line in centos_out:
        match = re.match(self.etc_release_re, line)
        if match:
            release['name'] = match.group(1)
            release['version_id'] = match.group(3)
            release['version'] = '%s %s' % (match.group(3), match.group(4))
def network(meta): """ network """ # collect info for all the alive interfaces for interface in alive_interfaces(): addresses = netifaces.ifaddresses(interface) interface_info = { 'name': interface } # collect ipv4 and ipv6 addresses for proto, key in { 'ipv4': netifaces.AF_INET, 'ipv6': netifaces.AF_INET6 }.iteritems(): # get the first address protocol_data = addresses.get(key, [{}])[0] if protocol_data: addr = protocol_data.get('addr').split('%').pop(0) netmask = protocol_data.get('netmask') try: prefixlen = netaddr.IPNetwork('%s/%s' % (addr, netmask)).prefixlen except: prefixlen = None interface_info[proto] = { 'netmask': netmask, 'address': addr, 'prefixlen': prefixlen } # collect mac address interface_info['mac'] = addresses.get(netifaces.AF_LINK, [{}])[0].get('addr') meta['network']['interfaces'].append(interface_info) # get default interface name netstat_out, _ = subp.call("netstat -nr | egrep -i '^0.0.0.0|default'", check=False) if len(netstat_out) and netstat_out[0]: first_matched_line = netstat_out[0] default_interface = first_matched_line.split(' ')[-1] elif len(meta['network']['interfaces']): default_interface = meta['network']['interfaces'][0]['name'] else: default_interface = None meta['network']['default'] = default_interface
def certificate_full(filename):
    """
    Parses the full `openssl x509 -text` output of a certificate.

    Collects the fields matched by ssl_text_regexs, plus any subject
    alternative names (ssl_dns_regex) under the 'names' key.

    :param filename: str path to the certificate file
    :return: {} of parsed values, or None when nothing was parsed
    """
    results = {}
    openssl_out, _ = subp.call("openssl x509 -in %s -noout -text" % filename, check=False)
    for line in openssl_out:
        matched = False
        for regex in ssl_text_regexs:
            match_obj = regex.match(line)
            if match_obj:
                results.update(match_obj.groupdict())
                matched = True

        # BUG FIX: the original `continue` only advanced the inner regex loop,
        # so the DNS check still ran for matched lines despite the comment
        # saying it should be skipped; skip it explicitly here instead
        if matched:
            continue

        dns_matches = ssl_dns_regex.findall(line)
        if dns_matches:
            results['names'] = map(lambda x: x.split(':')[1], dns_matches)
    return results or None
def netstat(self):
    """
    netstat -s

    Checks for "times the listen queue of a socket overflowed" and reports
    the delta since the previous collection as system.net.listen_overflows.
    """
    new_stamp = time.time()
    netstat_out, _ = subp.call(
        "netstat -s | grep -i 'times the listen queue of a socket overflowed'",
        check=False
    )
    # grep may match nothing (check=False suppresses the error), so guard
    # the pop(0) -- it raised IndexError on an empty output list
    first_line = netstat_out.pop(0) if netstat_out else ''
    gwe = re.match('\s*(\d+)\s*', first_line)
    new_value = int(gwe.group(1)) if gwe else 0

    prev_stamp, prev_value = self.previous_values.get('system.net.listen_overflows', [None, None])
    if prev_stamp:
        delta_value = new_value - prev_value
        self.object.statsd.incr('system.net.listen_overflows', delta_value)
    self.previous_values['system.net.listen_overflows'] = [new_stamp, new_value]
def block_devices():
    """
    Returns a list of all non-virtual block devices for a host
    :return: [] of str
    """
    # freebsd: ask geom for the disk names
    if os_name() == 'freebsd':
        geom_out, _ = subp.call(
            "geom disk list | grep 'Geom name:' | awk '{print $3}'", check=False)
        return [device for device in geom_out if device]

    # linux: keep every /sys/block entry whose symlink target is not virtual
    if os.path.exists('/sys/block/'):
        return [
            device for device in os.listdir('/sys/block/')
            if '/virtual/' not in os.readlink('/sys/block/%s' % device)
        ]

    return []
def find_packages(self, meta):
    """
    Finds the rpm package that owns the running nginx binary and records
    its version in meta['packages'].
    """
    package, version = None, None

    # query rpm for "NAME VERSION-RELEASE.ARCH" of the owning package
    query = (
        'rpm -qf %s ' % self.object.bin_path +
        '--queryformat="%{NAME} %{VERSION}-%{RELEASE}.%{ARCH}' +
        '\\n"'
    )
    rpm_out, rpm_err = subp.call(query, check=False)

    if rpm_out and rpm_out[0]:
        package, version = rpm_out[0].split(' ')

    if rpm_err and 'is not owned by' in rpm_err[0]:
        meta['warnings'].append(
            'self-made binary, is not from any nginx package')

    if not package:
        return

    meta['packages'] = {package: version}
def etc_release():
    """
    /etc/*-release

    Parses every /etc/*-release file into a normalized release dict.

    :return: {} with codename/id/name/version_id/version keys (str or None)
    """
    result = {'codename': None, 'id': None, 'name': None, 'version_id': None, 'version': None}

    # maps each result key to the release-file variable names that may fill it
    mapper = {
        'codename': ('VERSION_CODENAME', 'DISTRIB_CODENAME', 'UBUNTU_CODENAME'),
        # BUG FIX: was the bare string 'ID' -- `key in 'ID'` is a substring
        # test, so keys 'I' and 'D' would also have matched; use a tuple
        'id': ('ID',),
        'name': ('NAME', 'DISTRIB_ID'),
        'version_id': ('VERSION_ID', 'DISTRIB_RELEASE'),
        'version': ('VERSION', 'DISTRIB_DESCRIPTION')
    }

    for release_file in glob.glob("/etc/*-release"):
        etc_release_out, _ = subp.call('cat %s' % release_file)
        for line in etc_release_out:
            kv = re.match('(\w+)=(.+)', line)
            if kv:
                key, value = kv.group(1), kv.group(2)
                for var_name, release_vars in mapper.iteritems():
                    # first value seen wins: only fill entries still unset
                    if key in release_vars and result[var_name] is None:
                        result[var_name] = value.replace('"', '')

    if result['name'] is None:
        result['name'] = 'unix'
    return result
def _find_all():
    """
    Tries to find all master processes

    :return: list of dict: nginx object definitions
    """
    # get ps info
    ps_cmd = "ps xao pid,ppid,command | grep 'nginx[:]'"
    try:
        ps, _ = subp.call(ps_cmd)
        context.log.debug('ps nginx output: %s' % ps)
    except:
        # ps/grep failed -- report an event on the root object and bail out
        context.log.debug('failed to find running nginx via %s' % ps_cmd)
        context.log.debug('additional info:', exc_info=True)
        if context.objects.root_object:
            context.objects.root_object.eventd.event(
                level=INFO,
                message='no nginx found'
            )
        return []

    # return an empty list if there are no master processes
    if not any('nginx: master process' in line for line in ps):
        context.log.debug('nginx masters amount is zero')
        return []

    # collect all info about processes
    masters = {}
    try:
        for line in ps:
            # parse ps response line:
            # 21355 1 nginx: master process /usr/sbin/nginx
            gwe = re.match(r'\s*(?P<pid>\d+)\s+(?P<ppid>\d+)\s+(?P<cmd>.+)\s*', line)

            # if not parsed - go to the next line
            if not gwe:
                continue

            pid, ppid, cmd = int(gwe.group('pid')), int(gwe.group('ppid')), gwe.group('cmd').rstrip()

            # match nginx master process
            if 'nginx: master process' in cmd:
                # skip masters whose parent launch method is unsupported
                if not launch_method_supported("nginx", ppid):
                    continue

                # get path to binary, prefix and conf_path
                try:
                    bin_path, prefix, conf_path, version = get_prefix_and_conf_path(cmd)
                except:
                    context.log.debug('failed to find bin_path, prefix and conf_path for %s' % cmd)
                    context.log.debug('', exc_info=True)
                else:
                    # calculate local id from binary + config + prefix
                    local_id = hashlib.sha256('%s_%s_%s' % (bin_path, conf_path, prefix)).hexdigest()

                    # a worker may have been seen before its master, so merge
                    # into any existing stub entry instead of overwriting it
                    if pid not in masters:
                        masters[pid] = {'workers': []}

                    masters[pid].update({
                        'version': version,
                        'bin_path': bin_path,
                        'conf_path': conf_path,
                        'prefix': prefix,
                        'pid': pid,
                        'local_id': local_id
                    })

            # match worker process
            elif 'nginx: worker process' in cmd:
                if ppid in masters:
                    masters[ppid]['workers'].append(pid)
                else:
                    # worker seen before its master - create a stub entry
                    masters[ppid] = dict(workers=[pid])
    except Exception as e:
        exception_name = e.__class__.__name__
        context.log.error('failed to parse ps results due to %s' % exception_name)
        context.log.debug('additional info:', exc_info=True)

    # collect results
    results = []
    for pid, description in masters.iteritems():
        if 'bin_path' in description:  # filter workers with non-executable nginx -V (relative paths, etc)
            definition = {
                'local_id': description['local_id'],
                'type': NginxManager.type,
                'root_uuid': context.uuid
            }
            results.append((definition, description))
    return results
def restart_nginx(self):
    """Restart nginx through the init-system service command."""
    restart_cmd = 'service nginx restart'
    subp.call(restart_cmd)
def start_second_nginx(self, conf='nginx2.conf'):
    """Start the second nginx binary with the given config from /etc/nginx."""
    start_cmd = '/usr/sbin/nginx2 -c /etc/nginx/%s' % conf
    subp.call(start_cmd)
    self.second_started = True
def teardown_method(self, method):
    """Kill any nginx processes still running, then run common teardown."""
    if self.running:
        kill_cmd = 'pgrep nginx |sudo xargs kill -SIGKILL'
        subp.call(kill_cmd)
        self.running = False
    super(RealNginxTestCase, self).teardown_method(method)
def nginx_installed():
    """Returns True when /usr/sbin/nginx exists and `-V` runs cleanly."""
    try:
        subp.call('/usr/sbin/nginx -V')
    except:
        return False
    return True
def stop_first_nginx(self):
    """Stop the supervised nginx and give it a moment to die."""
    stop_cmd = 'supervisorctl -c /etc/supervisord.conf stop nginx'
    subp.call(stop_cmd)
    time.sleep(0.5)
def teardown_class(cls):
    """Shut supervisord down once the test class finishes."""
    shutdown_cmd = 'supervisorctl -c /etc/supervisord.conf shutdown'
    subp.call(shutdown_cmd)
def restart_nginx(self):
    """Best-effort nginx restart, then give it a moment to come back."""
    restart_cmd = 'service nginx restart'
    subp.call(restart_cmd, check=False)
    time.sleep(0.5)
def _find_all():
    """
    Tries to find all master processes

    :return: list of dict: nginx object definitions
    """
    # get ps info
    ps_cmd = "ps xao pid,ppid,command | grep 'nginx[:]'"
    try:
        ps, _ = subp.call(ps_cmd)
        context.log.debug('ps nginx output: %s' % ps)
    except:
        context.log.error('failed to find running nginx via %s' % ps_cmd)
        context.log.debug('additional info:', exc_info=True)
        return []

    # calculate total amount of nginx master processes
    # if no masters - return
    masters_amount = len(filter(lambda x: 'nginx: master process' in x, ps))
    if masters_amount == 0:
        context.log.debug('nginx masters amount is zero')
        return []

    # collect all info about processes
    masters = {}
    try:
        for line in ps:
            # parse ps response line:
            # 21355 1 nginx: master process /usr/sbin/nginx
            gwe = re.match(r'\s*(?P<pid>\d+)\s+(?P<ppid>\d+)\s+(?P<cmd>.+)\s*', line)

            # if not parsed - go to the next line
            if not gwe:
                continue

            pid, ppid, cmd = int(gwe.group('pid')), int(gwe.group('ppid')), gwe.group('cmd')

            # match daemonized master and skip the other stuff
            if 'nginx: master process' in cmd and ppid == 1:
                # get path to binary, prefix and conf_path
                try:
                    bin_path, prefix, conf_path, version = get_prefix_and_conf_path(cmd)
                except:
                    context.log.error('failed to find bin_path, prefix and conf_path for %s' % cmd)
                    context.log.debug('', exc_info=True)
                else:
                    # calculate local id from binary + config + prefix
                    local_id = hashlib.sha256('%s_%s_%s' % (bin_path, conf_path, prefix)).hexdigest()

                    # a worker may have been seen first: merge into the stub
                    if pid not in masters:
                        masters[pid] = dict(workers=[])
                    masters[pid].update(
                        dict(
                            version=version,
                            bin_path=bin_path,
                            conf_path=conf_path,
                            prefix=prefix,
                            pid=pid,
                            local_id=local_id
                        )
                    )

            # match worker and cache manager processes
            # BUG FIX: was `'nginx: worker process' or 'nginx: cache manager process' in cmd`,
            # which is always true (a non-empty string literal is truthy), so every
            # non-master ps line was recorded as a worker
            elif 'nginx: worker process' in cmd or 'nginx: cache manager process' in cmd:
                if ppid in masters:
                    masters[ppid]['workers'].append(pid)
                else:
                    # worker seen before its master - create a stub entry
                    masters[ppid] = dict(workers=[pid])
    except Exception as e:
        exception_name = e.__class__.__name__
        context.log.error('failed to parse ps results due to %s' % exception_name)
        context.log.debug('additional info:', exc_info=True)

    # collect results
    results = []
    for pid, description in masters.iteritems():
        if 'bin_path' in description:  # filter workers with non-executable nginx -V (relative paths, etc)
            definition = {
                'local_id': description['local_id'],
                'type': NginxManager.type,
                'root_uuid': context.objects.root_object.uuid if context.objects.root_object else None
            }
            results.append((definition, description))
    return results
def is_amazon():
    """Returns True when /etc/os-release identifies an Amazon Linux AMI."""
    os_release, _ = subp.call('cat /etc/os-release', check=False)
    return any('amazon linux ami' in line.lower() for line in os_release)
def _find_local(ps=None):
    """
    Tries to find all mysqld processes

    :param ps: [] of str, used for debugging our parsing logic - should be None most of the time
    :return: [] of {} MySQL object definitions
    """
    # get ps info
    try:
        # set ps output to passed param or call subp
        ps, _ = (ps, None) if ps is not None else subp.call(PS_CMD)
        context.log.debug('ps mysqld output: %s' % ps)
    except Exception as e:
        # log error
        exception_name = e.__class__.__name__
        context.log.debug(
            'failed to find running mysqld via "%s" due to %s' % (
                PS_CMD, exception_name
            )
        )
        context.log.debug('additional info:', exc_info=True)

        # If there is a root_object defined, log an event to send to the
        # cloud.
        if context.objects.root_object:
            context.objects.root_object.eventd.event(
                level=INFO,
                message='no mysqld processes found'
            )

        # break processing returning a fault-tolerant empty list
        return []

    if not any('mysqld' in line for line in ps):
        context.log.info('no mysqld processes found')

        # break processing returning a fault-tolerant empty list
        return []

    # collect all info about processes
    masters = {}
    try:
        for line in ps:
            parsed = ps_parser(line)

            # if not parsed - go to the next line
            if parsed is None:
                continue

            pid, ppid, cmd = parsed  # unpack values

            # match master process (the first token of the command ends in "mysqld")
            if cmd.split(' ', 1)[0].endswith('mysqld'):
                # skip processes started by an unsupported launch method
                if not launch_method_supported("mysql", ppid):
                    continue

                try:
                    conf_path = master_parser(cmd)
                except Exception as e:
                    context.log.error('failed to find conf_path for %s' % cmd)
                    context.log.debug('additional info:', exc_info=True)
                else:
                    # calculate local_id from the full command + config path
                    local_id = hashlib.sha256('%s_%s' % (cmd, conf_path)).hexdigest()

                    if pid not in masters:
                        masters[pid] = {}

                    masters[pid].update({
                        'cmd': cmd.strip(),
                        'conf_path': conf_path,
                        'pid': pid,
                        'local_id': local_id
                    })
    except Exception as e:
        # log error
        exception_name = e.__class__.__name__
        context.log.error('failed to parse ps results due to %s' % exception_name)
        context.log.debug('additional info:', exc_info=True)

    # format results
    results = []
    for payload in masters.itervalues():
        # only add payloads that have all the keys
        if 'cmd' in payload and 'conf_path' in payload and 'pid' in payload and 'local_id' in payload:
            results.append(payload)
        else:
            context.log.debug('MySQL "_find_all()" found an incomplete entity %s' % payload)
    return results
def setup_class(cls):
    """Restart supervisord from a clean state before the test class runs."""
    # best-effort shutdown of any previous instance, then a fresh start
    subp.call('supervisorctl -c /etc/supervisord.conf shutdown', check=False)
    subp.call('supervisord -c /etc/supervisord.conf')
def start_fpm(self):
    """Start php5-fpm unless it is already marked as running."""
    if self.running:
        return
    subp.call('service php5-fpm start')
    self.running = True
def reload_nginx(self):
    """Send HUP to the supervised nginx and give it a moment to reload."""
    hup_cmd = 'supervisorctl -c /etc/supervisord.conf signal HUP nginx'
    subp.call(hup_cmd)
    time.sleep(0.5)
def stop_fpm(self):
    """Stop php5-fpm only when it is marked as running."""
    if not self.running:
        return
    subp.call('service php5-fpm stop')
    self.running = False
def restart_nginx(self):
    """Restart the supervised nginx and give it a moment to come back."""
    restart_cmd = 'supervisorctl -c /etc/supervisord.conf restart nginx'
    subp.call(restart_cmd)
    time.sleep(0.5)
def restart_fpm(self):
    """Restart php5-fpm, but only when it is marked as running."""
    if self.running:
        subp.call('service php5-fpm restart')
def setup_method(self, method):
    """Common setup: start nginx via the service manager and reset state flags."""
    super(RealNginxTestCase, self).setup_method(method)
    self.second_started = False
    subp.call('service nginx start')
    self.running = True
def nginx_v(bin_path):
    """
    call -V and parse results

    :param bin_path str - path to binary
    :return {} - see result
    """
    result = {
        'version': None,
        'plus': {
            'enabled': False,
            'release': None
        },
        'ssl': {
            'built': None,
            'run': None
        },
        'configure': {}
    }

    # nginx -V writes its report to stderr
    _, nginx_v_err = subp.call("%s -V" % bin_path)
    for line in nginx_v_err:
        # SSL stuff
        try:
            if line.lower().startswith('built with') and 'ssl' in line.lower():
                match = BUILT_WITH_RE.search(line)
                result['ssl']['built'] = list(match.groups())

                # example: "built with OpenSSL 1.0.2g-fips 1 Mar 2016 (running with OpenSSL 1.0.2g 1 Mar 2016)"
                match = RUNNING_WITH_RE.search(line) or match
                result['ssl']['run'] = list(match.groups())
            elif line.lower().startswith('run with') and 'ssl' in line.lower():
                match = RUN_WITH_RE.search(line)
                result['ssl']['run'] = list(match.groups())
        except:
            # a non-matching line makes .groups() fail on None -- log and move on
            context.log.error('Failed to determine ssl library from "%s"' % line, exc_info=True)

        # split on the first ':' only -- values may themselves contain colons
        parts = line.split(':', 1)
        if len(parts) < 2:
            continue

        # parse version
        key, value = parts
        if key == 'nginx version':
            # parse major version
            major_parsed = re.match('.*/([\d\w\.]+)', value)
            result['version'] = major_parsed.group(
                1) if major_parsed else value.lstrip()

            # parse plus version
            if 'plus' in value:
                plus_parsed = re.match('.*\(([\w\-]+)\).*', value)
                if plus_parsed:
                    result['plus']['enabled'] = True
                    result['plus']['release'] = plus_parsed.group(1)

        # parse configure
        elif key == 'configure arguments':
            arguments = _parse_arguments(value)
            result['configure'] = arguments

    return result
def reload_nginx(self):
    """Reload nginx through the init-system service command."""
    reload_cmd = 'service nginx reload'
    subp.call(reload_cmd)
def teardown_method(self, method):
    """Unconditionally kill all nginx processes, then run common teardown."""
    kill_cmd = 'pgrep nginx |sudo xargs kill -SIGKILL'
    subp.call(kill_cmd)
    super(RealNginxTestCase, self).teardown_method(method)
def stop_first_nginx(self):
    """Stop nginx through the init-system service command."""
    stop_cmd = 'service nginx stop'
    subp.call(stop_cmd)
def reload_nginx(self):
    """Best-effort nginx reload, then give it a moment to finish."""
    reload_cmd = 'service nginx reload'
    subp.call(reload_cmd, check=False)
    time.sleep(0.5)
def nginx_plus_installed():
    """Returns True when `nginx -V` identifies an nginx-plus build."""
    # nginx -V reports on stderr; the build name is on the first line
    out, err = subp.call('/usr/sbin/nginx -V')
    return 'nginx-plus' in err[0]
def start_second_nginx(self, conf='nginx2.conf'):
    """Best-effort start of the second nginx binary with the given config."""
    start_cmd = '/usr/sbin/nginx2 -c /etc/nginx/%s' % conf
    subp.call(start_cmd, check=False)
    self.second_started = True
    time.sleep(0.5)
def uname(self):
    """
    Collects the full uname for the OS
    (hostname-free form when running inside a container)
    """
    if self.in_container:
        uname_cmd = 'uname -s -r -v -m -p'
    else:
        uname_cmd = 'uname -a'
    output, _ = subp.call(uname_cmd)
    self.meta['uname'] = output[0]
def stop_first_nginx(self):
    """Best-effort nginx stop, then give it a moment to die."""
    stop_cmd = 'service nginx stop'
    subp.call(stop_cmd, check=False)
    time.sleep(0.5)