def test_hosts_in_correct_networks():
	'''
	Verifies for each interface with an ip and network, that the ip could
	exist in that network's ip space
	'''
	ifaces = api.Call('list.host.interface', ['expanded=true'])
	# Map network name -> IPv4Network address space.
	# BUG FIX: the original called api.Call('list.network') twice — once into
	# a variable that was immediately overwritten by this comprehension.
	networks = {
		net['network']: ipaddress.IPv4Network(f"{net['address']}/{net['mask']}")
		for net in api.Call('list.network')
	}
	errors = []
	for row in ifaces:
		# Interfaces without an assigned ip or network cannot be validated.
		if not row['ip']:
			continue
		if not row['network']:
			continue
		if ipaddress.IPv4Address(row['ip']) not in networks[row['network']]:
			errors.append(
				f"{row['ip']} ({row['host']}) not in {networks[row['network']]} ({row['network']})"
			)
	assert not errors
def test_stack_list_host():
	"""Smoke test: `list host` runs and its rows print under a header."""
	records = api.Call('list host')
	print('\n\nHOST RACK RANK APPLIANCE OS BOX ENVIRONMENT OSACTION INSTALLACTION STATUS')
	for record in records:
		print(' '.join(str(value) for value in record.values()))
def test_stack_list_host_interface():
	"""Smoke test: `list host interface` runs and its rows print under a header."""
	records = api.Call('list host interface')
	print('\n\nHOST INTERFACE DEFAULT NETWORK MAC IP NAME MODULE VLAN OPTIONS CHANNEL')
	for record in records:
		print(' '.join(str(value) for value in record.values()))
def test_stack_list_networks():
	"""Smoke test: `list network` runs and its rows print under a header."""
	records = api.Call('list network')
	print('\n\nNETWORK ADDRESS MASK GATEWAY MTU ZONE DNS PXE')
	for record in records:
		print(' '.join(str(value) for value in record.values()))
def test_stack_list_boxes():
	"""Smoke test: `list box` runs and its rows print under a header."""
	records = api.Call('list box')
	print('\n\nNAME OS PALLETS CARTS')
	for record in records:
		print(' '.join(str(value) for value in record.values()))
def test_stack_list_pallets():
	"""Smoke test: `list pallet` runs and its rows print under a header."""
	records = api.Call('list pallet')
	print('\n\nNAME VERSION RELEASE ARCH OS BOXES')
	for record in records:
		print(' '.join(str(value) for value in record.values()))
def getPorts(self):
	"""
	Return the list of monitoring ports (as strings) to expose for this box.

	Always includes the node-exporter port 9100; additional ports are added
	based on which pallets are enabled on the box.
	"""
	ports = ['9100']
	pallet_names = [pallet[0] for pallet in self.getBoxPallets()]
	if 'stacki-docker' in pallet_names:
		ports.append('8080')
		# NOTE(review): docker metrics port assumed nested under the docker
		# pallet check (source indentation was ambiguous) — confirm.
		if api.Call('list.attr', ['docker.experimental==True']):
			ports.append('9323')
	if 'stacki-kubernetes' in pallet_names:
		ports.append('4194')
	return ports
def run(self, params, args):
	"""
	Copy a file or directory to every managed host in parallel via scp,
	then optionally restart a systemd service on those hosts.

	Params:
		src     - path on the frontend to copy (file or directory); required
		dest    - destination path on each host; required
		service - optional systemd service to restart after the copy

	Raises ParamError when src/dest are missing and CommandError when src
	is neither a file nor a directory.
	"""
	src, dest, svc = self.fillParams([
		('src', None),
		('dest', None),
		('service', None)
	])

	hosts = self.getHostnames(args, managed_only=1)
	me = self.db.getHostname('localhost')

	if not src:
		raise ParamError(self, 'src', "- no source is given.")

	# Directories need scp -r; anything that is neither file nor dir is an error.
	recurse = False
	if not os.path.isfile(src):
		if os.path.isdir(src):
			recurse = True
		else:
			raise CommandError(self, '%s is not a file or a directory' % src)

	if not dest:
		raise ParamError(self, 'dest', "- no destination is given.")

	threads = []
	for host in hosts:
		if me == host:
			continue  # never scp to ourselves
		if recurse:
			cmd = 'scp -r %s %s:%s ' % (src, host, dest)
		else:
			cmd = 'scp %s %s:%s ' % (src, host, dest)
		try:
			p = Parallel(cmd)
			p.start()
			threads.append(p)
		except Exception:
			# best-effort: an unreachable host is skipped, not fatal
			pass

	#
	# collect the threads
	#
	# NOTE(review): `timeout` is not defined in this method; it is assumed
	# to be a module-level constant — confirm.
	for thread in threads:
		thread.join(timeout)

	if svc:
		# BUG FIX: the two commands were previously concatenated with no
		# separator, producing the single bogus command
		# 'systemctl daemon-reloadsystemctl restart <svc>'.
		cmd = 'systemctl daemon-reload; '
		cmd += 'systemctl restart %s' % svc
		api.Call('run.host', [hosts, cmd])
def test_link_status(self, host):
	"""
	For every stacki-configured interface on the host that has an ip (or
	dhcp in its options), check via ethtool that the link is actually up.
	"""
	errors = []
	interface_fields = namedtuple('interface', ['name', 'ip', 'options'])

	# Get all interfaces for this host
	try:
		hostname = host.check_output('hostname')
	except paramiko.ssh_exception.NoValidConnectionsError:
		pytest.fail(f'Could not ssh into host')

	interfaces = [
		interface_fields(name=backend['interface'],
				 ip=backend['ip'],
				 options=backend['options'])
		for backend in api.Call('list host interface', [hostname])
	]

	for link in interfaces:
		# Only check interfaces that have a valid ip or dhcp in their
		# options, and never the ipmi interface.
		configured = link.ip or (link.options and 'dhcp' in link.options.lower())
		if not configured or link.name.lower() == 'ipmi':
			continue

		# Run ethtool on the interface and look for a line reporting
		# that the link was detected.
		ethtool_output = host.run(f'ethtool {link.name}').stdout.splitlines()
		link_up = any(
			'link detected: yes' in line.lower()
			for line in ethtool_output
		)
		if not link_up:
			errors.append(f'{link.name}')

	assert not errors, f'On host {hostname} the following links were found not connected or down: {", ".join(errors)}'
def get_network_name(self, ip):
	"""
	Return the name of the stacki network whose address space contains *ip*.

	When the address falls inside multiple (nested) networks, the network
	that sorts highest per ipaddress ordering (network address, then
	netmask — i.e. the most specific of nested networks) wins, matching
	the original two-network compare_networks() behavior.

	Returns None when no network matches.
	"""
	# Collect (address_space, name) for every network containing the ip.
	target = ip_address(u'%s' % ip)
	matches = []
	for net in api.Call('list network'):
		space = IPv4Network(u'%s/%s' % (net['address'], net['mask']))
		if target in space:
			matches.append((space, net['network']))

	if not matches:
		return None

	# BUG FIX: the original only returned a result when two or more
	# networks matched (a single match fell through and returned None),
	# and with more than two matches it only ever compared the first two.
	# max() over the network ordering handles all counts, and on ties
	# returns the first match — identical to the old compare_networks()
	# outcome for the two-network case.
	return max(matches, key=lambda pair: pair[0])[1]
import pytest import testinfra import paramiko import socket from stack import api from collections import namedtuple testinfra_hosts = [ host['host'] for host in api.Call('list host', args=['a:backend']) ] class TestLinkUp: """ See if the interfaces stacki configures are actually up on the hosts """ def test_link_status(self, host): errors = [] interface_fields = namedtuple('interface', ['name', 'ip', 'options']) # Get all interfaces for this host try: hostname = host.check_output('hostname') except paramiko.ssh_exception.NoValidConnectionsError: pytest.fail(f'Could not ssh into host') interfaces = [ interface_fields(name=backend['interface'], ip=backend['ip'], options=backend['options']) for backend in api.Call('list host interface', [hostname]) ]
def test_storage_partition(self, host):
	"""
	Verify the host's on-disk partitioning still matches stacki's storage
	configuration: for each configured partition with a mountpoint, check
	existence, filesystem type, size (within 100MB), label, and fsoptions.
	Accumulates all mismatches and asserts once at the end.
	"""
	# Get current hostname and test if we can ssh into the host
	# Otherwise fail the test
	try:
		hostname = host.check_output('hostname')
	except paramiko.ssh_exception.NoValidConnectionsError:
		pytest.fail(f'Could not ssh into host')

	# Get stacki storage config using proper scoping
	storage_config = api.Call('list host storage partition', [hostname])

	# Otherwise if we are on an older version of stacki without proper scoping,
	# use report host instead and convert the dict output as a string into an actual dict
	if not storage_config:
		report_storage = api.Call('report host storage partition', [hostname])

		# If we still cannot get stacki's storage config for the current host,
		# skip the test
		if not report_storage or report_storage[0] == '[]':
			pytest.skip(
				f'Using default stacki partition config for host {hostname}'
			)
		else:
			# report output uses Python repr quoting; normalize to JSON
			# before parsing
			storage_config = json.loads(report_storage[0].replace(
				"'", '"').replace('None', '""'))

	# Go through the stacki storage config and see
	# if it matches what's actually on the disk/disks
	errors = []
	for partition in storage_config:
		# Info needed for each partiton
		conf_mntpt = partition['mountpoint']

		# Check based on mountpoint.
		# TODO: Skip if there is no mountpoint until we better support
		# OSes that don't preserve partition ordering (like RedHat/CentOS).
		if not conf_mntpt:
			continue

		# TODO: Exclude special swap, biosboot, and LVM partitions for now until
		# we add support for checking them.
		if any(excluded_mountpoint in conf_mntpt.lower()
		       for excluded_mountpoint in ("swap", "biosboot", "vg_sys", "pv")):
			continue

		# If a mountpoint was configured, it better exist on disk.
		if not host.mount_point(conf_mntpt).exists:
			errors.append(f'Could not find {conf_mntpt} on disk')
			continue

		# Check the FS type if it does exist on disk.
		if host.mount_point(conf_mntpt).filesystem != partition['fstype']:
			errors.append(
				f'{conf_mntpt} found with file system {host.mount_point(conf_mntpt).filesystem} '
				f'but was configured with {partition["fstype"]}')

		# Check partition size within 100MB due to lsblk bytes conversion
		# TODO: If a partition size is 0, skip for now. We can't easily figure out
		# the expected size of `fill the rest of the disk` partitions due to some
		# supported OSes not preserving partition ordering, and not all partitions
		# will have mountpoints.
		expected_size = int(partition['size'])
		actual_size = get_part_size(host, conf_mntpt)
		if expected_size != 0 and not math.isclose(
				expected_size, actual_size, abs_tol=100):
			errors.append(
				f'{conf_mntpt} found with size {actual_size} but was configured to be of size {expected_size}'
			)

		# Check if the label should be present for the current partition
		# and check if it matches the actual partition label. The regex captures
		# all non whitespace characters after "--label=" to a named group, and uses
		# that as the label to check against.
		label = re.match(r"^.*--label=(?P<label>\S+)", partition['options'])
		if label and label.groupdict():
			config_label = label.groupdict()['label']
			curr_label = get_part_label(host, conf_mntpt)
			if not curr_label:
				errors.append(
					f'{conf_mntpt} configured with label {config_label} but no label found'
				)
			elif config_label != curr_label:
				errors.append(
					f'{conf_mntpt} configured with label {config_label} but found with {curr_label}'
				)

		# Check the options. The regex captures all non whitespace characters after "--fsoptions="
		# to a named group, and uses that as the fsoptions to check against.
		# TODO: We don't currently support the SUSE AutoYaST fsopt key, but this will need to be updated
		# when we do.
		options = re.match(r"^.*--fsoptions=(?P<options>\S+)", partition["options"])
		if options and options.groupdict():
			# Get a list of options and remove the "defaults" option that doesn't appear to add anything.
			options = [
				option.strip()
				for option in options.groupdict()["options"].split(",")
				if option.strip() and option != "defaults"
			]
			actual_options = host.mount_point(conf_mntpt).options
			# Since there might be more "default" options than were explicitly specified, just check that
			# each explicitly specified option exists.
			missing_options = [
				option for option in options
				if option not in actual_options
			]
			if missing_options:
				errors.append(
					f'{conf_mntpt} missing options {missing_options} '
					f'from configured options {actual_options}')

	errors = "\n ".join(errors)
	assert not errors, f'Host {hostname} found with partitioning mismatch from original config:\n {errors}'
import pytest import math import json import paramiko import re from stack import api from stack.commands.report.system.tests.stack_test_util import get_part_label, get_part_size # Test all backend appliances testinfra_hosts = [host['host'] for host in api.Call('list host a:backend')] class TestStoragePartition: """ Test if the way stacki configured the disks is still how they are partitioned """ def test_storage_partition(self, host): # Get current hostname and test if we can ssh into the host # Otherwise fail the test try: hostname = host.check_output('hostname') except paramiko.ssh_exception.NoValidConnectionsError: pytest.fail(f'Could not ssh into host') # Get stacki storage config using proper scoping storage_config = api.Call('list host storage partition', [hostname]) # Otherwise if we are on an older version of stacki without proper scoping, # use report host instead and convert the dict output as a string into an actual dict if not storage_config: report_storage = api.Call('report host storage partition',
# Based on https://github.com/ActiveState/code/tree/master/recipes/Python/577649_dhcp_query # With MIT license import struct import socket import pathlib import pytest import binascii from stack import api from stack.util import _exec from random import randint pxe_networks = [net['network'] for net in api.Call('list network', args=['pxe=true'])] interfaces = api.Call('list.host.interface', args=['a:frontend']) pxe_interfaces = {inter['interface']: inter['mac'] for inter in interfaces if inter['network'] in pxe_networks} def build_dhcp_packet(mac): mac_address = binascii.unhexlify(mac) # Ensure a unique transaction id when a DHCP request # is sent back transaction_id = b'' for i in range(4): t = randint(0, 255) transaction_id += struct.pack('!B', t) # Build DHCP discover packet in binary form packet = { 'boot_request': b'\x01', 'ethernet': b'\x01',