def os_faults_client():
    """Function fixture to get os_faults client.

    Returns:
        object: instantiated os_faults client
    """
    # Mark the fixture itself so the framework never tears it down.
    os_faults_client.indestructible = True
    config_file = os_faults_config.OS_FAULTS_CONFIG
    if config_file:
        client = os_faults.connect(config_filename=config_file)
    else:
        client = os_faults.connect(os_faults_config.OS_FAULTS_DICT_CONFIG)
    client.verify()
    return client
def test_connect_fuel_with_libvirt(self):
    """Fuel config with one libvirt power driver builds the expected objects."""
    cloud_management = os_faults.connect(self.cloud_config)
    self.assertIsInstance(cloud_management, fuel.FuelManagement)
    self.assertIsInstance(cloud_management.node_discover, fuel.FuelManagement)
    drivers = cloud_management.power_manager.power_drivers
    self.assertEqual(1, len(drivers))
    self.assertIsInstance(drivers[0], libvirt_driver.LibvirtDriver)
def nodes(config):
    """List cloud nodes"""
    # Fall back to the default os-faults config file when none was given.
    config_filename = config or os_faults.get_default_config_file()
    cloud_management = os_faults.connect(config_filename=config_filename)

    def _host_info(host):
        # Only these three attributes are exposed in the listing.
        return {'ip': host.ip, 'mac': host.mac, 'fqdn': host.fqdn}

    hosts = [_host_info(h) for h in cloud_management.get_nodes().hosts]
    click.echo(yaml.safe_dump(hosts, default_flow_style=False), nl=False)
def test_config_with_services_and_hosts(self):
    """A node_list discoverer plus a host-scoped service resolves its nodes."""
    node_specs = [
        {'ip': '10.0.0.11', 'mac': '01:ab:cd:01:ab:cd', 'fqdn': 'node-1'},
        {'ip': '10.0.0.12', 'mac': '02:ab:cd:02:ab:cd', 'fqdn': 'node-2'},
    ]
    self.cloud_config['node_discover'] = {
        'driver': 'node_list',
        'args': node_specs,
    }
    self.cloud_config['services'] = {
        'app': {
            'driver': 'process',
            'args': {'grep': 'myapp'},
            'hosts': ['10.0.0.11', '10.0.0.12'],
        }
    }
    destructor = os_faults.connect(self.cloud_config)
    app = destructor.get_service('app')
    self.assertIsNotNone(app)
    app_nodes = app.get_nodes()
    self.assertEqual(['10.0.0.11', '10.0.0.12'], app_nodes.get_ips())
    self.assertEqual(['node-1', 'node-2'], app_nodes.get_fqdns())
    self.assertEqual(['01:ab:cd:01:ab:cd', '02:ab:cd:02:ab:cd'],
                     app_nodes.get_macs())
def test_connect_fuel_with_ipmi(self):
    """Fuel plus ipmi power management wires up an IPMIDriver."""
    bmc_map = {
        '00:00:00:00:00:00': {
            'address': '55.55.55.55',
            'username': '******',
            'password': '******',
        }
    }
    cloud_config = {
        'cloud_management': {
            'driver': 'fuel',
            'args': {
                'address': '10.30.00.5',
                'username': '******',
            }
        },
        'power_management': {
            'driver': 'ipmi',
            'args': {'mac_to_bmc': bmc_map},
        }
    }
    destructor = os_faults.connect(cloud_config)
    self.assertIsInstance(destructor, fuel.FuelManagement)
    self.assertIsInstance(destructor.power_management, ipmi.IPMIDriver)
def main():
    """Example: connect to a Fuel cloud and power-cycle one node."""
    # cloud config schema is an extension to os-client-config
    cloud_config = {
        'cloud_management': {
            'driver': 'fuel',
            'args': {
                'address': 'fuel.local',
                'username': '******',
            }
        },
        'power_management': {
            'driver': 'libvirt',
            'args': {
                'connection_uri': "qemu+ssh://[email protected]/system"
            }
        }
    }
    logging.info('Create connection to the cluster')
    cloud_management = os_faults.connect(cloud_config)

    logging.info('Verify connection to the cluster')
    cloud_management.verify()

    logging.info('Get all cluster nodes')
    all_nodes = cloud_management.get_nodes()
    logging.info('All cluster nodes: %s', all_nodes)

    logging.info('Pick and power off/on one of cluster nodes')
    node = all_nodes.pick()
    node.poweroff()
    node.poweron()
def test_connect_with_config_file(self, mock_os_path_exists):
    """connect() with no args reads the cloud config from a YAML file."""
    config_yaml = yaml.dump(self.cloud_config)
    opener = mock.mock_open(read_data=config_yaml)
    with mock.patch('os_faults.open', opener, create=True):
        destructor = os_faults.connect()
        self.assertIsInstance(destructor, fuel.FuelManagement)
        self.assertIsInstance(destructor.power_management,
                              libvirt_driver.LibvirtDriver)
def test_config_with_services(self, _):
    """A 'process' service declared in config is retrievable by name."""
    self.cloud_config['services'] = {
        'app': {'driver': 'process', 'args': {'grep': 'myapp'}}
    }
    destructor = os_faults.connect(self.cloud_config)
    self.assertIsNotNone(destructor.get_service('app'))
def test_connect_with_env_config(self, mock_os_path_exists):
    """connect() falls back to the config file named in the environment."""
    opener = mock.mock_open(read_data=yaml.dump(self.cloud_config))
    with mock.patch('os_faults.open', opener, create=True):
        cloud_management = os_faults.connect()
        self.assertIsInstance(cloud_management, fuel.FuelManagement)
        drivers = cloud_management.power_manager.power_drivers
        self.assertEqual(1, len(drivers))
        self.assertIsInstance(drivers[0], libvirt_driver.LibvirtDriver)
    opener.assert_called_once_with('/my/conf.yaml')
def os_faults_client():
    """Function fixture to get os_faults client.

    Returns:
        object: instantiated os_faults client
    """
    # The fixture cannot work without a config file path from the env.
    assert config.OS_FAULTS_CONFIG, \
        "Environment variable OS_FAULTS_CONFIG is not defined"
    client = os_faults.connect(config_filename=config.OS_FAULTS_CONFIG)
    client.verify()
    return client
def run(self):
    """Inject the configured fault through the os-faults human API.

    Connects using os-faults' default configuration discovery, then
    records SUCCESS on the hook status, or FAILED plus error details
    when the injection raises.
    """
    LOG.debug("Injecting fault: %s", self.config["action"])
    injector = os_faults.connect()
    try:
        os_faults.human_api(injector, self.config["action"])
        self.set_status(consts.HookStatus.SUCCESS)
    except Exception as e:
        self.set_status(consts.HookStatus.FAILED)
        # Bug fix: pass the exception class *name* (a string), not the
        # class object itself — 'exception_name' is expected to be a
        # serializable identifier in the error record.
        self.set_error(exception_name=type(e).__name__,
                       description='Fault injection failure',
                       details=str(e))
def test_connect_fuel_with_ipmi_libvirt_and_node_list(self):
    """Two power drivers plus a node_list discoverer all get instantiated."""
    node_specs = [
        {'ip': '10.0.0.11', 'mac': '01:ab:cd:01:ab:cd', 'fqdn': 'node-1'},
        {'ip': '10.0.0.12', 'mac': '02:ab:cd:02:ab:cd', 'fqdn': 'node-2'},
    ]
    ipmi_spec = {
        'driver': 'ipmi',
        'args': {
            'mac_to_bmc': {
                '00:00:00:00:00:00': {
                    'address': '55.55.55.55',
                    'username': '******',
                    'password': '******',
                }
            }
        }
    }
    libvirt_spec = {
        'driver': 'libvirt',
        'args': {'connection_uri': "qemu+ssh://[email protected]/system"}
    }
    cloud_config = {
        'node_discover': {'driver': 'node_list', 'args': node_specs},
        'cloud_management': {
            'driver': 'fuel',
            'args': {
                'address': '10.30.00.5',
                'username': '******',
            },
        },
        'power_managements': [ipmi_spec, libvirt_spec],
    }
    destructor = os_faults.connect(cloud_config)
    self.assertIsInstance(destructor, fuel.FuelManagement)
    self.assertIsInstance(destructor.node_discover,
                          node_list.NodeListDiscover)
    drivers = destructor.power_manager.power_drivers
    self.assertEqual(2, len(drivers))
    self.assertIsInstance(drivers[0], ipmi.IPMIDriver)
    self.assertIsInstance(drivers[1], libvirt_driver.LibvirtDriver)
def run(self):
    """Connect to the cloud, optionally verify it, and inject the fault."""
    # Connect using the scenario-provided cloud configuration.
    injector = os_faults.connect(self.get_cloud_config())
    # Verification is opt-in via the 'verify' config flag.
    if self.config.get("verify"):
        injector.verify()
    LOG.debug("Injecting fault: %s", self.config["action"])
    os_faults.human_api(injector, self.config["action"])
def connect(self):
    """Connect to the cloud using os-faults.

    Both the resolved config filename and the connection are cached on
    the instance, so repeated calls return the same management object.
    """
    if self.cloud_management is not None:
        return self.cloud_management
    if self.config_filename is None:
        self.config_filename = (
            _config_file.get_os_fault_config_filename())
    LOG.info("OS-Faults: connecting with config filename %s",
             self.config_filename)
    self.cloud_management = os_faults.connect(
        config_filename=self.config_filename)
    return self.cloud_management
def test_connect_devstack(self):
    """A devstack driver config produces a DevStackManagement instance."""
    cloud_config = {
        'cloud_management': {
            'driver': 'devstack',
            'args': {
                'address': 'devstack.local',
                'username': '******',
                'private_key_file': '/my/path/pk.key',
            }
        }
    }
    self.assertIsInstance(os_faults.connect(cloud_config),
                          devstack.DevStackManagement)
def main():
    """Example: kill the memcached service on every node of a cloud."""
    # Both nodes share identical SSH credentials; build the specs once.
    node_specs = [{
        'ip': ip,
        'auth': {
            'username': '******',
            'private_key_file': 'openstack_key',
        }
    } for ip in ('192.168.5.127', '192.168.5.128')]
    cloud_config = {
        'cloud_management': {
            'driver': 'universal',
        },
        'node_discover': {
            'driver': 'node_list',
            'args': node_specs,
        },
        'services': {
            'memcached': {
                'driver': 'system_service',
                'args': {
                    'service_name': 'memcached',
                    'grep': 'memcached',
                }
            }
        },
        'power_managements': [
            {
                'driver': 'libvirt',
                'args': {
                    'connection_uri': 'qemu+unix:///system',
                }
            },
        ]
    }
    logging.info('# Create connection to the cloud')
    cloud_management = os_faults.connect(cloud_config)

    logging.info('# Verify connection to the cloud')
    cloud_management.verify()

    logging.info('# Kill Memcached service on all nodes')
    cloud_management.get_service(name='memcached').kill()
def main():
    """Example: power-cycle one Fuel compute node over IPMI."""
    # cloud config schema is an extension to os-client-config
    cloud_config = {
        'cloud_management': {
            'driver': 'fuel',
            'args': {
                'address': 'fuel.local',
                'username': '******',
            }
        },
        'power_managements': [
            {
                'driver': 'ipmi',
                'args': {
                    'mac_to_bmc': {
                        '00:00:00:00:00:00': {
                            'address': '55.55.55.55',
                            'username': '******',
                            'password': '******',
                        }
                    }
                }
            }
        ]
    }
    logging.info('Create connection to the cluster')
    cluster = os_faults.connect(cloud_config)

    logging.info('Verify connection to the cluster')
    cluster.verify()

    logging.info('Get all cluster nodes')
    nodes = cluster.get_nodes()
    logging.info('All cluster nodes: %s', nodes)

    compute = nodes.filter(role='compute').pick()
    logging.info('Pick one of compute nodes: %s', compute)

    logging.info('Power off compute node')
    compute.poweroff()

    logging.info('Power on compute node')
    compute.poweron()

    logging.info('Done!')
def __init__(self, test_file):
    """Locate the faults file for *test_file* and connect via os-faults.

    When the cloud is unreachable or the os-faults config file is
    missing, only a warning is logged and ``scenarios`` stays None.
    """
    fault_config_f = os.path.join(CONF_DIR, CONF_FILE)
    self.faults_dir = os.path.join(os.path.dirname(test_file), 'faults')
    base_name = os.path.splitext(os.path.basename(test_file))[0]
    self.faults_file = os.path.join(self.faults_dir, base_name)
    try:
        self.cloud = os_faults.connect(config_filename=fault_config_f)
        self.cloud.verify()
        self.scenarios = self.get_scenarios()
    except os_faults.ansible.executor.AnsibleExecutionUnreachable:
        LOG.warning("Couldn't verify connectivity to the"
                    " cloud with os-faults configuration")
        self.scenarios = None
    except FileNotFoundError:
        LOG.warning("Couldn't find os-faults configuration file")
        self.scenarios = None
def main():
    """Example: restart Keystone on every node of a devstack cloud."""
    # cloud config schema is an extension to os-client-config
    cloud_config = {
        'cloud_management': {
            'driver': 'devstack',
            'args': {
                'address': 'devstack.local',
                'username': '******',
            }
        }
    }
    logging.info('# Create connection to the cloud')
    cloud = os_faults.connect(cloud_config)

    logging.info('# Verify connection to the cloud')
    cloud.verify()

    logging.info('# Restart Keystone service on all nodes')
    cloud.get_service(name='keystone').restart()
def main():
    """CLI entry point: parse arguments, connect, verify and/or run a command."""
    parser = argparse.ArgumentParser(
        prog='os-inject-fault',
        usage=USAGE,
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog=_make_epilog())
    parser.add_argument('-c', '--config', dest='config',
                        help='path to os-faults cloud connection config')
    parser.add_argument('-d', '--debug', dest='debug', action='store_true')
    parser.add_argument('-v', '--verify', action='store_true',
                        help='verify connection to the cloud')
    parser.add_argument('command', nargs='*',
                        help='fault injection command, e.g. "restart keystone '
                             'service"')
    args = parser.parse_args()

    log_level = logging.DEBUG if args.debug else logging.INFO
    logging.basicConfig(format='%(asctime)s %(levelname)s %(message)s',
                        level=log_level)

    # Nothing to do: show usage and exit cleanly.
    if not args.command and not args.verify:
        parser.print_help()
        sys.exit(0)

    destructor = os_faults.connect(config_filename=args.config)
    if args.verify:
        destructor.verify()
    if args.command:
        os_faults.human_api(destructor, ' '.join(args.command))
def test_connection_stack_user(self):
    """Devstack driver on localhost: nodes and the etcd service resolve."""
    address = 'localhost'
    cloud_config = {
        'cloud_management': {
            'driver': 'devstack',
            'args': {
                'address': address,
                'iface': 'lo',
                'auth': {
                    'username': '******',
                    'private_key_file': '/opt/stack/.ssh/os-faults-key'
                }
            }
        }
    }
    LOG.info('# Create connection to the cloud')
    cloud_management = os_faults.connect(cloud_config)
    self.assertIsNotNone(cloud_management)

    LOG.info('# Verify connection to the cloud')
    cloud_management.verify()

    all_nodes = cloud_management.get_nodes()
    self.assertEqual(1, len(all_nodes))
    self.assertEqual(address, all_nodes[0].ip)

    service = cloud_management.get_service('etcd')
    self.assertIsNotNone(service)
    service_nodes = service.get_nodes()
    self.assertEqual(1, len(service_nodes))
    self.assertEqual(address, service_nodes[0].ip)

    selection = service_nodes.pick()
    self.assertIsNotNone(selection)
    self.assertEqual(1, len(selection))
    self.assertEqual(address, selection[0].ip)
def test_connection_stack_user(self):
    """Universal driver with a single localhost node verifies cleanly."""
    cloud_config = {
        'cloud_management': {
            'driver': 'universal'
        },
        'node_discover': {
            'driver': 'node_list',
            'args': [{
                'ip': 'localhost',
                'auth': {
                    'username': '******',
                    'private_key_file': '/opt/stack/.ssh/os-faults-key'
                }
            }]
        }
    }
    LOG.info('# Create connection to the cloud')
    connection = os_faults.connect(cloud_config)
    self.assertIsNotNone(connection)
    LOG.info('# Verify connection to the cloud')
    connection.verify()
def verify(config):
    """Verify connection to the cloud"""
    # Use the default config file when no path was supplied.
    config_filename = config or os_faults.get_default_config_file()
    os_faults.connect(config_filename=config_filename).verify()
def verify(config):
    """Verify connection to the cloud"""
    if not config:
        # No explicit path: fall back to the default os-faults config file.
        config = os_faults.get_default_config_file()
    cloud = os_faults.connect(config_filename=config)
    cloud.verify()
def test_connect_with_libvirt(self, _):
    """DevStack config with a libvirt power driver builds the right objects."""
    connection = os_faults.connect(self.cloud_config)
    self.assertIsInstance(connection, devstack.DevStackCloudManagement)
    drivers = connection.power_manager.power_drivers
    self.assertEqual(1, len(drivers))
    self.assertIsInstance(drivers[0], libvirt.LibvirtDriver)
def main():
    """Example: stop openvswitch on one node, restart an OVS agent container."""
    # The cloud config details could be defined within the script or a
    # separate os-faults.yml file and then loaded to the script.
    # Ex. cloud_management = os_faults.connect(config_filename='os-faults.yml')
    # All three nodes share identical SSH auth; build the specs once.
    node_specs = [{
        'ip': ip,
        'auth': {
            'username': '******',
            'private_key_file': '/home/stack/.ssh/id_rsa',
            'become': True
        }
    } for ip in ('192.0.10.6', '192.0.10.8', '192.0.10.7')]
    cloud_config = {
        'cloud_management': {
            'driver': 'universal'
        },
        'node_discover': {
            'driver': 'node_list',
            'args': node_specs
        },
        'services': {
            'openvswitch': {
                'driver': 'system_service',
                'args': {
                    'service_name': 'openvswitch',
                    'grep': 'openvswitch'
                }
            }
        },
        'containers': {
            'neutron_ovs_agent': {
                'driver': 'docker_container',
                'args': {
                    'container_name': 'neutron_ovs_agent'
                }
            },
            'neutron_api': {
                'driver': 'docker_container',
                'args': {
                    'container_name': 'neutron_api'
                }
            }
        }
    }
    logging.info('# Create connection to the cloud')
    cloud_management = os_faults.connect(cloud_config)

    logging.info('Verify connection to the cloud')
    cloud_management.verify()

    logging.info('Get nodes where openvswitch service is running')
    ovs_service = cloud_management.get_service(name='openvswitch')
    ovs_nodes = ovs_service.get_nodes()
    logging.info('Nodes: {}'.format(ovs_nodes))

    logging.info('Stop openvswitch service on random node')
    random_node = ovs_service.get_nodes().pick()
    ovs_service.terminate(random_node)

    logging.info('Get nodes where neutron_ovs_agent container is running')
    agent_container = cloud_management.get_container(name='neutron_ovs_agent')
    agent_nodes = agent_container.get_nodes()
    logging.info('Nodes: {}'.format(agent_nodes))

    logging.info('Restart neutron_ovs_agent container on the '
                 'following nodes: {}'.format(agent_nodes))
    agent_container.restart(agent_nodes)
def main():
    """Walkthrough of the os-faults API against a Fuel-managed cloud."""
    # cloud config schema is an extension to os-client-config
    cloud_config = {
        'cloud_management': {
            'driver': 'fuel',
            'args': {
                'address': 'fuel.local',
                'username': '******',
                'private_key_file': '~/.ssh/os_faults',
            }
        },
        'power_managements': [{
            'driver': 'libvirt',
            'args': {
                'connection_uri': 'qemu+ssh://[email protected]/system'
            }
        }]
    }
    logging.info('# Create connection to the cloud')
    cloud = os_faults.connect(cloud_config)

    logging.info('# Verify connection to the cloud')
    cloud.verify()

    # os_faults library operate with 2 types of objects:
    # service - is software that runs in the cloud, e.g. keystone, mysql,
    #           rabbitmq, nova-api, glance-api
    # nodes - nodes that host the cloud, e.g. hardware server with hostname
    logging.info('# Get nodes where Keystone service runs')
    keystone = cloud.get_service(name='keystone')
    keystone_nodes = keystone.get_nodes()
    logging.info('Nodes: %s', keystone_nodes)

    logging.info('# Restart Keystone service on all nodes')
    keystone.restart()

    logging.info('# Pick and reset one of Keystone service nodes')
    keystone_nodes.pick().reset()

    logging.info('# Get all nodes in the cloud')
    all_nodes = cloud.get_nodes()
    logging.info('All cloud nodes: %s', all_nodes)

    logging.info('# Reset all these nodes')
    all_nodes.reset()

    logging.info('# Get node by FQDN: node-2.domain.tld')
    node_two = cloud.get_nodes(fqdns=['node-2.domain.tld'])
    logging.info('Node node-2.domain.tld: %s', node_two)

    logging.info('# Disable public network on node-2.domain.tld')
    node_two.disconnect(network_name='public')

    logging.info('# Enable public network on node-2.domain.tld')
    node_two.connect(network_name='public')

    logging.info('# Kill Glance API service on a single node')
    glance = cloud.get_service(name='glance-api')
    glance.kill(glance.get_nodes().pick())
import os_faults
import logging
import nodefault as nf
import time
import json
import os
import monitor
import sys

# Load the cloud description, then establish and verify the
# os-faults connection before any fault activity starts.
with open('config.json') as config_file:
    cloud_config = json.load(config_file)
cloud_management = os_faults.connect(cloud_config)
cloud_management.verify()

nf.setup(cloud_config)

# Launch a detached recovery agent on every discovered node.
for node in cloud_config['node_discover']['args']:
    os.system("ssh -i ~/.ssh/geni stack@%s python recover.py & disown"
              % (node['ip']))

monitor.monitor(sys.argv[1], sys.argv[2])
time.sleep(5)
nf.nodeop(cloud_config, 'node oom 0.3 on all')
def test_connect_fuel_with_libvirt(self):
    """Fuel plus libvirt power management yields a LibvirtDriver."""
    connection = os_faults.connect(self.cloud_config)
    self.assertIsInstance(connection, fuel.FuelManagement)
    self.assertIsInstance(connection.power_management,
                          libvirt_driver.LibvirtDriver)