def create(lab_name, allowed_drivers=None, is_mgm_only=False, is_interactive=False):
    """Build a pod object from a live Mercury management node.

    :param lab_name: known pod name or the mgm node IPv4 address
    :param allowed_drivers: when given, the pod's mechanism driver must be one of these
    :param is_mgm_only: when True return only the mgm node instead of a full pod
    :param is_interactive: when True skip the installation-status sanity check
    """
    import validators
    from lab.laboratory import Laboratory
    from lab.mercury.nodes import MercuryMgm

    # resolve the management node address: either given directly or looked up by pod name
    if validators.ipv4(lab_name):
        ip = lab_name
    elif lab_name in WithMercury.KNOWN_PODS_DIC:
        ip = WithMercury.KNOWN_PODS_DIC[lab_name]['mgm_ip']
    else:
        raise ValueError('"{}" unknown, possible names are {}'.format(lab_name, WithMercury.KNOWN_PODS_DIC.keys()))

    mgm, status, setup_data_dic, release_tag, gerrit_tag = MercuryMgm.create_from_actual(ip=ip, password=WithMercury.COMMON_PASSWORD)
    if not is_interactive and '| ORCHESTRATION | Success |' not in status:
        raise RuntimeError('{} is not properly installed: {}'.format(lab_name, status))

    driver = setup_data_dic['MECHANISM_DRIVERS']
    if allowed_drivers:
        assert driver.lower() in allowed_drivers, 'driver {} not in {}'.format(driver, allowed_drivers)

    pod = Laboratory(name=lab_name, driver=driver, release_tag=release_tag, gerrit_tag=gerrit_tag, setup_data_dic=setup_data_dic)
    if is_mgm_only:
        return mgm
    WithMercury.create_from_setup_data(pod=pod, mgm=mgm, is_interactive=is_interactive)
    pod.driver_version = pod.vtc.r_vtc_get_version() if pod.driver == WithMercury.VTS else 'vpp XXXX'
    return pod
def __init__(self, config):
    """Create the runner: load the lab from the yaml path in *config* and cache its director node."""
    from lab.laboratory import Laboratory

    super(RunnerCloud9, self).__init__(config=config)
    lab = Laboratory(config_path=config['yaml-path'])
    self.lab = lab
    self.director = lab.director()
def setUpClass(cls):
    """Cache the g7-2 VTC node plus its XRVR and VTF lists for all tests in this class."""
    from lab.laboratory import Laboratory

    vtc = Laboratory('g7-2').get_node_by_id('vtc1')
    cls._vtc = vtc
    cls._xrvrs = vtc.r_vtc_get_xrvrs()
    cls._vtfs = vtc.r_vtc_get_vtfs()
def setUpClass(cls):
    """Look up the g7-2 lab's VTC once and remember it together with its XRVRs and VTFs."""
    from lab.laboratory import Laboratory

    pod = Laboratory('g7-2')
    cls._vtc = pod.get_node_by_id('vtc1')
    cls._xrvrs = cls._vtc.r_vtc_get_xrvrs()
    cls._vtfs = cls._vtc.r_vtc_get_vtfs()
def deploy_cloud(self, list_of_servers):
    """Find an already-deployed cloud on the lab's director and wrap it as a Cloud object.

    :param list_of_servers: servers to operate on; when empty it is filled in place from
        the lab config (director first, then controllers, then computes)
    :returns: Cloud built from the openrc file found on the director
    :raises RuntimeError: when neither known openrc location exists on the director
    """
    from lab.laboratory import Laboratory
    from lab.cloud import Cloud

    if not list_of_servers:
        lab = Laboratory(config_path=self._lab_cfg)
        list_of_servers.append(lab.get_director())
        list_of_servers.extend(lab.get_controllers())
        list_of_servers.extend(lab.get_computes())
    director = list_of_servers[0]

    # probe the two known openrc locations (tripleo vs packstack style)
    openrc_body = None
    for openrc_path in ['/home/stack/overcloudrc', 'keystonerc_admin']:
        ans = director.run(command='cat {0}'.format(openrc_path), warn_only=True)
        if 'No such file or directory' not in ans:
            openrc_body = ans
            break
    # BUG FIX: the old check was `if not openrc_path`, which could never fire because the
    # loop variable always holds the last candidate after the loop; test the body instead.
    if openrc_body is None:
        raise RuntimeError('Provided lab does not contain any valid cloud')
    for host in list_of_servers:
        host.actuate_hostname()
    return Cloud.from_openrc(name=self._cloud_name, mediator=director, openrc_as_string=openrc_body)
def create(lab_name, allowed_drivers=None, is_mgm_only=False, is_interactive=False):
    """Build a pod object by interrogating the pod's Mercury management node over ssh.

    :param lab_name: known pod name or the mgm node IPv4 address
    :param allowed_drivers: when given, the pod's mechanism driver must be one of these
    :param is_mgm_only: when True return only the mgm node instead of a full pod
    :param is_interactive: when True skip the installation-status sanity check
    """
    import validators
    import yaml
    from lab.laboratory import Laboratory

    if not validators.ipv4(lab_name):
        if lab_name not in WithMercury.KNOWN_PODS_DIC:
            raise ValueError('"{}" unknown, possible names are {}'.format(lab_name, WithMercury.KNOWN_PODS_DIC.keys()))
        else:
            ip = WithMercury.KNOWN_PODS_DIC[lab_name]['mgm_ip']
    else:
        ip = lab_name
    mgm = WithMercury.check_mgm_node(ip=ip)

    # single ssh round trip: chain the commands, separating their outputs with a marker
    separator = 'separator'
    cmds = ['ciscovim install-status', 'cat setup_data.yaml', 'hostname', 'grep -E "image_tag|RELEASE_TAG" defaults.yaml']
    cmd = ' && echo {} && '.format(separator).join(cmds)
    a = mgm.exe(cmd, is_warn_only=True)
    if not is_interactive and '| ORCHESTRATION | Success |' not in a:
        raise RuntimeError('{} is not properly installed'.format(lab_name))
    _, setup_data_text, hostname, grep = a.split(separator)
    # BUG FIX: yaml.load() without an explicit Loader executes arbitrary YAML tags and is
    # deprecated; setup_data.yaml is plain data, so safe_load is the correct call.
    setup_data_dic = yaml.safe_load(setup_data_text)
    driver = setup_data_dic['MECHANISM_DRIVERS']
    if allowed_drivers:
        assert driver.lower() in allowed_drivers, 'driver {} not in {}'.format(driver, allowed_drivers)
    # grep output: line 1 holds image_tag, line 2 holds RELEASE_TAG (line 0 is before the first match)
    pod = Laboratory(name=lab_name, driver=driver,
                     release_tag=grep.split('\n')[1].split(':')[-1].strip(),
                     gerrit_tag=grep.split('\n')[2].split(':')[-1].strip(),
                     setup_data_dic=setup_data_dic)
    if is_mgm_only:
        return mgm
    WithMercury.create_from_setup_data(pod=pod, mgm=mgm, is_interactive=is_interactive)
    if pod.driver == WithMercury.VTS:
        pod.driver_version = pod.vtc.r_vtc_get_version()
    else:
        pod.driver_version = 'vpp XXXX'
    return pod
def info(pod_name=None, regex=None):
    """fab info:g10,regex\t\t\tExec grep regex """
    from lab.laboratory import Laboratory

    # build the pod, then grep its collected logs for the given regex
    Laboratory.create(lab_name=pod_name).r_collect_info(regex=regex, comment=regex)
def __init__(self, config):
    """Initialise the runner from *config*; resolves the lab and its director up front."""
    from lab.laboratory import Laboratory

    super(RunnerCloud9, self).__init__(config=config)
    self.lab = Laboratory(config_path=config['yaml-path'])
    # cache the director since every runner step talks to it
    self.director = self.lab.director()
def bash():
    """fab bash\t\t\t\tDefine bash aliases for lab"""
    from lab.laboratory import Laboratory
    from lab.nodes.lab_server import LabServer
    from lab.nodes.virtual_server import VirtualServer

    pod = Laboratory.create(lab_name=get_user_input(obj=Laboratory.MERCURY_DIC.keys()))
    aliases = []
    for node in pod.nodes.values():
        if not isinstance(node, VirtualServer):
            # cimc: out-of-band access alias, prefixed with 'z'
            aliases.append('alias z{n}="sshpass -p {p} ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no {u}@{ip}"'.format(n=node.id, p=node.oob_password, u=node.oob_username, ip=node.oob_ip))
        if isinstance(node, LabServer):
            # ssh: direct, or hop through the proxy node when one is configured
            if node.proxy:
                ip = node.proxy.ip + ' ' + 'ssh -o StrictHostKeyChecking=no ' + node.id
                username, password = node.proxy.username, node.proxy.password
            else:
                ip, username, password = node.ip, node.username, node.password
            password = '******' + password + ' ' if password else ''  # if password is None use the key pair to ssh
            aliases.append('alias {n}="{p}ssh -t -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no {u}@{ip}"'.format(p=password, n=node.id, u=username, ip=ip))
    with open('tmp.aliases', 'w') as f:
        f.write('\n'.join(sorted(aliases)))
        f.write('\nPS1="({}) $PS1 "\n'.format(pod))
def info(pod_name=None, regex=None):
    """fab info:g10,regex\t\t\tExec grep regex """
    from lab.laboratory import Laboratory

    pod = Laboratory.create(lab_name=pod_name)
    # the regex doubles as the comment tag for the collected output
    pod.r_collect_info(regex=regex, comment=regex)
def main():
    """Ansible dynamic-inventory entry point for pod g7-2: print inventory as json."""
    import json
    import os
    from lab.laboratory import Laboratory

    os.environ['DISABLE_SQE_LOG'] = 'Yes'  # keep inventory output clean of log noise
    args = parse_args()
    inventory = Laboratory(config_path='g7-2.yaml').get_ansible_inventory()
    if args.list:
        output = json.dumps(inventory)
    elif args.host:
        output = inventory[args.host]
    else:
        output = 'Nothing to return'
    print(output)
def create(lab_name, allowed_drivers=None, is_mgm_only=False, is_interactive=False):
    """Build a pod object from the pod's Mercury management node.

    :param lab_name: known pod name or the mgm node IPv4 address
    :param allowed_drivers: when given, the pod's mechanism driver must be one of these
    :param is_mgm_only: when True return only the mgm node instead of a full pod
    :param is_interactive: when True skip the installation-status sanity check
    """
    import validators
    from lab.laboratory import Laboratory
    from lab.mercury.nodes import MercuryMgm

    if validators.ipv4(lab_name):
        ip = lab_name
    elif lab_name in WithMercury.KNOWN_PODS_DIC:
        ip = WithMercury.KNOWN_PODS_DIC[lab_name]['mgm_ip']
    else:
        raise ValueError('"{}" unknown, possible names are {}'.format(lab_name, WithMercury.KNOWN_PODS_DIC.keys()))

    mgm, status, setup_data_dic, release_tag, gerrit_tag = MercuryMgm.create_from_actual(ip=ip, password=WithMercury.COMMON_PASSWORD)
    if not is_interactive and '| ORCHESTRATION | Success |' not in status:
        raise RuntimeError('{} is not properly installed: {}'.format(lab_name, status))

    driver = setup_data_dic['MECHANISM_DRIVERS']
    if allowed_drivers:
        assert driver.lower() in allowed_drivers, 'driver {} not in {}'.format(driver, allowed_drivers)

    pod = Laboratory(name=lab_name, driver=driver, release_tag=release_tag, gerrit_tag=gerrit_tag, setup_data_dic=setup_data_dic)
    if is_mgm_only:
        return mgm
    WithMercury.create_from_setup_data(pod=pod, mgm=mgm, is_interactive=is_interactive)
    pod.driver_version = pod.vtc.r_vtc_get_version() if pod.driver == WithMercury.VTS else 'vpp XXXX'
    return pod
def tmux():
    """Creates a number of config files for tmux utility"""
    import os
    from lab.laboratory import Laboratory
    from lab.with_config import ls_configs

    def form_new_window(ip, u, p, n, l):
        # one tmux window per device, wrapping an sshpass login command
        first_part = 'tmux new-window -t {lab}:{counter} -n {role} '.format(role=n, lab=l, counter=counter)
        cmd_part = '"sshpass -p {password} ssh {username}@{ip}"'.format(username=u, ip=ip, password=p)
        f.write(first_part + cmd_part + '\n')

    with open(os.path.expanduser('~/.tmux.conf'), 'w') as f:
        f.write('set -g prefix C-a\n')  # default Ctrl-a for Alt use m-
        f.write('set -g history-limit 5000\n')  # 5000 lines in scrolling
        f.write('set -g base-index 1\n')  # start numerating from 1
    with open(os.path.expanduser('~/tmux'), 'w') as f:
        for lab_config in ls_configs():
            lab = Laboratory(config_path=lab_config)
            # BUG FIX: str.strip('.yaml') removes any of the characters '.yaml' from both
            # ends (e.g. 'lily.yaml' -> 'il'); cut the literal suffix instead.
            name = lab_config[:-len('.yaml')] if lab_config.endswith('.yaml') else lab_config
            counter = 2
            f.write('tmux new-session -s {0} -n sqe -d\n'.format(name))
            director = lab.director()
            ucsm_ip, ucsm_username, ucsm_password = lab.ucsm_creds()
            n9k_ip1, n9k_ip2, n9k_username, n9k_password = lab.n9k_creds()
            form_new_window(ip=director.ip, u=director.username, p=director.password, n='di', l=name)
            counter += 1
            form_new_window(ip=ucsm_ip, u=ucsm_username, p=ucsm_password, n='fi', l=name)
            counter += 1
            form_new_window(ip=n9k_ip1, u=n9k_username, p=n9k_password, n='n9', l=name)
            counter += 1
        f.write('tmux select-window -t g10:1\n')
        f.write('tmux -2 attach-session -t g10\n')
class ProviderCobblerLab(Provider):
    """Creates systems on cobbler for all lab nodes which sits on network marked as is-pxe. Checks that nodes indeed has proper NICs configured. Configure them if not. Then reboot all the systems"""

    def sample_config(self):
        return {'hardware-lab-config': 'some yaml with valid lab configuration'}

    def __init__(self, config):
        from lab.laboratory import Laboratory

        super(ProviderCobblerLab, self).__init__(config=config)
        self._lab = Laboratory(config_path=config['hardware-lab-config'])

    def wait_for_servers(self):
        from lab.cobbler import CobblerServer

        # drive the deployment through the first (normally the only) cobbler node
        return self._lab.get_nodes_by_class(CobblerServer)[0].cobbler_deploy()
def bash():
    """fab bash\t\t\t\tDefine bash aliases for lab"""
    from lab.laboratory import Laboratory
    from lab.nodes.lab_server import LabServer
    from lab.nodes.virtual_server import VirtualServer

    pod = Laboratory.create(lab_name=get_user_input(obj=Laboratory.MERCURY_DIC.keys()))
    aliases = []
    for node in pod.nodes.values():
        if not isinstance(node, VirtualServer):
            # cimc alias (out-of-band), prefixed with 'z'
            aliases.append('alias z{n}="sshpass -p {p} ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no {u}@{ip}"'.format(n=node.id, p=node.oob_password, u=node.oob_username, ip=node.oob_ip))
        if isinstance(node, LabServer):
            # ssh alias — direct, or a two-hop command via the proxy node when present
            if node.proxy:
                ip = node.proxy.ip + ' ' + 'ssh -o StrictHostKeyChecking=no ' + node.id
                username = node.proxy.username
                password = node.proxy.password
            else:
                ip, username, password = node.ip, node.username, node.password
            password = '******' + password + ' ' if password else ''  # if password is None use the key pair to ssh
            aliases.append('alias {n}="{p}ssh -t -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no {u}@{ip}"'.format(p=password, n=node.id, u=username, ip=ip))
    with open('tmp.aliases', 'w') as f:
        f.write('\n'.join(sorted(aliases)))
        f.write('\nPS1="({}) $PS1 "\n'.format(pod))
def cmd():
    """fab cmd\t\t\t\tRun single command on lab device """
    # FIX: dropped `import inspect` — it was never used in this function.
    from fabric.operations import prompt
    import time
    from lab.deployers.deployer_existing_light import DeployerExistingLight
    from lab.with_log import lab_logger
    from lab.laboratory import Laboratory

    # offer each pod twice: 'l<name>' builds a full Laboratory, 's<name>' a light deployer
    pod_names = Laboratory.MERCURY_DIC['pods'].keys()
    l_and_s_names = map(lambda x: 'l' + x, pod_names) + map(lambda x: 's' + x, pod_names)
    _, pod_name = get_user_input(obj=l_and_s_names)
    root = Laboratory.create(lab_name=pod_name[1:], is_interactive=True) if pod_name[0] == 'l' else DeployerExistingLight(pod_name[1:])()
    obj = root
    while True:
        # interactive REPL: pick an object/method, prompt for its arguments, run it
        obj, method = get_user_input(obj=obj)
        try:
            obj.log('{} executing ......................'.format(method))
            time.sleep(1)
            parameters = method.func_code.co_varnames[1:method.func_code.co_argcount]
            arguments = []
            for parameter in parameters:
                argument = prompt(text='{p}=? '.format(p=parameter))
                # coerce typed strings that look like lists or booleans
                if argument.startswith('['):
                    argument = argument.strip('[]').split(',')
                elif argument in ['True', 'true', 'yes']:
                    argument = True
                elif argument in ['False', 'false', 'no']:
                    argument = False
                arguments.append(argument)
            results = method(*arguments) if arguments else method()
            time.sleep(1)
            obj.log('{}() returns:\n\n {}\n'.format(method, results))
        except Exception as ex:
            lab_logger.exception('\n Exception: {0}'.format(ex))
            prompt('')  # pause so the operator can read the traceback
class ProviderCobblerLab(Provider):
    """Creates systems on cobbler for all lab nodes which sits on network marked as is-pxe. Checks that nodes indeed has proper NICs configured. Configure them if not. Then reboot all the systems"""

    def sample_config(self):
        return {'hardware-lab-config': 'some yaml with valid lab configuration'}

    def __init__(self, config):
        from lab.laboratory import Laboratory

        super(ProviderCobblerLab, self).__init__(config=config)
        self._lab = Laboratory(config_path=config['hardware-lab-config'])

    def wait_for_servers(self):
        from lab.nodes.cobbler import CobblerServer

        # the lab is expected to contain exactly one cobbler node; use it to deploy
        cobbler = self._lab.get_nodes_by_class(CobblerServer)[0]
        return cobbler.cobbler_deploy()
class ProviderExistingLab(Provider):
    """Creates servers from base hardware lab config """

    def sample_config(self):
        return {'hardware-lab-config': 'g10.yaml'}

    def __init__(self, config):
        from lab.laboratory import Laboratory

        super(ProviderExistingLab, self).__init__(config=config)
        # NOTE(review): despite the name, _lab holds the lab's *director* node — confirm intended
        self._lab = Laboratory(config_path=config['hardware-lab-config']).get_director()

    def create_servers(self):
        return self._lab.get_nodes_by_class()

    def wait_for_servers(self):
        servers = self.create_servers()
        # make each server report its expected hostname before handing them out
        for srv in servers:
            srv.actuate_hostname()
        return servers
def cmd():
    """fab cmd\t\t\t\tRun single command on lab device """
    import inspect
    from fabric.operations import prompt
    import time
    from lab.deployers.deployer_existing_light import DeployerExistingLight
    from lab.with_log import lab_logger
    from lab.laboratory import Laboratory

    def coerce(raw):
        # operator input looking like a list or boolean is converted, otherwise kept as string
        if raw.startswith('['):
            return raw.strip('[]').split(',')
        if raw in ['True', 'true', 'yes']:
            return True
        if raw in ['False', 'false', 'no']:
            return False
        return raw

    pod_names = Laboratory.MERCURY_DIC['pods'].keys()
    l_and_s_names = map(lambda x: 'l' + x, pod_names) + map(lambda x: 's' + x, pod_names)
    _, pod_name = get_user_input(obj=l_and_s_names)
    if pod_name[0] == 'l':
        obj = Laboratory.create(lab_name=pod_name[1:], is_interactive=True)
    else:
        obj = DeployerExistingLight(pod_name[1:])()
    while True:
        obj, method = get_user_input(obj=obj)
        try:
            obj.log('{} executing ......................'.format(method))
            time.sleep(1)
            code = method.func_code
            arguments = [coerce(prompt(text='{p}=? '.format(p=p))) for p in code.co_varnames[1:code.co_argcount]]
            results = method(*arguments) if arguments else method()
            time.sleep(1)
            obj.log('{}() returns:\n\n {}\n'.format(method, results))
        except Exception as ex:
            lab_logger.exception('\n Exception: {0}'.format(ex))
            prompt('')
def __call__(self, *args, **kwargs):
    """Return an OS cloud object whose commands are mediated by the pod's mgm node."""
    from lab.cloud.openstack import OS
    from lab.laboratory import Laboratory

    mediator = Laboratory.create(lab_name=self.pod_name, is_mgm_only=True)
    return OS(name=self.pod_name, mediator=mediator, openrc_path='openrc')
def deploy_cloud(self, list_of_servers):
    """Install (or re-install) the Mercury cloud on this lab's build node and return an OS object.

    :param list_of_servers: unused here; kept for interface compatibility with other deployers
    :returns: OS built from the openrc found on the build node after installation
    :raises RuntimeError: when either Mercury runner phase fails
    """
    from lab.cloud.openstack import OS
    from fabric.operations import prompt
    from lab.laboratory import Laboratory

    lab = Laboratory(config_path=self._lab_path)
    build_node = lab.mgm
    mercury_tag = self._mercury_installer_location.split('/')[-1]
    self.log(message='Deploying {} on {}'.format(mercury_tag, build_node))

    if self._type_of_install == 'iso':
        # manual step: operator mounts the ISO via the OOB virtual media and confirms
        while True:
            ip, username, password = build_node.get_oob()
            ans = prompt('Run remote mounted ISO installation on http://{} ({}/{}) with RemoteShare={} RemoteFile=buildnode.iso, print FINISH when ready'.format(ip, username, password, self._mercury_installer_location))
            if ans == 'FINISH':
                break
    lab.r_n9_configure(is_clean_before=True)
    build_node.r_configure_mx_and_nat()

    # decide whether an installer tree is already present and whether to fetch a fresh tarball
    ans = build_node.exe('ls -d installer*', is_warn_only=True)
    if 'installer-' + mercury_tag in ans:
        installer_dir = ans
        build_node.exe(command='./unbootstrap.sh -y > /dev/null', in_directory=installer_dir, is_warn_only=True, estimated_time=100)
        is_get_tarball = False
    elif 'No such file or directory' in ans:
        installer_dir = 'installer-{}'.format(mercury_tag)
        is_get_tarball = True
    else:
        # a different version is installed: unbootstrap and remove it first
        old_installer_dir = ans
        installer_dir = 'installer-{}'.format(mercury_tag)
        build_node.exe(command='./unbootstrap.sh -y > /dev/null', in_directory=old_installer_dir, is_warn_only=True, estimated_time=100)
        build_node.exe('rm -f openstack-configs', is_warn_only=True)
        build_node.exe('rm -rf {}'.format(old_installer_dir))
        is_get_tarball = True
    if is_get_tarball:
        tar_url = self._mercury_installer_location + '/mercury-installer-internal.tar.gz'
        tar_path = build_node.r_get_remote_file(url=tar_url)
        build_node.exe('tar xzf {}'.format(tar_path))
        build_node.exe('rm -f {}'.format(tar_path))

    ans = build_node.exe('cat /etc/cisco-mercury-release', is_warn_only=True)
    if self._is_force_redeploy or mercury_tag not in ans:
        cfg_body = lab.create_mercury_setup_data_yaml(is_add_vts_role=self._is_add_vts_role)
        build_node.r_put_string_as_file_in_dir(string_to_put=cfg_body, file_name='setup_data.yaml', in_directory=installer_dir + '/openstack-configs')
        build_node.exe("find {} -name '*.pyc' -delete".format(installer_dir))
        build_node.exe('rm -rf /var/log/mercury/*')
        try:
            build_node.exe(command='./runner/runner.py -y -s 7,8 > /dev/null', in_directory=installer_dir, estimated_time=4000)  # run steps 1-6 during which we get all control and computes nodes re-loaded
        except Exception:  # FIX: was a bare except, which also swallowed SystemExit/KeyboardInterrupt
            build_node.exe('cat /var/log/mercury/installer/*')
            raise RuntimeError('Mercury ./runner/runner.py -y -s 7,8 failed')
        if not self._is_add_vts_role:
            cobbler = lab.get_cobbler()
            cobbler.cobbler_deploy()
            self._vts_deployer.execute({'servers': lab.get_vts_hosts(), 'clouds': []})
        try:
            build_node.exe(command='./runner/runner.py -y -p 7,8 > /dev/null', in_directory=installer_dir, estimated_time=600)  # run steps 7-8
        except Exception:  # FIX: was a bare except
            build_node.exe('cat /var/log/mercury/installer/*')
            raise RuntimeError('Mercury ./runner/runner.py -y -p 7,8 failed')
        build_node.exe(command='echo {} > /etc/cisco-mercury-release'.format(mercury_tag))

    lab.r_collect_information(regex='ERROR', comment='after mercury runner')
    openrc_body = build_node.exe(command='cat openstack-configs/openrc')
    # BUG FIX: str.strip('.yaml') strips a character set from both ends, not the suffix
    pod_name = self._lab_path[:-len('.yaml')] if self._lab_path.endswith('.yaml') else self._lab_path
    return OS.from_openrc(name=pod_name, mediator=build_node, openrc_as_string=openrc_body)
def __init__(self, lab_name, allowed_drivers):
    """Resolve *lab_name* into a pod, accepting only mechanism drivers listed in *allowed_drivers*."""
    from lab.laboratory import Laboratory

    pod = Laboratory.create(lab_name=lab_name, allowed_drivers=allowed_drivers)
    self.pod = pod
def __init__(self, config):
    """Build the provider's lab object from the hardware lab config path in *config*."""
    from lab.laboratory import Laboratory

    super(ProviderCobblerLab, self).__init__(config=config)
    lab_cfg_path = config['hardware-lab-config']
    self._lab = Laboratory(config_path=lab_cfg_path)
def cmd(config_path):
    """fab cmd:g10\t\t\t\tRun single command on lab device. :param config_path: path to valid hardware lab configuration, usually one of yaml in $REPO/configs """
    from fabric.operations import prompt
    from six import print_
    from lab.laboratory import Laboratory
    from lab.deployers.deployer_existing import DeployerExisting
    from lab.logger import lab_logger

    l = Laboratory(config_path=config_path)
    nodes = sorted(map(lambda node: node.get_id(), l.get_nodes_by_class()))
    # outer loop: pick a device ('cloud', a node id, or quit)
    while True:
        device_name = prompt(text='{lab} has: "cloud" and:\n {nodes}\n(use "quit" to quit)\n node? '.format(lab=l, nodes=nodes))
        if device_name == 'cloud':
            d = DeployerExisting({'cloud': config_path, 'hardware-lab-config': config_path})
            device = d.wait_for_cloud([])
        elif device_name in ['quit', 'q', 'exit']:
            return
        elif device_name not in nodes:
            print_(device_name, 'is not available')
            continue
        else:
            device = l.get_node_by_id(device_name)
        # all public method names of the chosen device become the operation menu
        method_names = [x for x in dir(device) if not x.startswith('_')]
        print_(device, ' has: \n', '\n'.join(method_names), '\n(use "node" to get back to node selection)')
        # inner loop: pick an operation on the device; 'node' returns to device selection
        while True:
            input_method_name = prompt(text='\n\n>>{0}<< operation?: '.format(device))
            if input_method_name in ['quit', 'q', 'exit']:
                return
            elif input_method_name == 'node':
                break
            elif input_method_name in ['r', 'rpt']:
                # re-print the operation menu
                print_(device, ' has: \n', '\n'.join(method_names), '\n(use "node" to get back to node selection)')
                continue
            else:
                # substring match against method names; proceed only on a unique match
                methods_in_filter = filter(lambda mth: input_method_name in mth, method_names)
                if len(methods_in_filter) == 0:
                    lab_logger.info('{} is not available'.format(input_method_name))
                    continue
                elif len(methods_in_filter) == 1:
                    input_method_name = methods_in_filter[0]
                elif len(methods_in_filter) > 1:
                    lab_logger.info('input "{}" matches:\n{}'.format(input_method_name, '\n'.join(methods_in_filter)))
                    continue
                method_to_execute = getattr(device, input_method_name)
                # introspect positional parameter names (Python 2 func_code), skipping self
                parameters = method_to_execute.func_code.co_varnames[1:method_to_execute.func_code.co_argcount]
                arguments = []
                for parameter in parameters:
                    argument = prompt(text='{p}=? '.format(p=parameter))
                    # coerce operator input that looks like a list or a boolean
                    if argument.startswith('['):
                        argument = argument.strip('[]').split(',')
                    elif argument in ['True', 'true', 'yes']:
                        argument = True
                    elif argument in ['False', 'false', 'no']:
                        argument = False
                    arguments.append(argument)
                # noinspection PyBroadException
                try:
                    results = method_to_execute(*arguments)
                    lab_logger.info('\n>>{}<< RESULTS:\n\n{}\n'.format(device, results))
                except Exception as ex:
                    lab_logger.exception('\n Exception: {0}'.format(ex))
def __init__(self, config):
    """Construct the provider's pod from the hardware lab config referenced by *config*."""
    from lab.laboratory import Laboratory

    super(ProviderExistingLab, self).__init__()
    # note: this Laboratory variant takes cfg_or_path rather than config_path
    self.pod = Laboratory(cfg_or_path=config['hardware-lab-config'])
class RunnerCloud9(Runner):
    """Runner that prepares an existing tripleo-style deployment for SQE use.

    Works through the lab's director node: copies stack rc files, installs filebeat
    log shipping on controllers, enables neutron debug logging and checks out the
    openstack-sqe repo into a virtualenv.
    """

    def sample_config(self):
        return {'yaml-path': 'yaml path'}

    def __init__(self, config):
        from lab.laboratory import Laboratory
        super(RunnerCloud9, self).__init__(config=config)
        # resolve the lab once and keep the director handy — all steps run through it
        self.lab = Laboratory(config_path=config['yaml-path'])
        self.director = self.lab.director()

    # def __assign_ip_to_user_nic(self, undercloud):
    #     ssh = 'ssh -o StrictHostKeyChecking=no heat-admin@'
    #     for server in self.lab.computes():
    #         line = self.director.run(command='source {rc} && nova list | grep {name}'.format(rc=undercloud, name=server.name()))
    #         pxe_ip = line.split('=')[-1].replace(' |', '')
    #         line = self.director.run("{s}{pxe_ip} /usr/sbin/ip -o l | awk '/:aa:/ {{print $2}}'".format(s=ssh, pxe_ip=pxe_ip))
    #         user_if = line.split('\n')[-1].strip(':')
    #         self.director.run('{s}{pxe_ip} sudo ip a flush dev {user_if}'.format(s=ssh, pxe_ip=pxe_ip, user_if=user_if))
    #         self.director.run('{s}{pxe_ip} sudo ip a a {user_ip}/{bits} dev {user_if}'.format(s=ssh, pxe_ip=pxe_ip, user_if=user_if, user_ip=server.ip, bits=server.net.prefixlen))
    #         self.director.run('{s}{pxe_ip} sudo ip r r default via {user_gw} dev {user_if}'.format(s=ssh, pxe_ip=pxe_ip, user_gw=self.lab.user_gw, user_if=user_if))
    #     for server in self.lab.all_but_director():
    #         self.director.run('{s}{ip} \'echo "{public}" >> .ssh/authorized_keys\''.format(s=ssh, ip=server.ip, public=self.lab.public_key))

    def __copy_stack_files(self, user):
        """Copy overcloudrc/stackrc and the stack ssh keys into the current user's home; returns their paths."""
        self.director.run(command='sudo cp /home/stack/overcloudrc .')
        self.director.run(command='sudo cp /home/stack/stackrc .')
        self.director.run(command='sudo cp /home/stack/.ssh/id_rsa* .', in_directory='.ssh')
        self.director.run(command='sudo chown {0}.{0} overcloudrc stackrc .ssh/*'.format(user))
        # allow the user to ssh to localhost with the copied key
        self.director.run(command='cat id_rsa.pub >> authorized_keys', in_directory='.ssh')
        return '~/overcloudrc', '~/stackrc'

    def __prepare_sqe_repo(self):
        """Clone openstack-sqe on the director and install its requirements in a virtualenv; returns the repo dir."""
        import os
        self.director.check_or_install_packages(package_names='python-virtualenv')
        sqe_repo = self.director.clone_repo(repo_url='https://github.com/cisco-openstack/openstack-sqe.git')
        sqe_venv = os.path.join('~/VE', os.path.basename(sqe_repo))
        self.director.run(command='virtualenv {0}'.format(sqe_venv))
        self.director.run(command='{0}/bin/pip install -r requirements.txt'.format(sqe_venv), in_directory=sqe_repo)
        return sqe_repo

    def __create_bashrc(self, sqe_repo):
        """Point the user's ~/.bashrc at the repo's canned bashrc."""
        self.director.run(command='rm -f ~/.bashrc')
        self.director.run(command='ln -s {0}/configs/bashrc ~/.bashrc'.format(sqe_repo))

    def __install_filebeat(self):
        """Install filebeat on every controller and ship neutron server.log to the lab's logstash."""
        for server in self.lab.controllers():
            # NOTE(review): the indentation of this embedded YAML was reconstructed — verify
            # against a deployed /etc/filebeat/filebeat.yml before relying on it.
            filebeat_config_body = '''
filebeat:
  prospectors:
    -
      paths:
        - /var/log/neutron/server.log
      input_type: log
      document_type: {document_type}
output:
  logstash:
    hosts: ["{logstash}"]
'''.format(logstash=self.lab.logstash_creds(), document_type=server.actuate_hostname())
            filebeat = 'filebeat-1.0.0-x86_64.rpm'
            server.run(command='curl -L -O http://172.29.173.233/{0}'.format(filebeat))
            server.run(command='sudo rpm --force -vi {0}'.format(filebeat))
            server.put_string_as_file_in_dir(string_to_put=filebeat_config_body, file_name='filebeat.yml', in_directory='/etc/filebeat')
            server.run(command='sudo /etc/init.d/filebeat restart')
            server.run(command='sudo /etc/init.d/filebeat status')

    def enable_neutron_debug_verbose(self):
        """Flip verbose/debug to True in neutron.conf on every controller and restart neutron-server."""
        for server in self.lab.controllers():
            server.run("sed -i 's/^verbose = False/verbose = True/g' /etc/neutron/neutron.conf")
            server.run("sed -i 's/^debug = False/debug = True/g' /etc/neutron/neutron.conf")
            server.run("systemctl restart neutron-server")

    def run_on_director(self):
        """Full preparation sequence executed on the director node."""
        user = '******'
        self.director.run(command='sudo rm -f /home/{0}/.bashrc'.format(user), warn_only=True)
        self.director.create_user(new_username=user)
        overcloud, undercloud = self.__copy_stack_files(user=user)
        self.director.run(command='ssh -o StrictHostKeyChecking=no localhost hostname')
        # self.__assign_ip_to_user_nic(undercloud=undercloud)
        self.__install_filebeat()
        self.enable_neutron_debug_verbose()
        # collect controller/compute pxe IPs from the undercloud's nova list output
        undercloud_nodes = self.director.run(command='source {0} && nova list'.format(undercloud))
        role_ip = []
        counts = {'controller': 0, 'compute': 0}
        for line in undercloud_nodes.split('\n'):
            for role in ['controller', 'compute']:
                if role in line:
                    ip = line.split('=')[-1].replace(' |', '')
                    counts[role] += 1
                    # yaml-ish fragment describing the node; NOTE(review): role_ip is built but
                    # never used afterwards in this method — confirm whether that is intended
                    role_ip.append('{role}-{n}:\n ip: {ip}\n user: heat-admin\n password: ""\n role: {role}'.format(role=role, n=counts[role], ip=ip))
        sqe_repo = self.__prepare_sqe_repo()
        self.__create_bashrc(sqe_repo=sqe_repo)

    def execute(self, clouds, servers):
        # delegates entirely to the base Runner
        super(RunnerCloud9, self).execute(clouds, servers)
def __call__(self, *args, **kwargs):
    """Build and return an OS cloud object mediated by the pod's mgm node."""
    from lab.cloud.openstack import OS
    from lab.laboratory import Laboratory

    mgm = Laboratory.create(lab_name=self.pod_name, is_mgm_only=True)
    cloud = OS(name=self.pod_name, mediator=mgm, openrc_path='openrc')
    return cloud
def __init__(self, config):
    """Load the lab from the given hardware config and keep a handle on its director."""
    from lab.laboratory import Laboratory

    super(ProviderExistingLab, self).__init__(config=config)
    lab = Laboratory(config_path=config['hardware-lab-config'])
    # NOTE(review): despite its name, _lab is assigned the director node — confirm intended
    self._lab = lab.get_director()
def deploy_cloud(self, list_of_servers):
    """Install (or re-install) the Mercury cloud on the lab's build node and return an OS object.

    :param list_of_servers: unused here; kept for interface compatibility with other deployers
    :returns: OS built from the openrc found on the build node after installation
    :raises RuntimeError: when either Mercury runner phase fails
    """
    from lab.cloud.openstack import OS
    from fabric.operations import prompt
    from lab.laboratory import Laboratory

    lab = Laboratory(config_path=self._lab_path)
    build_node = lab.mgm
    mercury_tag = self._mercury_installer_location.split('/')[-1]
    self.log(message='Deploying {} on {}'.format(mercury_tag, build_node))

    if self._type_of_install == 'iso':
        # manual step: the operator mounts the ISO via OOB virtual media, then confirms
        while True:
            ip, username, password = build_node.get_oob()
            ans = prompt('Run remote mounted ISO installation on http://{} ({}/{}) with RemoteShare={} RemoteFile=buildnode.iso, print FINISH when ready'.format(ip, username, password, self._mercury_installer_location))
            if ans == 'FINISH':
                break
    lab.r_n9_configure(is_clean_before=True)
    build_node.r_configure_mx_and_nat()

    # determine the state of any existing installer tree on the build node
    ans = build_node.exe('ls -d installer*', is_warn_only=True)
    if 'installer-' + mercury_tag in ans:
        installer_dir = ans
        build_node.exe(command='./unbootstrap.sh -y > /dev/null', in_directory=installer_dir, is_warn_only=True, estimated_time=100)
        is_get_tarball = False
    elif 'No such file or directory' in ans:
        installer_dir = 'installer-{}'.format(mercury_tag)
        is_get_tarball = True
    else:
        # a different installer version exists: unbootstrap it and wipe it first
        old_installer_dir = ans
        installer_dir = 'installer-{}'.format(mercury_tag)
        build_node.exe(command='./unbootstrap.sh -y > /dev/null', in_directory=old_installer_dir, is_warn_only=True, estimated_time=100)
        build_node.exe('rm -f openstack-configs', is_warn_only=True)
        build_node.exe('rm -rf {}'.format(old_installer_dir))
        is_get_tarball = True
    if is_get_tarball:
        tar_url = self._mercury_installer_location + '/mercury-installer-internal.tar.gz'
        tar_path = build_node.r_get_remote_file(url=tar_url)
        build_node.exe('tar xzf {}'.format(tar_path))
        build_node.exe('rm -f {}'.format(tar_path))

    ans = build_node.exe('cat /etc/cisco-mercury-release', is_warn_only=True)
    if self._is_force_redeploy or mercury_tag not in ans:
        cfg_body = lab.create_mercury_setup_data_yaml(is_add_vts_role=self._is_add_vts_role)
        build_node.r_put_string_as_file_in_dir(string_to_put=cfg_body, file_name='setup_data.yaml', in_directory=installer_dir + '/openstack-configs')
        build_node.exe("find {} -name '*.pyc' -delete".format(installer_dir))
        build_node.exe('rm -rf /var/log/mercury/*')
        try:
            build_node.exe(command='./runner/runner.py -y -s 7,8 > /dev/null', in_directory=installer_dir, estimated_time=4000)  # run steps 1-6 during which we get all control and computes nodes re-loaded
        except Exception:  # FIX: was a bare except, which also swallowed SystemExit/KeyboardInterrupt
            build_node.exe('cat /var/log/mercury/installer/*')
            raise RuntimeError('Mercury ./runner/runner.py -y -s 7,8 failed')
        if not self._is_add_vts_role:
            cobbler = lab.get_cobbler()
            cobbler.cobbler_deploy()
            self._vts_deployer.execute({'servers': lab.get_vts_hosts(), 'clouds': []})
        try:
            build_node.exe(command='./runner/runner.py -y -p 7,8 > /dev/null', in_directory=installer_dir, estimated_time=600)  # run steps 7-8
        except Exception:  # FIX: was a bare except
            build_node.exe('cat /var/log/mercury/installer/*')
            raise RuntimeError('Mercury ./runner/runner.py -y -p 7,8 failed')
        build_node.exe(command='echo {} > /etc/cisco-mercury-release'.format(mercury_tag))

    lab.r_collect_information(regex='ERROR', comment='after mercury runner')
    openrc_body = build_node.exe(command='cat openstack-configs/openrc')
    # BUG FIX: str.strip('.yaml') strips a character set from both ends, not the suffix
    pod_name = self._lab_path[:-len('.yaml')] if self._lab_path.endswith('.yaml') else self._lab_path
    return OS.from_openrc(name=pod_name, mediator=build_node, openrc_as_string=openrc_body)
def __init__(self, config):
    """Initialise the provider and materialise the lab described by the hardware config."""
    from lab.laboratory import Laboratory

    super(ProviderCobblerLab, self).__init__(config=config)
    self._lab = Laboratory(config_path=config['hardware-lab-config'])
def __init__(self, lab_name, allowed_drivers):
    """Create the pod for *lab_name*; Laboratory.create validates the driver against *allowed_drivers*."""
    from lab.laboratory import Laboratory

    self.pod = Laboratory.create(lab_name=lab_name, allowed_drivers=allowed_drivers)
class FakeCloud(object):
    """Minimal stand-in for a cloud object, exposing only a `pod` attribute."""
    # NOTE(review): Laboratory must already be in scope here, and it is called with no
    # arguments unlike every other call site — confirm this constructs a usable stub.
    pod = Laboratory()
class RunnerCloud9(Runner):
    """Runner that prepares an existing tripleo-style deployment for SQE use.

    Works through the lab's director node: copies stack rc files, installs filebeat
    log shipping on controllers, enables neutron debug logging and checks out the
    openstack-sqe repo into a virtualenv.
    """

    def sample_config(self):
        return {'yaml-path': 'yaml path'}

    def __init__(self, config):
        from lab.laboratory import Laboratory
        super(RunnerCloud9, self).__init__(config=config)
        # resolve the lab once and keep the director handy — all steps run through it
        self.lab = Laboratory(config_path=config['yaml-path'])
        self.director = self.lab.director()

    # def __assign_ip_to_user_nic(self, undercloud):
    #     ssh = 'ssh -o StrictHostKeyChecking=no heat-admin@'
    #     for server in self.lab.computes():
    #         line = self.director.run(command='source {rc} && nova list | grep {name}'.format(rc=undercloud, name=server.name()))
    #         pxe_ip = line.split('=')[-1].replace(' |', '')
    #         line = self.director.run("{s}{pxe_ip} /usr/sbin/ip -o l | awk '/:aa:/ {{print $2}}'".format(s=ssh, pxe_ip=pxe_ip))
    #         user_if = line.split('\n')[-1].strip(':')
    #         self.director.run('{s}{pxe_ip} sudo ip a flush dev {user_if}'.format(s=ssh, pxe_ip=pxe_ip, user_if=user_if))
    #         self.director.run('{s}{pxe_ip} sudo ip a a {user_ip}/{bits} dev {user_if}'.format(s=ssh, pxe_ip=pxe_ip, user_if=user_if, user_ip=server.ip, bits=server.net.prefixlen))
    #         self.director.run('{s}{pxe_ip} sudo ip r r default via {user_gw} dev {user_if}'.format(s=ssh, pxe_ip=pxe_ip, user_gw=self.lab.user_gw, user_if=user_if))
    #     for server in self.lab.all_but_director():
    #         self.director.run('{s}{ip} \'echo "{public}" >> .ssh/authorized_keys\''.format(s=ssh, ip=server.ip, public=self.lab.public_key))

    def __copy_stack_files(self, user):
        """Copy overcloudrc/stackrc and the stack ssh keys into the current user's home; returns their paths."""
        self.director.exe(command='sudo cp /home/stack/overcloudrc .')
        self.director.exe(command='sudo cp /home/stack/stackrc .')
        self.director.exe(command='sudo cp /home/stack/.ssh/id_rsa* .', in_directory='.ssh')
        self.director.exe(command='sudo chown {0}.{0} overcloudrc stackrc .ssh/*'.format(user))
        # allow the user to ssh to localhost with the copied key
        self.director.exe(command='cat id_rsa.pub >> authorized_keys', in_directory='.ssh')
        return '~/overcloudrc', '~/stackrc'

    def __prepare_sqe_repo(self):
        """Clone openstack-sqe on the director and install its requirements in a virtualenv; returns the repo dir."""
        import os
        self.director.check_or_install_packages(package_names='python-virtualenv')
        sqe_repo = self.director.clone_repo(repo_url='https://github.com/cisco-openstack/openstack-sqe.git')
        sqe_venv = os.path.join('~/VE', os.path.basename(sqe_repo))
        self.director.exe(command='virtualenv {0}'.format(sqe_venv))
        self.director.exe(command='{0}/bin/pip install -r requirements.txt'.format(sqe_venv), in_directory=sqe_repo)
        return sqe_repo

    def __create_bashrc(self, sqe_repo):
        """Point the user's ~/.bashrc at the repo's canned bashrc."""
        self.director.exe(command='rm -f ~/.bashrc')
        self.director.exe(command='ln -s {0}/configs/bashrc ~/.bashrc'.format(sqe_repo))

    def __install_filebeat(self):
        """Install filebeat on every controller and ship neutron server.log to the lab's logstash."""
        for server in self.lab.controllers():
            # NOTE(review): the indentation of this embedded YAML was reconstructed — verify
            # against a deployed /etc/filebeat/filebeat.yml before relying on it.
            filebeat_config_body = '''
filebeat:
  prospectors:
    -
      paths:
        - /var/log/neutron/server.log
      input_type: log
      document_type: {document_type}
output:
  logstash:
    hosts: ["{logstash}"]
'''.format(logstash=self.lab.logstash_creds(), document_type=server.actuate_hostname())
            filebeat = 'filebeat-1.0.0-x86_64.rpm'
            server.exe(command='curl -L -O http://172.29.173.233/{0}'.format(filebeat))
            server.exe(command='sudo rpm --force -vi {0}'.format(filebeat))
            server.r_put_string_as_file_in_dir(string_to_put=filebeat_config_body, file_name='filebeat.yml', in_directory='/etc/filebeat')
            server.exe(command='sudo /etc/init.d/filebeat restart')
            server.exe(command='sudo /etc/init.d/filebeat status')

    def enable_neutron_debug_verbose(self):
        """Flip verbose/debug to True in neutron.conf on every controller and restart neutron-server."""
        for server in self.lab.controllers():
            server.exe("sed -i 's/^verbose = False/verbose = True/g' /etc/neutron/neutron.conf")
            server.exe("sed -i 's/^debug = False/debug = True/g' /etc/neutron/neutron.conf")
            server.exe("systemctl restart neutron-server")

    def run_on_director(self):
        """Full preparation sequence executed on the director node."""
        user = '******'
        self.director.exe(command='sudo rm -f /home/{0}/.bashrc'.format(user), warn_only=True)
        self.director.r_create_user(new_username=user)
        overcloud, undercloud = self.__copy_stack_files(user=user)
        self.director.exe(command='ssh -o StrictHostKeyChecking=no localhost hostname')
        # self.__assign_ip_to_user_nic(undercloud=undercloud)
        self.__install_filebeat()
        self.enable_neutron_debug_verbose()
        # collect controller/compute pxe IPs from the undercloud's nova list output
        undercloud_nodes = self.director.exe(command='source {0} && nova list'.format(undercloud))
        role_ip = []
        counts = {'controller': 0, 'compute': 0}
        for line in undercloud_nodes.split('\n'):
            for role in ['controller', 'compute']:
                if role in line:
                    ip = line.split('=')[-1].replace(' |', '')
                    counts[role] += 1
                    # yaml-ish fragment describing the node; NOTE(review): role_ip is built but
                    # never used afterwards in this method — confirm whether that is intended
                    role_ip.append('{role}-{n}:\n ip: {ip}\n user: heat-admin\n password: ""\n role: {role}'.format(role=role, n=counts[role], ip=ip))
        sqe_repo = self.__prepare_sqe_repo()
        self.__create_bashrc(sqe_repo=sqe_repo)

    def execute(self, clouds, servers):
        # delegates to the base Runner, then reports success
        super(RunnerCloud9, self).execute(clouds, servers)
        return True