def cli_boot_factory_default(self, node, timeout=360):
    """
    Runs boot factory-default. This will cause the SSH connection to
    disappear and the session would need to be restarted.

    :param node: topology name of the controller node to factory-default
    :param timeout: per-expect timeout in seconds for the slow BVS
                    partition-rewrite steps (default 360)
    :return: True once the reboot has been triggered
    """
    t = test.Test()
    n = t.node(node)
    if helpers.is_bigtap(n.platform()):
        helpers.log("Boot factory-default on '%s' (Big Tap Controller)" % node)
        n.enable("boot factory-default", prompt=r'Do you want to continue \[no\]\? ')
        # NOTE(review): the line below was mangled by a secrets scrubber
        # ('******' replaced the original literals/arguments). The original
        # almost certainly answered the two "NEW admin password" prompts and
        # waited for the 'UNAVAILABLE localhost' banner — reconstruct from
        # version control before relying on this branch.
        n.enable("yes", prompt='Enter NEW admin password: '******'Repeat NEW admin password: '******'UNAVAILABLE localhost')
    elif helpers.is_bvs(n.platform()):
        helpers.log("Boot factory-default on '%s' (BVS)" % node)
        helpers.summary_log(
            'BVS boot factory may take a bit of time. Setting timeout to %s seconds.'
            % timeout)
        # Expected CLI transcript for the BVS flow:
        # vui-bvs> enable
        # vui-bvs# boot factory-default
        # boot factory default: will over-write the alternate partition
        # proceed ("yes" or "y" to continue): y
        # boot factory default: loading image into stage partition
        # boot factory default: checking integrity of new partition
        # boot factory default: New Partition Ready
        # factory default: ready for reboot
        # boot factory default: reboot? ("yes" or "y" to continue): y
        #
        # Broadcast message from root@blah
        # (unknown) at 20:32 ...
        #
        # The system is going down for reboot NOW!
        # Connection to 10.192.104.2 closed by remote host.
        # Connection to 10.192.104.2 closed.
        n.enable('')
        n.send('boot factory-default')
        n.expect(r'proceed \("y" or "yes" to continue\)')
        n.send('y')
        # Each stage of the partition rewrite can be slow; use the caller's
        # timeout rather than the session default.
        n.expect(r'copying image into alternate partition', timeout=timeout)
        n.expect(r'checking integrity of new partition', timeout=timeout)
        n.expect(r'New Partition Ready', timeout=timeout)
        n.expect(r'ready for reboot', timeout=timeout)
        n.expect(r'"y" or "yes" to continue\): ', timeout=timeout)
        n.send('y')
        # n.expect(r'system is going down for reboot')
        helpers.summary_log("'%s' has been rebooted." % node)
    else:
        helpers.test_error(
            "Boot factory-default is only supported on 'bvs' and 'bigtap'")
    # At this point, device is rebooted and we lose the session handle.
    # Connect to device console to complete first-boot.
    helpers.log(
        "Boot factory-default completed on '%s'. System should be rebooting."
        % node)
    return True
def create_vm_on_kvm_host(self, **kwargs):
    """Create a VM on a KVM host via virt-install and report the outcome.

    Keyword args:
        vm_name: libvirt domain name for the new VM
        vm_type: "bcf" (default) or "mininet"
        qcow_path: path to the qcow2 disk image on the KVM host
        kvm_handle: bash/SSH handle to the KVM host
        kvm_host: KVM host name/IP (used only for logging)
        network_interface: bridge/interface to attach the VM to
        vm_ram: RAM in MB for controller VMs (default "2048")

    :return: truthy on successful creation, falsy otherwise.
        Bug fix: the original only logged the outcome and returned None,
        so callers could not detect a failed creation.
    """
    vm_name = kwargs.get("vm_name", None)
    vm_type = kwargs.get("vm_type", "bcf")
    kvm_vmdk_path = kwargs.get("qcow_path", None)
    kvm_handle = kwargs.get("kvm_handle", None)
    kvm_host = kwargs.get("kvm_host", None)
    network_interface = kwargs.get("network_interface", None)
    vm_ram = kwargs.get("vm_ram", "2048")

    # Mininet VMs use virt-install defaults; controller VMs get explicit
    # RAM and 8 vCPUs (same split as the original implementation).
    if vm_type == "mininet":
        vm_creation = self._virt_install_vm(
            kvm_handle=kvm_handle,
            disk_path=kvm_vmdk_path,
            vm_name=vm_name,
            network_interface=network_interface)
    else:
        vm_creation = self._virt_install_vm(
            kvm_handle=kvm_handle,
            disk_path=kvm_vmdk_path,
            vm_name=vm_name,
            ram=vm_ram,
            cpus="8",
            network_interface=network_interface)

    if vm_creation:
        helpers.summary_log(
            "2. Success Creating VM with Name: %s on KVM_Host: %s"
            % (vm_name, kvm_host))
        helpers.log("Current Running VMs on KVM_HOST : \n %s"
                    % kvm_handle.bash('sudo virsh list --all')['content'])
    else:
        helpers.summary_log("FAILURE CREATING VM :( Need to debug ...")
    return vm_creation
def vm_teardown(self, vm_name, kvm_host=KVM_SERVER, kvm_user=KVM_USER,
                kvm_password=KVM_PASSWORD):
    """Destroy, undefine, and remove the storage file of a VM on a KVM host.

    :param vm_name: libvirt domain name of the VM to remove
    :param kvm_host: KVM host to connect to (default KVM_SERVER)
    :param kvm_user: SSH user for the KVM host (default KVM_USER)
    :param kvm_password: SSH password for the KVM host (default KVM_PASSWORD)
    :return: dict with keys 'vm_name', 'status_code' (bool), 'status_descr'
    """
    result = {
        "vm_name": vm_name,
        "status_code": True,
        "status_descr": "Success",
    }
    try:
        kvm_handle = self._connect_to_kvm_host(hostname=kvm_host,
                                               user=kvm_user,
                                               password=kvm_password)
        vm_state = self._get_vm_running_state(kvm_handle=kvm_handle,
                                              vm_name=vm_name)
        helpers.summary_log("vm_State: %s\n" % str(vm_state))
        if re.match(r'.*running.*', vm_state) or re.match(
                r'.*paused.*', vm_state):
            helpers.summary_log(
                "Tearing down VM with Name:%s on kvm host: %s"
                % (vm_name, kvm_host))
            self._destroy_vm(kvm_handle=kvm_handle, vm_name=vm_name)
            self._undefine_vm(kvm_handle=kvm_handle, vm_name=vm_name)
            helpers.log("Checking The State of Vm : %s" % vm_name)
            new_vm_state = self._get_vm_running_state(
                kvm_handle=kvm_handle, vm_name=vm_name)
            helpers.log(" new vm_state : %s" % new_vm_state)
            if new_vm_state != '':
                helpers.log("Vm still alive trying to destroy again..")
                # Bug fix: the retry's result dict used to be discarded, so
                # a failed retry was still reported as success. Return it
                # (the recursive call also handles storage-file deletion).
                # NOTE(review): recursion is unbounded if the VM never dies.
                return self.vm_teardown(vm_name, kvm_host, kvm_user,
                                        kvm_password)
            else:
                helpers.log("Vm Is Dead!")
        elif re.match(r'.*shut.*', vm_state):
            # VM is defined but shut off: undefine only, nothing to destroy.
            helpers.summary_log(
                "Deleting down the VM : %s on KVM_HOST: %s"
                % (vm_name, kvm_host))
            self._undefine_vm(kvm_handle=kvm_handle, vm_name=vm_name)
        else:
            helpers.summary_log(
                "VM with given name %s doesn't exists on KVM host! %s"
                % (vm_name, kvm_host))
            result['status_code'] = False
            result['status_descr'] = "VM name doesn't exist on KVM host"
            return result
        self._delete_vm_storage_file(kvm_handle=kvm_handle, vm_name=vm_name)
        return result
    except Exception:
        # Narrowed from a bare except: still best-effort (error is logged
        # and reported in the result), but no longer swallows SystemExit /
        # KeyboardInterrupt.
        inst = helpers.exception_info_traceback()
        helpers.log("Exception Details:\n%s" % inst)
        result['status_code'] = False
        result['status_descr'] = inst
        return result
def _configure_vm_first_boot(self, cluster_ip=None, ip_address=None,
                             netmask='18', vm_host_name=None,
                             gateway=DEFAULT_GATEWAY):
    """Run the controller first-boot dialog on a freshly spawned KVM VM.

    Uses T5Platform's first-boot helpers (on topology node "c1") to
    configure either a static IP (when ip_address is given) or DHCP,
    optionally joins an existing cluster, applies the menu, and returns
    the controller's resulting IP address.

    :param cluster_ip: if given, join the cluster at this IP
    :param ip_address: static IP for the VM; None means use DHCP
    :param netmask: netmask as a prefix length string (default '18')
    :param vm_host_name: hostname to assign during first boot
    :param gateway: default gateway (default DEFAULT_GATEWAY)
    :return: IP address reported after the first-boot menu is applied
    """
    # Using Mingtao's First Boot Function to configure spawned VM in KVM
    # Bug fix: the log message claimed 60 sec but the code sleeps 120.
    helpers.log("Sleeping 120 sec while waiting for VM to boot up")
    time.sleep(120)
    helpers.log("Success setting up gobot Env!")
    t5_platform = T5Platform()
    # configure firstboot till IP address
    if ip_address is not None:
        helpers.summary_log(
            "Static IP is given using ip: %s netmask: %s, gateway: %s for VM"
            % (ip_address, netmask, gateway))
        t5_platform.first_boot_controller_initial_node_setup(
            "c1", ip_address=ip_address, netmask=netmask,
            hostname=vm_host_name, gateway=gateway)
    else:
        t5_platform.first_boot_controller_initial_node_setup(
            "c1", dhcp="yes", hostname=vm_host_name)
    # Apply setting and add cluster Ip if provided
    if cluster_ip is not None:
        t5_platform.first_boot_controller_initial_cluster_setup(
            "c1", join_cluster="yes", cluster_ip=cluster_ip)
        helpers.summary_log("Success Adding BVS VM to Cluster!")
    else:
        t5_platform.first_boot_controller_initial_cluster_setup("c1")
    new_ip_address = t5_platform.first_boot_controller_menu_apply("c1")
    helpers.summary_log(
        "3. Success configuring first boot Controller IP : %s"
        % str(new_ip_address))
    return new_ip_address
def cli_upgrade_launch_break(self, breakpoint=None, node='master', option=''):
    '''upgrade launch break - break out of the upgrade at various points

       Author: Mingtao
       input: node       - controller master, slave, c1 c2
              option     - revert, suspend
              breakpoint - None:    no break, run upgrade to completion
                           proceed: send "no" at the proceed prompt
                           upgrade: send "no" at the upgrade prompt
                           phase1:  send ctrl-c during phase-1-migrate
                           phase2:  send ctrl-c during phase-2-migrate
       output: True  - upgrade launched (or aborted at the requested point)
               False - upgrade launch did NOT succeed
    '''
    t = test.Test()
    c = t.controller(node)
    bsn = BsnCommon.BsnCommon()
    helpers.log('INFO: Entering ==> cli_upgrade_launch ')
    c.config('')
    string = 'upgrade launch ' + option
    # c.send('upgrade launch')
    c.send(string)
    c.expect(r'[\r\n].+ \("yes" or "y" to continue\):', timeout=180)
    content = c.cli_content()
    helpers.log("*****USER INFO:\n%s" % content)
    if breakpoint == 'proceed':
        c.send('no')
        helpers.log("USER INFO: terminate upgrade at proceed: %s" % node)
        return True
    else:
        c.send("yes")
        options = c.expect([r'fabric is redundant',
                            r'.* HITFULL upgrade \(y or yes to continue\):'])
        content = c.cli_content()
        helpers.log("USER INFO: the content: %s" % content)
        # options[0] holds the index of the matched pattern; 1 means the
        # HITFULL confirmation prompt appeared and needs an answer.
        if options[0] == 1:
            if breakpoint == 'upgrade':
                c.send("no")
                helpers.log("USER INFO: terminate upgrade at upgrade: %s"
                            % node)
                return True
            else:
                c.send("yes")
        if breakpoint is None:
            try:
                # Bug fix: '[R|r]' is a character class containing '|';
                # '[Rr]' is the intended Rebooting/rebooting match.
                c.expect(r'[\r\n].+[Rr]ebooting.*')
                content = c.cli_content()
                helpers.log("*****Output is :\n%s" % content)
            except Exception:
                helpers.log('ERROR: upgrade launch NOT successfully')
                return False
            else:
                helpers.log('INFO: upgrade launch successfully')
                return True
        else:
            # need to split for master or standby
            if node == 'master':
                role = 'active'
            elif node == 'slave':
                role = 'stand-by'
            else:
                role = bsn.rest_get_node_role(node)
            if role == 'active':
                # Bug fix: the original format string had three placeholders
                # ("%s is %s \n%s") but only two arguments, which raised
                # TypeError at runtime.
                helpers.log("USER INFO: %s is %s" % (node, role))
                c.expect(r'waiting for standby to begin \"upgrade launch\"',
                         timeout=360)
                c.expect(r'config updates are frozen for update', timeout=360)
                c.expect(r'standby has begun upgrade', timeout=360)
                c.expect(r'waiting for standby to complete switch handoff',
                         timeout=360)
                c.expect(r'waiting for upgrade to complete \(remove-standby-controller-config-completed\)',
                         timeout=360)
                c.expect(r'new state: phase-1-migrate', timeout=360)
                if breakpoint == 'phase1':
                    c.send(helpers.ctrl('c'))
                    helpers.summary_log('Ctrl C is hit during phase-1-migrate')
                    return True
                c.expect(r'waiting for upgrade to complete \(phase-1-migrate\)',
                         timeout=360)
                c.expect(r'new state: phase-2-migrate', timeout=360)
                if breakpoint == 'phase2':
                    c.send(helpers.ctrl('c'))
                    # Bug fix: message wrongly said phase-1-migrate here.
                    helpers.summary_log('Ctrl C is hit during phase-2-migrate')
                    return True
                c.expect(r'waiting for upgrade to complete \(phase-2-migrate\)',
                         timeout=360)
                c.expect(r'The system is going down for reboot NOW!',
                         timeout=360)
                return True
            elif role == 'stand-by':
                c.expect(r'waiting for active to begin \"upgrade launch\"',
                         timeout=360)
                c.expect(r'upgrader nonce', timeout=360)
                c.expect(r'Leader->begin-upgrade-old state: begin-completed',
                         timeout=360)
                c.expect(r'Leader->partition state: partition-completed',
                         timeout=360)
                c.expect(r'Leader->remove-standby-controller-config state: remove-standby-controller-config-completed',
                         timeout=360)
                # Bug fix: '[R|r]' -> '[Rr]' (see above).
                c.expect(r'[Rr]ebooting', timeout=360)
                return True
def vm_setup(self, **kwargs):
    """End-to-end VM bring-up on a KVM host.

    Sets up per-VM logging, fetches (or reuses) a qcow2 image, creates the
    VM via create_vm_on_kvm_host(), then either configures a mininet VM's
    IP or runs the controller first-boot flow and saves firstboot-config.

    Keyword args (all optional): vm_name, kvm_host, kvm_user, kvm_password,
    vm_host_name, vm_type ("bcf"/"mininet"), qcow_path, ip, vm_ram,
    build_number, cluster_ip, netmask, gateway, network_interface,
    remote_qcow_bvs_path, remote_qcow_mininet_path.

    :return: result dict with status_code/status_descr plus vm_name,
             kvm_host, image_path and vm_ip on success; False when a VM
             of the same name already exists on the KVM host.
    """
    result = {
        "status_code": True,
        "status_descr": "Success",
    }
    try:
        vm_name = kwargs.get("vm_name", None)
        kvm_host = kwargs.get("kvm_host", KVM_SERVER)
        kvm_user = kwargs.get("kvm_user", KVM_USER)
        kvm_password = kwargs.get("kvm_password", KVM_PASSWORD)
        vm_host_name = kwargs.get("vm_host_name", None)
        vm_type = kwargs.get("vm_type", "bcf")
        qcow_path = kwargs.get("qcow_path", None)
        qcow_vm_path = None
        ip = kwargs.get("ip", None)
        vm_ram = kwargs.get("vm_ram", "2048")
        build_number = kwargs.get("build_number", None)
        # Callers may pass the literal string 'None' (e.g. from Robot
        # variables); normalize it to a real None.
        if ip == 'None':
            ip = None
        cluster_ip = kwargs.get("cluster_ip", None)
        netmask = kwargs.get("netmask", "18")
        gateway = kwargs.get("gateway", "10.8.0.1")
        network_interface = kwargs.get("network_interface", "br0")
        self.log_path = LOG_BASE_PATH + '/' + vm_name
        try:
            if os.path.exists(self.log_path) or os.path.islink(
                    self.log_path):
                pass
            else:
                os.makedirs(self.log_path)
        except OSError as exc:  # Python >2.5
            if exc.errno == errno.EEXIST and os.path.isdir(LOG_BASE_PATH):
                pass
            else:
                # Last resort - put logs in /tmp
                self.log_path = '/tmp' + '/' + vm_name
                os.makedirs(self.log_path)
        # export IS_GOBOT="False"
        helpers.set_env("AUTOBOT_LOG", "%s/%s.log" % (self.log_path,
                                                      vm_name))
        helpers.bigrobot_log_path_exec_instance(self.log_path)
        # Note: helpers.summary_log() and helpers.log() are not called
        #       until after we've initialized the BigRobot log path
        #       (above). Don't attempt to write to logs before that
        #       or it will write logs to /tmp directory instead of the
        #       /tmp/<vm_name>/.
        helpers.summary_log("Creating VM with Name: %s " % vm_name)
        helpers.summary_log("Created log_path %s" % self.log_path)
        # remote_qcow_bvs_path = kwargs.get("remote_qcow_bvs_path", "/var/lib/jenkins/jobs/bvs\ master/lastSuccessful/archive/target/appliance/images/bcf/controller-bcf-2.0.8-SNAPSHOT.qcow2")
        remote_qcow_bvs_path = kwargs.get(
            "remote_qcow_bvs_path",
            "/var/lib/jenkins/jobs/bcf_master/lastSuccessful/archive/controller-bcf-*.qcow2"
        )
        remote_qcow_mininet_path = kwargs.get(
            "remote_qcow_mininet_path",
            "/var/lib/jenkins/jobs/t6-mininet-vm/builds/lastSuccessfulBuild/archive/t6-mininet-vm/ubuntu-kvm/t6-mininet.qcow2"
        )
        topo_file = self._create_temp_topo(kvm_host=kvm_host,
                                           vm_name=vm_name)
        # set the BIG ROBOT Topo file for console connections
        helpers.bigrobot_topology(topo_file)
        helpers.bigrobot_params("none")
        kvm_handle = self._connect_to_kvm_host(hostname=kvm_host,
                                               user=kvm_user,
                                               password=kvm_password)
        # Refuse to create a duplicate domain on the host.
        if vm_name in kvm_handle.bash('sudo virsh list --all')['content']:
            helpers.summary_log(
                "VM with given name %s already exists in KVM Host %s"
                % (vm_name, kvm_host))
            return False
        if qcow_path is not None:
            # Image is already on the KVM host; just copy it into place.
            helpers.log(
                "QCOW path is provided using it locally NO SCP just copy to images.."
            )
            qcow_vm_path = self._cp_qcow_to_images_folder(
                kvm_handle=kvm_handle, qcow_path=qcow_path, vm_name=vm_name)
        else:
            # No local image: pull the latest build from Jenkins.
            helpers.log(
                "no VMDK path is given copying from latest bvs build from jenkins server"
            )
            if vm_type == 'mininet':
                helpers.log(
                    "Scp'ing Latest Mininet qcow file from jenkins to kvm Host.."
                )
                qcow_vm_path = self._scp_file_to_kvm_host(
                    kvm_handle=kvm_handle,
                    remote_qcow_path=remote_qcow_mininet_path,
                    vm_type='mininet',
                    vm_name=vm_name,
                    build_number=build_number)
            else:
                helpers.log(
                    "Scp'ing Latest BVS qcow file %s from jenkins to kvm Host.."
                    % remote_qcow_bvs_path)
                qcow_vm_path = self._scp_file_to_kvm_host(
                    kvm_handle=kvm_handle,
                    remote_qcow_path=remote_qcow_bvs_path,
                    vm_name=vm_name,
                    build_number=build_number)
        helpers.log("Creating VM on KVM Host with Name : %s " % vm_name)
        self.create_vm_on_kvm_host(vm_type=vm_type,
                                   qcow_path=qcow_vm_path,
                                   vm_name=vm_name,
                                   kvm_handle=kvm_handle,
                                   kvm_host=kvm_host,
                                   network_interface=network_interface,
                                   vm_ram=vm_ram)
        result['vm_name'] = vm_name
        result['kvm_host'] = kvm_host
        result['image_path'] = qcow_vm_path
        result['vm_ip'] = ip
        # result['content'] = helpers.file_read_once("%s/%s.log"
        #                                            % (self.log_path,
        #                                               vm_name))
        if vm_type == 'mininet':
            # FIX ME configure mininet with user specified ip / return the DHCP ip of mininet VM
            helpers.log("Success Creating Mininet vm!!")
            helpers.log("Configuring IP for mininet if provided")
            result['vm_ip'] = self.set_mininet_ip(node="c1", ip=ip,
                                                  get_ip=True)
            return result
        # For controller, attempt First Boot
        # NOTE(review): message says 60 sec but sleeps 30 — confirm intent.
        helpers.log("SLeep another 60 sec for controller to boot up..")
        time.sleep(30)
        result['vm_ip'] = self._configure_vm_first_boot(
            cluster_ip=cluster_ip,
            ip_address=ip,
            netmask=netmask,
            vm_host_name=vm_host_name,
            gateway=gateway)
        helpers.summary_log(
            "Copying firstboot-config on New Controller: %s"
            % result['vm_ip'])
        helpers.sleep(10)
        # NOTE(review): credentials below were redacted by a secrets
        # scrubber ("******"); restore the real admin user/password from
        # version control or configuration before use.
        bvs = ControllerDevConf(host=result['vm_ip'],
                                user="******",
                                password="******",
                                name="test-bvs")
        bvs.config("copy running-config snapshot://firstboot-config")
        helpers.summary_log("Success saving firstboot-config")
        helpers.summary_log("Done! Logs are written to %s" % self.log_path)
        return result
    except:
        # Catch-all: report the failure in the result dict instead of
        # propagating, so callers always get a status.
        inst = helpers.exception_info_traceback()
        helpers.log("Exception Details:\n%s" % inst)
        result['status_code'] = False
        result['status_descr'] = inst
        return result
def _scp_file_to_kvm_host(self, vm_name=None, remote_qcow_path=None,
                          kvm_handle=None, vm_type="bcf", build_number=None,
                          scp=True):
    """Copy the latest Jenkins build image onto the KVM host.

    Determines the Jenkins project from remote_qcow_path, compares the
    latest Jenkins build number with what is already on the KVM host,
    scp's the qcow2 over if needed, and copies it into
    /var/lib/libvirt/images/<vm_name>.qcow2.

    :param vm_name: name used for the destination image file
    :param remote_qcow_path: Jenkins-side path to the qcow2 artifact
    :param kvm_handle: bash/SSH handle to the KVM host
    :param vm_type: "bcf" (default) or "mininet"; selects the file-name scheme
    :param build_number: pin to a specific build instead of the latest
    :param scp: when True, skip the copy if the host already has the build
    :return: local path of the image on the KVM host
    """
    # for getting the latest jenkins build from jenkins server kvm_host ssh key should be copied to jenkins server
    output = kvm_handle.bash('uname -a')
    helpers.log("KVM Host Details : \n %s" % output['content'])
    kvm_handle.bash('cd /var/lib/libvirt/')
    helpers.log(" GOT VM_TYPE : %s" % vm_type)
    # Ensure a world-writable bvs_images staging directory exists.
    if "No such file or directory" in kvm_handle.bash(
            'cd bvs_images/')['content']:
        helpers.log(
            "No BVS_IMAGES dir in KVM Host @ /var/lib/libvirt creating one to store bvs vmdks"
        )
        kvm_handle.bash('sudo mkdir bvs_images')
        kvm_handle.bash('sudo chmod -R 777 bvs_images/')
    else:
        helpers.log(
            'BVS_IMAGES dir exists in KVM Host @ /var/lib/libvirt/bvs_images/ copying latest vmdkd from jenkins server'
        )
        kvm_handle.bash('sudo chmod -R 777 ../bvs_images/')
        kvm_handle.bash('cd bvs_images')
    helpers.log("Latest VMDK will be copied to location : %s at KVM Host"
                % kvm_handle.bash('pwd')['content'])
    helpers.log("Executing Scp cmd to copy latest bvs vmdk to KVM Server")
    jenkins_project_name = None
    # Derive the Jenkins project name from the artifact path.
    if remote_qcow_path is not None:
        match = re.match(r'/var/lib/jenkins/jobs/(.*)/lastSuccessful/',
                         remote_qcow_path)
        if match:
            jenkins_project_name = match.group(1)
    helpers.summary_log("Using Jenkins Project Name: %s"
                        % jenkins_project_name)
    latest_build_number = self._get_latest_jenkins_build_number(
        vm_type, jenkins_project_name=jenkins_project_name)
    latest_kvm_build_number = self._get_latest_kvm_build_number(
        vm_type, kvm_handle, jenkins_project_name=jenkins_project_name)
    # An explicit build_number pins both sides to the same build.
    if build_number is not None:
        helpers.log(
            "Build Number is provided resetting latest builds to %s"
            % build_number)
        latest_build_number = build_number
        latest_kvm_build_number = build_number
    file_name = None
    if vm_type == 'bcf':
        if jenkins_project_name == "bcf_master":
            file_name = "controller-jf_bcf_virtual-%s.qcow2" % (
                latest_build_number)
        else:
            file_name = "controller-%s_virtual-%s.qcow2" % (
                jenkins_project_name, latest_build_number)
        helpers.log("Adding virtual tag to build file Name : %s" % file_name)
    elif vm_type == 'mininet':
        file_name = "mininet-%s.qcow2" % latest_build_number
    helpers.log("Latest Build Number on KVM Host: %s"
                % latest_kvm_build_number)
    helpers.log("Latest Build Number on Jenkins: %s" % latest_build_number)
    if str(latest_kvm_build_number) == str(latest_build_number) and scp:
        helpers.log(
            "Skipping SCP as the latest build on jenkins server did not change from the latest on KVM Host"
        )
    else:
        scp_cmd = (
            'scp -o "UserKnownHostsFile=/dev/null" -o StrictHostKeyChecking=no "%s@%s:%s" %s'
            % (JENKINS_USER, JENKINS_SERVER, remote_qcow_path, file_name))
        helpers.log("SCP command arguments:\n%s" % scp_cmd)
        # NOTE(review): the prompt list below was mangled by a secrets
        # scrubber ('******' replaced part of the original literals);
        # recover the original prompt patterns from version control.
        scp_cmd_out = kvm_handle.bash(
            scp_cmd, prompt=[r'.*password:'******'.*#', r'.*$ '])['content']
        if "password" in scp_cmd_out:
            helpers.log("sending bsn password..")
            helpers.log(kvm_handle.bash('bsn')['content'])
        else:
            helpers.log("SCP should be done:\n%s" % scp_cmd_out)
    helpers.summary_log("Success SCP'ing latest Jenkins build !!")
    helpers.summary_log("Using Jenkins Build #%s (image name: '%s')"
                        % (latest_build_number, file_name))
    helpers.log("Setting BUILD_NUM Env...")
    helpers.set_env("BUILD_NUM", str(latest_build_number))
    helpers.log("env BUILD_NUM: %s" % helpers.get_env("BUILD_NUM"))
    # Stage the image under libvirt's images directory with the VM's name.
    kvm_handle.bash('sudo cp %s ../images/%s.qcow2' % (file_name, vm_name))
    local_qcow_path = "/var/lib/libvirt/images/%s.qcow2" % vm_name
    # vm_name = "%s_BVS" % current_user
    return local_qcow_path
def __init__(self, name, ip, user=None, password=None, t=None,
             protocol=None, no_ping=False, devconf_debug_level=0):
    """Initialize a node handle from topology parameters.

    :param name: node name in the topology (or 'node-<ip>' for ad-hoc nodes)
    :param ip: node IP address; may be omitted when 'set_session_ssh' is
               disabled in the node's params
    :param user: login user (stored; use unclear from this block)
    :param password: login password (stored; use unclear from this block)
    :param t: test handle providing topology_params()
    :param protocol: session protocol; falls back to params, then 'ssh'
    :param no_ping: when True, skip the initial reachability ping
    :param devconf_debug_level: when > 0, overrides the params-provided
                                'set_devconf_debug_level'
    """
    if not name:
        helpers.environment_failure("Node name is not defined")
    self._name = name
    self._user = user
    self._password = password
    self._ip = None
    self._console_info = None
    self.http_port = None
    self.base_url = None
    self.t = t
    self.params = t.topology_params()
    self.is_pingable = False
    self.rest = None  # REST handle
    self.dev = None  # DevConf handle (SSH)
    self.dev_console = None  # Console handle
    self.dev_debug_level = 0
    # If name are in the form 'node-<ip_addr>', e.g., 'node-10.193.0.43'
    # then they are nodes spawned directly by the user. Don't try to
    # look up their attributes in params since they are not defined.
    if self.params and not name.startswith('node-'):
        self.node_params = self.params[name]
        val = helpers.params_val('set_devconf_debug_level',
                                 self.node_params)
        if val is not None:
            self.dev_debug_level = val
            helpers.log("Devconf for '%s' set to debug level %s"
                        % (name, self.dev_debug_level))
    else:
        self.node_params = {}
    if devconf_debug_level > 0:
        # override set_devconf_debug_level
        self.dev_debug_level = devconf_debug_level
    self._port = self.node_params.get('port', None)
    self._protocol = protocol if protocol else self.node_params.get(
        'protocol', 'ssh')
    self._privatekey = self.node_params.get('privatekey', None)
    self._privatekey_password = self.node_params.get(
        'privatekey_password', None)
    self._privatekey_type = self.node_params.get('privatekey_type', None)
    self._hostname = self.node_params.get('hostname', None)
    if not ip:
        if helpers.params_is_false('set_session_ssh', self.node_params):
            # set_session_ssh is False, so IP address doesn't have to be
            # defined
            pass
        else:
            helpers.environment_failure(
                "Node IP address is not defined for '%s'" % name)
    else:
        self._ip = ip.lower()
        # IP might be 'dummy'
        if self.ip() == 'dummy':
            helpers.environment_failure("IP address for '%s' is 'dummy'."
                                        " Needs to be populated."
                                        % self.name())
    # Initial reachability check, unless explicitly bypassed.
    if helpers.is_esb():
        helpers.summary_log("ESB environment - bypassing initial ping")
    elif no_ping:
        pass
    elif helpers.params_is_false('set_init_ping', self.node_params):
        helpers.log("'set_init_ping' is disabled for '%s', bypassing"
                    " initial ping" % name)
    else:
        self.pingable_or_die()
    # Track this node in the module-level registry of active nodes.
    if name not in _active_nodes:
        _active_nodes.append(name)