def stop_multicast(self, pid, process_file):
    """
    Stop the multicast process running in the background.

    param:
        :pid: process id of the background multicast process
        :process_file: process file to clean up

    return:
        a tuple of True and None on success, False and an error
        message on failure

    example:
        stop_multicast(pid, process_file)
    """
    logger.info("\nIn subroutine: " + sys._getframe().f_code.co_name)
    output = self.kill_background(pid)
    if re.search(r'No such process', output) is not None:
        return (False, output)
    self.remove_file(process_file)
    # Remove the multicast route that was added for the traffic.
    cmd_route = "route del dvmrp.mcast.net"
    output = self._access.shell(cmd_route)
    if re.search(r'Unknown host', output[0]) is not None:
        logger.error("Failed to del route")
        return (False, output[0])
    # Verify the iperf process with this pid is really gone.
    search_expr = 'iperf | grep {}'.format(pid)
    output = self.get_pid(search_expr)
    if output:
        logger.error("Can not stop multicast")
        return (False, output)
    logger.info("Successfully stopped multicast")
    return (True, None)
def _va_get_full_exe_name_from_core(self, corefile, *args, **kwargs):
    """
    Resolve the executable path that produced a core file.

    kwargs:
        corefile: core file name under /usr-varmour/core/

    Return:
        str: bin file path, e.g. /opt/varmour/bin/sw_node
        False: the bin file could not be determined or does not exist
    """
    match = re.search(r'/usr-varmour/core/([a-zA-Z0-9_]+)', corefile,
                      re.M | re.I)
    try:
        base_name = match.group(1)
    except Exception as err:
        logger.error(err)
        return False
    # sshd lives in a fixed system location; everything else is a
    # varmour binary.
    if base_name == "sshd":
        exe_path = "/usr/sbin/sshd"
    else:
        exe_path = "/opt/varmour/bin/%s" % base_name
    # Confirm the executable actually exists on the device.
    listing = self._access.va_shell('ls {}'.format(exe_path))
    if re.search(r'No such file or directory', listing,
                 re.M | re.I) is None:
        logger.info("exec file is {}".format(exe_path))
        return exe_path
    logger.error('Not found the bin file {}'. format(exe_path))
    return False
def start(self):
    """
    Start the ssh traffic process in the background on the client.

    Launches the configured ssh command via a LinuxShell session and
    verifies with ``ps`` that an sshpass process owned by the
    configured user is running.

    Returns:
        tuple: (True, ps output) when the session is detected,
               (False, None) otherwise.
    """
    cmd = self._ssh_cmd
    if not cmd:
        logger.info('Command is not passed ')
        # TODO: raise exception or warning instead of continuing
    logger.info('Client' + str(self._client))
    shell = LinuxShell(host=self._client, user=self._ssh_userid,
                       pswd=self._ssh_passwd)
    output = shell.exec_command(cmd)
    logger.info('Command execution Output :\n' + str(output))
    # Confirm the background process is up by listing sshpass.
    output = shell.exec_command('ps -ef | grep sshpass')
    logger.info('check the process is running or not in below '
                'output :\n' + str(output))
    if self._ssh_userid in str(output):
        # BUG FIX: corrected "sucessfully" typos in the log messages.
        logger.info('SSH session started successfully '
                    'output : \n' + str(output))
        return True, output
    else:
        logger.error("SSH session NOT started successfully")
        return False, None
def va_get_segmentation(self):
    """
    Build the segmentation data list by merging the topo-file
    'Segmentations' entries with the test-parameter 'Segmentation'
    mapping.

    Parameter: None

    Returns:
        tuple: (True, segmentation) on success,
               (False, error_message) when either source is missing.
    """
    topo_info = self._top_params
    if 'Segmentations' not in topo_info:
        error_msg = 'Not found Segmentation data in {} file'.format(self.topology_yaml)
        logger.error(error_msg)
        return False, error_msg
    segmentation = copy.deepcopy(topo_info.get('Segmentations'))
    user_params = self._test_params.get('user_params')
    if 'Segmentation' not in user_params:
        error_msg = 'Not found Segmentation data in {} file'.format(self.test_yaml)
        logger.error(error_msg)
        return False, error_msg
    seg_map = user_params.get('Segmentation')
    # Replace each segment name with its test-param definition.
    for seg_info in segmentation:
        for segment in seg_info.get('segments'):
            segment['segment'] = seg_map.get(segment.get('segment'))
    return True, segmentation
def stop(self, *args, **kwargs):
    """
    Stop the icmp traffic process spawned by the start method.

    Waits (up to self._timeout seconds) for the ping to complete,
    removes the remote output file, and calls __del__ to de-link
    references created by the traffic object, which also forces any
    open paramiko channels to the remote devices to be closed.
    """
    client = self._conf_client
    if self._stats == 'started':
        logger.info('Time to ping: {} seconds'.format(self._timeout))
        sleeptime = 1
        elapsedtime = 0
        # Poll once per second until the ping completes or the
        # timeout budget is exhausted.
        while self._stats != 'completed' and elapsedtime <= self._timeout:
            time.sleep(sleeptime)
            self.get_stats()
            elapsedtime += sleeptime
        if self._stats != 'completed':
            logger.error('Icmp traffic not finished')
        else:
            logger.info('Icmp traffic finished')
    if self._outfile is not None:
        self.get_stats()
        try:
            client.exec_command('sudo rm -f {}'.format(self._outfile))
        except AttributeError as e:
            # client may already be gone (e.g. never connected).
            logger.warning(e)
    super(Icmp, self).__del__()
def remove_vm_vnic(self, vmname, interface):
    """
    Remove a vnic (identified by its device label) from a VM.

    Args:
        :vmname (str): name of vm that user wants to abstract from
            vm object list
        :interface (str): device label of the vnic to remove,
            e.g. "Network adapter 1"

    Raise:
        VmNicNotAvailable when the VM has no vnic with that label;
        vmodl.MethodFault errors are caught and logged.
    """
    try:
        vm_obj = self._get_vm_object_by_name(vmname)
        nic_change = []
        # Collect a remove spec for every ethernet card whose label
        # matches the requested interface.
        for device in vm_obj.config.hardware.device:
            if isinstance(device, vim.vm.device.VirtualEthernetCard):
                if device.deviceInfo.label == interface:
                    nic_card_spec = vim.vm.device.VirtualDeviceSpec()
                    nic_card_spec.operation = \
                        vim.vm.device.VirtualDeviceSpec.Operation.remove
                    nic_card_spec.device = device
                    nic_change.append(nic_card_spec)
        if nic_change != []:
            delete_nic_config_spec = \
                vim.vm.ConfigSpec(deviceChange=nic_change)
            delete_nic_task = \
                vm_obj.ReconfigVM_Task(spec=delete_nic_config_spec)
            self._wait_for_tasks([delete_nic_task], "delete nic")
        else:
            logger.error("VM: {} has no interface name {}. "
                         "Please Check Config and Try to change "
                         "back".format(vmname, interface))
            # Propagates to the caller: only vmodl faults are caught.
            raise VmNicNotAvailable(vmname, interface)
    except vmodl.MethodFault as error:
        logger.error("Exception raised {}".format(error.msg))
def stop(self):
    """
    Stop the ssh traffic process spawned by the start method.

    Waits for the transfer to complete using a poll interval that
    grows with the expected transfer time, removes the remote output
    file, then calls __del__ to release references (which also closes
    open paramiko channels).
    """
    client = self._conf_client
    if self._stats == 'started':
        transfer_time = int(self._timeout)
        logger.info('Time to transfer: {} seconds'.format(transfer_time))
        sleeptime = 10
        elapsedtime = 0
        while self._stats != 'completed' and elapsedtime <= transfer_time:
            if int(transfer_time / 60) != 0:
                # Scale the poll interval up for multi-minute
                # transfers so we do not poll too often.
                sleeptime = sleeptime * int(transfer_time / 60)
            time.sleep(sleeptime)
            self.get_stats()
            elapsedtime += sleeptime
            sleeptime += 10
        if self._stats != 'completed':
            logger.error('Ssh traffic not finished')
        else:
            logger.info('Ssh traffic finished')
    if self._outfile is not None:
        self.get_stats()
        try:
            client.exec_command('sudo rm -f {}'.format(self._outfile))
        except AttributeError as e:
            # client may already have been torn down.
            logger.warning(e)
    super(Ssh, self).__del__()
def get_sw_name(self):
    """
    Get the names of all vswitches on the host.

    Parses the vswitch listing output: each vswitch name appears as
    the first word of the line following the column-header line.

    Kwargs:
        none

    Returns:
        list of vswitch names on success, False when none are found.
    """
    results = self._shell.exec_command(self.list_vswitch_cmd)
    result = results[0]
    logger.debug(result)
    linelist = re.sub("\r\n", "\n", result).split("\n")
    header_pattern = re.compile(
        r'Switch Name\s+Num Ports\s+Used Ports\s+Configured Ports\s+MTU\s+Uplinks'
    )
    switch_name = []
    for idx, line in enumerate(linelist):
        # BUG FIX: guard idx + 1 so a header on the last line no
        # longer raises IndexError when reading the following line.
        if header_pattern.search(line) and idx + 1 < len(linelist):
            swinfo = re.match(r'(\w+)\s+', linelist[idx + 1])
            if swinfo is not None:
                switch_name.append(swinfo.group(1))
    if not switch_name:
        logger.error("Failed to get vswitch name")
        return False
    logger.info("Succeed to get vswitch name")
    return switch_name
def power_off_vm(self, vmname):
    """
    Power off the given VM.

    Args:
        :vmname (str): name of vm that user wants to abstract from
            vm object list

    Return:
        :result (bool)
            True: If Successfully OFF.
            False: If Fail to VM Power OFF.
        On vmodl.MethodFault the fault message string is returned.

    Raise:
        vMNotFound Exception.
    """
    try:
        vm_obj = self._get_vm_object_by_name(vmname)
        logger.info("VM {} is \ {}".format(vmname, vm_obj.runtime.powerState))
        # Only issue the power-off task when the VM is currently on.
        if (self.is_vm_power_on(vm_obj.name)):
            power_off_vm_task = vm_obj.PowerOffVM_Task()
            return self._wait_for_tasks([power_off_vm_task],
                                        task_name="Power Off VM")
        else:
            logger.warning("VM {} is already powered off, \ Can not perform this operation".format(vmname))
    except vmodl.MethodFault as error:
        logger.error("Exception raise {}".format(error.msg))
        return error.msg
def _enable_cluster_drs(self, clustername, vmotion_rate=3):
    """
    Enable DRS on a cluster in fully-automated mode.

    Args:
        :clustername (str): name of the DRS cluster to enable.
        :vmotion_rate (int): DRS vmotion rate, defaults to 3.

    Raise:
        vmodl.MethodFault errors are caught and logged.
    """
    try:
        cluster_obj = self._get_cluster_object_by_name(clustername)
        DRSConfig = vim.cluster.DrsConfigInfo()
        #vim.cluster.DrsConfigInfo.DrsBehavior.fullyAutomated
        DRSConfig.enabled = True
        DRSConfig.vmotionRate = vmotion_rate
        DRSConfig.defaultVmBehavior = \
            vim.cluster.DrsConfigInfo.DrsBehavior.fullyAutomated
        clusterSpec = vim.cluster.ConfigSpecEx()
        clusterSpec.drsConfig = DRSConfig
        logger.info("enabling cluster {}".format(clustername))
        enable_task = cluster_obj.ReconfigureEx(spec=clusterSpec,
                                                modify=True)
        self._wait_for_tasks([enable_task], task_name="Enable DRS")
    except vmodl.MethodFault as error:
        logger.error("Exception raise {}".format(error.msg))
def get_mac_address(self, interface='eth1', style='linux'):
    """
    Helper method to get the MAC address of an interface.

    Returns:
        :str of an MAC address on success or None on failure
        :MAC address format is linux style by default like
         00:00:0c:c0:ab:07
        :with varmour style which is like 0:0:c:c0:ab:7
    """
    output = self.show_interface(interface)
    try:
        mac_addr = output[0][3]
    except IndexError:
        return None
    self._access.log_command("MAC address is {}".format(mac_addr))
    if not style or style == 'linux':
        return mac_addr
    if style == 'varmour':
        # varmour style drops a single leading zero from each octet.
        shortened = [
            octet[1:] if octet.startswith('0') else octet
            for octet in mac_addr.split(':')
        ]
        return ':'.join(shortened)
    logger.error(
        "Only 'linux' or 'varmour' style is supported for get_mac_address"
    )
    return None
def _create_cluster_group_by_host(self, hostname, clustername,
                                  groupname="host-group-1"):
    """
    Create a DRS host group on a cluster containing a single host.

    Args:
        :hostname (str): name of host to put in the group.
        :clustername (str): name of DRS cluster where user wants to
            create the host group.
        :groupname (str): name of host group.

    Raise:
        vmodl.MethodFault errors are caught and logged.
    """
    try:
        host_obj = self._get_host_object_by_name(hostname)
        cluster_obj = self._get_cluster_object_by_name(clustername)
        cluster_group_info = vim.cluster.HostGroup()
        cluster_group_info.name = groupname
        cluster_group_info.host = [host_obj]
        group_spec = vim.cluster.GroupSpec()
        group_spec.info = cluster_group_info
        group_spec.operation = 'add'
        # NOTE(review): setting removeKey alongside an 'add' operation
        # looks suspicious - confirm against the vSphere GroupSpec API.
        group_spec.removeKey = groupname
        clusterSpec = vim.cluster.ConfigSpecEx()
        clusterSpec.groupSpec.append(group_spec)
        logger.info("Creating group {} on the host {}".format(
            groupname, hostname))
        host_group_task = cluster_obj.ReconfigureEx(spec=clusterSpec,
                                                    modify=True)
        self._wait_for_tasks([host_group_task],
                             task_name="Host Group Creation")
    except vmodl.MethodFault as error:
        logger.error("Exception raised {}".format(error.msg))
def exec_background(self, cmd=None, outfile='temp', redirect=False,
                    outdir=None, search_expr=None, search_full=False):
    """
    Run a command as a background process on the remote host.

    kwargs:
        :cmd (str): linux command to be run in the background
        :outfile (str): base name for the redirected output file
        :redirect (bool): when True, redirect stdout/stderr to a
            timestamped file under outdir
        :outdir (str): directory for the redirected output file;
            defaults to the remote home directory
        :search_expr (str): pattern used to look up the background
            pid; defaults to cmd
        :search_full (bool): True/False. The pattern is normally only
            matched against the process name. When the value is False,
            the full command line is used

    returns:
        tuple: (pid, remote_loc) - pid of the remote process (or
        None) and the redirected output path (or None).
    """
    pid = None
    remote_loc = None
    if not cmd:
        # BUG FIX: previously fell through and crashed on cmd.split()
        # below when cmd was None; bail out early instead.
        # TODO: raise exception or warning
        return pid, remote_loc
    if not self.check_cmd_existence(cmd.split(' ')[0]) or \
            not self.check_writable():
        return pid, remote_loc
    if not search_expr:
        search_expr = cmd
    if not outdir:
        outdir = self.get_home_dir()
    if redirect:
        # Timestamp-derived suffix keeps repeated runs from
        # clobbering each other's output files.
        uid = strftime("%H:%M:%S", gmtime()).replace(':', '')
        file_name = '.'.join(('_'.join((outfile, uid)), 'txt'))
        remote_loc = '{}/{}'.format(outdir, file_name)
        bg_cmd = "nohup {} > {} 2>&1 &".format(cmd, remote_loc)
    else:
        bg_cmd = "nohup {} 2>&1 &".format(cmd)
    output = self._access.exec_command(bg_cmd)
    if output is False:
        logger.error("command: {} failed!".format(cmd))
    pid = self.get_pid(search_expr, search_full)
    if not pid:
        pass  # TODO: raise or log warn
    return pid, remote_loc
def convert_vm_to_template(self, vmname):
    """
    Mark the named VM as a template.

    Args:
        :vmname (str): name of the vm to convert.

    Returns:
        None on success, the VmNotFound error message on failure.
    """
    try:
        self._get_vm_object_by_name(vmname).MarkAsTemplate()
    except VmNotFound as err:
        logger.error(err.args[0])
        return err.args[0]
    return None
def recover_vcenter_workloads(self, filename=None):
    """
    Recover workloads to their original state (disaster recovery).

    Reads the backup written by take_backup_vcenter() from *filename*,
    compares it with the current vcenter state and, for every VM whose
    host or network differs, vmotions the VM back and/or re-attaches
    its vnics to the original portgroups.

    Args:
        :filename (str): path of the backup file. When None, nothing
            is done.
    """
    if not filename:
        return
    try:
        # BUG FIX: the previous code iterated fd.readline(), which
        # walks the *characters* of the first line only; read the
        # whole file, and close it even on error.
        with open(filename, 'r') as fd:
            original = fd.read()
    except IOError:
        logger.error("Can not Open file {}. Please see filename "
                     "and path".format(filename))
        # BUG FIX: previously fell through to literal_eval('') here.
        return
    original_state = ast.literal_eval(original)
    current_state = self.take_backup_vcenter()
    for current_obj in current_state:
        vm = current_obj['name']
        current_host = current_obj['host']
        current_network = current_obj['network']
        for original_obj in original_state:
            if original_obj['name'] != vm:
                continue
            if original_obj['host'] == current_host:
                logger.info("Host of VM:{} is the same as "
                            "original. Not changed.".format(vm))
            else:
                logger.info("Host is not the same for VM:{}"
                            "original host was {}, changed "
                            "host is {}.".format(
                                vm, original_obj['host'], current_host))
                logger.info(
                    "Going to change host for VM:{}...".format(vm))
                self.vmotion(vm, original_obj['host'])
            if current_network == original_obj["network"]:
                logger.info("Network of VM:{} is the same as original."
                            " Not changed.".format(vm))
            else:
                logger.info("Network is not the same for VM:{}"
                            " original network was {}, "
                            "changed network is "
                            "{}".format(vm, original_obj["network"],
                                        current_network))
                logger.info("Network is going to change....")
                logger.info("Removing va-tag..")
                self.remove_tag_from_vm(vm)
                for nets in original_obj["network"]:
                    for interface, portgroup in nets.items():
                        self.change_vm_vnic(vm, interface, portgroup)
def _va_get_version(self): try: version_info = self._access.va_shell("cat /version") except Exception as err: logger.error(err) build_version = re.search(r'cat /version\s+(.*)', \ version_info, re.I | re.M).group(1).strip() return (build_version)
def update_vlan(self, pg_name, vlan_id):
    """
    Update a portgroup on this vSwitch to the given vlan.

    Kwargs:
        :pg_name (str): valid portgroup name of the vSwitch
        :vlan_id (str): valid vlan id of the vSwitch

    Returns:
        False: failed
        True: succeed
    """
    switch_name = self._name
    #self.log_command(self.list_vswitch_cmd)
    results = self._shell.exec_command(self.list_vswitch_cmd)
    result = results[0]
    reg = r'switch\s+name\s+num\s+ports\s+used\s+ports\s+configured\s+\ ports\s+mtu\s+uplinks'
    # Scan for the header line; when it never matches, re-run the
    # listing command once and continue with the fresh output.
    for idx in range(0, len(result.split('\n'))):
        if re.search(reg, result.split('\n')[idx].strip(),
                     re.I) is not None:
            break
        elif idx == len(result.split('\n')) - 1:
            #self.log_command(self.list_vswitch_cmd)
            results = self._shell.exec_command(self.list_vswitch_cmd)
            result = results[0]
            break
    chk_info = re.findall(switch_name, result)
    if (len(chk_info) == 0):
        # This vSwitch does not appear in the listing at all.
        return False
    buffer = re.sub("\r\n", "\n", result)
    linelist = re.split("\n", buffer)
    for line in linelist:
        # Match the portgroup row with or without a trailing column.
        learned_mac = re.findall('\s+'+str(pg_name) +
                                 '\s+[\d]+\s+[\d]+\s+[^\s]'+'|\s+'+str(pg_name)+
                                 '\s+[\d]+\s+[\d]+', line)
        if (len(learned_mac) > 0):
            # The first non-empty token is the portgroup name used in
            # the esxcfg-vswitch command.
            int_info = learned_mac[0].split(" ")
            for val in int_info:
                if val != "":
                    int_name = val
                    break
            cmd = "esxcfg-vswitch -p {} -v {} {}".format(int_name,
                                                         vlan_id,
                                                         switch_name)
            self.log_command(cmd)
            self._shell.exec_command(cmd)
    if self.check_vlan(pg_name=pg_name, vlan_id=vlan_id):
        logger.info(
            "Succeed to update portgroup {} to vlan {}".format(
                pg_name, vlan_id))
        return True
    logger.error("Failed to update portgroup {} to vlan {}".format(
        pg_name, vlan_id))
    return False
def service_running(self, service=None):
    """
    Check whether a service daemon is running on the remote machine.

    kwargs:
        :service (str): name of the service daemon on the host.
            'tftpd-hpa', 'bind9' and 'nfs-kernel-server' receive
            service-specific handling.

    Returns:
        bool: True when the service is found running, False otherwise.
    """
    executable_service = service
    if service == 'tftpd-hpa':
        service = 'tftpd'
        executable_service = 'in.{}'.format(service)
    elif service == 'bind9':
        service = 'named'
        executable_service = '{}'.format(service)
    executables = [
        '/usr/bin/{}'.format(executable_service),
        '/usr/sbin/{}'.format(executable_service)
    ]
    out_list = list()
    ps_cmd = "ps ax | grep {} |grep -v grep".format(service)
    if service == 'nfs-kernel-server':
        executable_service = 'Started NFS server and services'
        ps_cmd = '/etc/init.d/{} status'.format(service)
    ps_output = self._access.exec_command(ps_cmd)
    logger.debug('Service info: {}'.format(ps_output))
    if not ps_output:
        logger.error('Service "{}" is not running!'.format(service))
        return False
    if service == 'nfs-kernel-server':
        # Init-script status: the success message is on the last line.
        if re.search(r'%s' % executable_service,
                     ps_output.splitlines()[-1]) is not None:
            logger.info('Service is running: \n{}'.format(ps_output))
            return True
        logger.error('Service "{}" is not running!'.format(service))
        return False
    # Split each ps line into fields, keeping the command text as a
    # single trailing field when it matches ps_cmd.
    for line in ps_output.splitlines():
        line = line.rstrip()
        cmd_pos = line.find(ps_cmd)
        if cmd_pos >= 0:
            ps_out = line[0:cmd_pos].split()
            ps_out.append(line[cmd_pos:])
        else:
            ps_out = line.split()
        out_list.append(tuple(ps_out))
    # BUG FIX: the inner loop no longer shadows 'service', and the
    # function now returns an explicit False (it used to fall off the
    # end and return None) when no executable path matches.
    for fields in out_list:
        for exe_path in executables:
            if exe_path in fields:
                logger.info(
                    'Service is running: \n{}'.format(ps_output))
                return True
    logger.error('Service "{}" is not running!'.format(service))
    return False
def update_vm_virtual_nic_state(self, vmname, nic, new_nic_state):
    """
    Connect, disconnect or delete a VM's vnic.

    Args:
        :vmname (str): name of vm that user wants to change network.
        :nic (str): VM's network interface label. example,
            "Network adapter 1".
        :new_nic_state (str): connect/disconnect/delete

    Raise:
        RuntimeError when the vnic is not found on the VM (propagates
        to the caller); vmodl.MethodFault errors are caught and
        logged.
    """
    try:
        virtual_nic_device = None
        vm_obj = self._get_vm_object_by_name(vmname)
        for dev in vm_obj.config.hardware.device:
            if isinstance(dev, vim.vm.device.VirtualEthernetCard) \
                    and dev.deviceInfo.label == nic:
                virtual_nic_device = dev
        if not virtual_nic_device:
            raise RuntimeError(
                'Virtual {} could not be found.'.format(nic))
        virtual_nic_spec = vim.vm.device.VirtualDeviceSpec()
        # delete maps to a remove operation; connect/disconnect are
        # edits of the existing device.
        virtual_nic_spec.operation = \
            vim.vm.device.VirtualDeviceSpec.Operation.remove \
            if new_nic_state == 'delete' \
            else vim.vm.device.VirtualDeviceSpec.Operation.edit
        virtual_nic_spec.device = virtual_nic_device
        virtual_nic_spec.device.key = virtual_nic_device.key
        virtual_nic_spec.device.macAddress = virtual_nic_device.macAddress
        virtual_nic_spec.device.backing = virtual_nic_device.backing
        #virtual_nic_spec.device.backing.port = \
        #virtual_nic_device.backing.port
        virtual_nic_spec.device.wakeOnLanEnabled = \
            virtual_nic_device.wakeOnLanEnabled
        connectable = vim.vm.device.VirtualDevice.ConnectInfo()
        if new_nic_state == 'connect':
            connectable.connected = True
            connectable.startConnected = True
        elif new_nic_state == 'disconnect':
            connectable.connected = False
            connectable.startConnected = False
        else:
            # delete: keep the device's existing connect info.
            connectable = virtual_nic_device.connectable
        virtual_nic_spec.device.connectable = connectable
        dev_changes = []
        dev_changes.append(virtual_nic_spec)
        spec = vim.vm.ConfigSpec()
        spec.deviceChange = dev_changes
        task = vm_obj.ReconfigVM_Task(spec=spec)
        self._wait_for_tasks([task], "Update nic state")
    except vmodl.MethodFault as error:
        logger.error("Exception raised {}".format(error.msg))
def change_vm_name(self, oldname, newname):
    """
    Rename a VM.

    Args:
        :oldname (str): current name of the vm.
        :newname (str): new name for the vm.

    Returns:
        result of the rename-task wait on success, None when any
        exception occurred (the error is logged).
    """
    try:
        vm_obj = self._get_vm_object_by_name(oldname)
        VM = self.content.searchIndex.FindByUuid(None,
                                                 vm_obj.config.uuid,
                                                 True, False)
        specification = vim.vm.ConfigSpec(name=newname)
        task = VM.ReconfigVM_Task(specification)
        return self._wait_for_tasks([task], task_name="Rename VM")
    except Exception as error:
        # BUG FIX: generic Exceptions have no .msg attribute, which
        # made this handler itself raise AttributeError.
        logger.error("Exception raised {}".format(error))
def _enable_affinity_rule(self, clustername, rule_name, flag=True):
    """
    Enable or disable an existing DRS affinity rule on a cluster.

    Args:
        :clustername (str): name of DRS cluster holding the rule.
        :rule_name (str): name of the affinity rule.
        :flag (bool): True to enable the rule, False to disable it.

    Return:
        result of the reconfigure-task wait when the rule is found,
        "ERROR" when no rule with that name exists.

    Raise:
        vmodl.MethodFault errors are caught and logged.
    """
    try:
        found = False
        cluster_obj = self._get_cluster_object_by_name(clustername)
        rules = cluster_obj.configurationEx.rule
        for rule in rules:
            if rule.name == rule_name:
                found = True
                # Re-submit the rule with only 'enabled' changed; all
                # other fields are copied from the existing rule.
                rule_spec = vim.cluster.RuleSpec()
                rule_spec.operation = "edit"
                rule_spec.removeKey = rule.key
                rule_info = vim.cluster.VmHostRuleInfo()
                rule_info.enabled = flag
                rule_info.name = rule.name
                rule_info.key = rule.key
                rule_info.mandatory = rule.mandatory
                rule_info.vmGroupName = rule.vmGroupName
                rule_info.affineHostGroupName = rule.affineHostGroupName
                rule_info.userCreated = rule.userCreated
                rule_spec.info = rule_info
                clusterSpec = vim.cluster.ConfigSpecEx()
                clusterSpec.rulesSpec.append(rule_spec)
                if flag:
                    logger.info(
                        "Enable affinity rule {} on cluster {}".format(
                            rule_name, clustername))
                else:
                    logger.info(
                        "Disable affinity rule {} on cluster {}".format(
                            rule_name, clustername))
                enable_rule_task = cluster_obj.ReconfigureEx(
                    spec=clusterSpec, modify=True)
                return self._wait_for_tasks([enable_rule_task],
                                            task_name="Change Rule")
        if not found:
            logger.info("Could not find the rule {} on cluster {}".format(
                rule_name, clustername))
            return "ERROR"
    except vmodl.MethodFault as error:
        logger.error("Exception raise {}".format(error.msg))
def take_backup_vcenter(self, filename=None):
    """
    Snapshot the vcenter topology (VM name, host, vnic networks).

    Walks every VM, records its host and, for each virtual NIC, the
    network it is attached to (resolving DVS portgroup keys to
    portgroup names).  Optionally persists the result to *filename*.

    Args:
        :filename (str): full path where the snapshot should be
            saved.

    Returns:
        :data (list): one dict per VM with 'name', 'host' and
        'network' keys, suitable for recover_vcenter_workloads().
    """
    metadata_for_vcenter = []
    vm_obj_list = self._get_content("vm")
    dvs_obj_list = self._get_content("dvs")
    for vm_obj in vm_obj_list:
        if 'name' not in dir(vm_obj.runtime.host):
            # VM without a resolvable host - skip it.
            continue
        host = vm_obj.runtime.host.name
        vm_network_list = []
        for device in vm_obj.config.hardware.device:
            if not isinstance(device,
                              vim.vm.device.VirtualEthernetCard):
                continue
            vm_network = {}
            if "DVSwitch:" in device.deviceInfo.summary:
                # Resolve the DVS portgroup key to its readable name.
                # (BUG FIX: the filter used to be evaluated twice.)
                port_group_key = device.backing.port.portgroupKey
                matches = [x for x in dvs_obj_list
                           if x.key == port_group_key]
                if matches:
                    vm_network[device.deviceInfo.label] = \
                        matches[-1].name
            else:
                vm_network[device.deviceInfo.label] = \
                    device.deviceInfo.summary
            vm_network_list.append(vm_network)
        vm_json = {
            'name': vm_obj.name,
            'host': host,
            'network': vm_network_list,
        }
        metadata_for_vcenter.append(vm_json)
    try:
        if filename:
            # BUG FIX: use a context manager so the file handle is
            # closed even when writelines fails.
            with open(filename, 'w') as fd:
                fd.writelines(str(metadata_for_vcenter))
    except IOError:
        logger.error("Can not Open file {}. Please see filename "
                     "and path".format(filename))
    return metadata_for_vcenter
def va_config_routes(self):
    """
    Add routes for vArmour devices and linux pcs according to the
    topo file.  Only entries with auto_set == 1 are configured; linux
    hosts use config_route(), directors use va_set_route() with a
    CIDR network.

    Kwargs:
        None

    Return:
        True : Succeed to add route for pc or vArmour devices
               according to topo file.
        False : Failed to add route for pc or vArmour devices
                according to topo file.

    Examples:
        vatpobj = VaTopo(test_param_file, topo_file)
        vatpobj.va_config_routes()
    """
    logger.debug("Start to config route")
    route_info = self.route_info
    for key in route_info.keys():
        devobj = self.testdata.va_get_by_uniq_id(key, False)
        routes = route_info[key]
        node_type = devobj.get_nodetype()
        if node_type == 'linux' or node_type == 'dir':
            # only linux pc and director are supported now.
            devobj = self.testdata.va_get_by_uniq_id(key)
            for route in routes:
                if ('auto_set' not in route) or (route['auto_set'] != 1):
                    continue
                # auto_set == 1 means the route must be configured.
                if node_type == 'linux':
                    result = devobj.config_route(
                        route['dst'], route['netmask'],
                        route['gateway'])
                if node_type == 'dir':
                    # BUG FIX: an int netmask (prefix length) used to
                    # be concatenated to a str and raised TypeError;
                    # format() handles both int prefixes and dotted
                    # netmask strings.
                    address = '{}/{}'.format(route['dst'],
                                             route['netmask'])
                    result = devobj.va_set_route(
                        ipaddress.IPv4Network(address),
                        route['gateway'], True)
                if not result:
                    logger.error('Failed to config route')
                    logger.debug(devobj.show_route())
                    logger.debug(devobj.show_interface())
                    return False
    logger.info("Completed to config route")
    return True
def create_affinity_rule_for_host(self, hostname, clustername,
                                  hostgroupname="host-group-1",
                                  vmgroupname="vm-group-1",
                                  rule_name="affinity-rule-1"):
    """
    Create a DRS affinity rule pinning a vm group to a host group,
    i.e. the vms stay on the host while DRS is enabled.

    Args:
        :hostname (str): name of host.
        :clustername (str): name of DRS cluster where user wants to
            create vm rule.
        :hostgroupname (str): name of host group.
        :vmgroupname (str): name of vm group.
        :rule_name (str): name of affinity rule.

    Raise:
        vmodl.MethodFault errors are caught and logged.
    """
    try:
        cluster_obj = self._get_cluster_object_by_name(clustername)
        # First create the two groups the rule ties together.
        self._create_cluster_group_by_host(hostname, clustername,
                                           hostgroupname)
        self._create_cluster_group_by_vms(hostname, clustername,
                                          vmgroupname)
        vm_host_rule_info = vim.cluster.VmHostRuleInfo()
        vm_host_rule_info.name = rule_name
        vm_host_rule_info.enabled = True
        vm_host_rule_info.mandatory = True
        vm_host_rule_info.vmGroupName = vmgroupname
        vm_host_rule_info.affineHostGroupName = hostgroupname
        rule_spec = vim.cluster.RuleSpec()
        rule_spec.operation = 'add'
        rule_spec.info = vm_host_rule_info
        rule_spec.removeKey = vm_host_rule_info.key
        clusterSpec = vim.cluster.ConfigSpecEx()
        clusterSpec.rulesSpec.append(rule_spec)
        logger.info("Creating affinity rule {} on cluster {}".format(
            rule_name, clustername))
        affinity_rule_task = cluster_obj.ReconfigureEx(spec=clusterSpec,
                                                       modify=True)
        return self._wait_for_tasks([affinity_rule_task],
                                    task_name="Affinity Rule Creation")
    except vmodl.MethodFault as error:
        logger.error("Exception raise {}".format(error.msg))
def revert_links(self):
    """
    Revert links (portgroup vlans) according to the inventory file.

    For every device interface in the inventory, verifies that its
    vswitch exists on the owning hypervisor, then resets the
    interface's portgroup to the vlan recorded in the inventory.

    Kwargs:
        None

    Return:
        True : Succeed to revert link according to inventory file.
        False : Failed to revert link according to inventory file.

    Examples:
        vatpobj = VaTopo(test_param_file, topo_file)
        vatpobj.revert_links()
    """
    inventory_tb_data = load_yaml(self.testdata.inventory_file)
    for dev_key in inventory_tb_data.keys():
        for dev in inventory_tb_data[dev_key]:
            if 'interfaces' not in dev or len(dev['interfaces']) == 0:
                continue
            vswitches_l = dev['hvisor']['vswitches'].split(' ')
            # BUG FIX: loop variable renamed from 'int' (which
            # shadowed the builtin) to 'iface'; also removed the
            # unused local 'swobjs' and the manual tag flag.
            for iface in dev['interfaces']:
                logger.debug(
                    'Check switch {} if it is in hv {}'.format(
                        iface['vswitch'], dev['hvisor']['mgmt_ip']))
                if iface['vswitch'] not in vswitches_l:
                    logger.error('vswitch {} is not in hv {}'.format(
                        iface['vswitch'], dev['hvisor']['mgmt_ip']))
                    return False
                logger.debug('check vswitch on hv.done')
                # update vlan for each interface
                logger.info('Clean access vlan {} for {}:{}'.format(
                    iface['vlan'], dev['uniq_id'], iface['phy_name']))
                hv_uqid = dev['hvisor']['uniq_id']
                vswobj = self.swobjs[hv_uqid][iface['vswitch']]
                if not vswobj.update_vlan(iface['port_group'],
                                          iface['vlan']):
                    return False
    logger.info("Completed to revert links")
    return True
def add_vm_vnic(self, vmname, portgroup):
    """
    Add a new vnic to a VM, backed by the given portgroup.

    Args:
        :vmname (str): name of vm that user wants to abstract from
            vm object list
        :portgroup (str): name of the portgroup to attach the new
            vnic to

    Return:
        None; vmodl faults are caught and logged.
    """
    try:
        vm_obj = self._get_vm_object_by_name(vmname)
        nic_spec = vim.vm.device.VirtualDeviceSpec()
        nic_spec.operation = \
            vim.vm.device.VirtualDeviceSpec.Operation.add
        nic_spec.device = vim.vm.device.VirtualVmxnet()
        nic_spec.device.deviceInfo = vim.Description()
        nic_spec.device.backing = \
            vim.vm.device.VirtualEthernetCard.NetworkBackingInfo()
        nic_spec.device.backing.useAutoDetect = False
        nic_spec.device.backing.network = \
            self._get_network_object_by_name(portgroup)
        nic_spec.device.backing.deviceName = portgroup
        nic_spec.device.connectable = \
            vim.vm.device.VirtualDevice.ConnectInfo()
        # BUG FIX: startConnected was assigned twice; set it once.
        nic_spec.device.connectable.startConnected = True
        nic_spec.device.connectable.allowGuestControl = True
        nic_spec.device.connectable.connected = False
        nic_spec.device.connectable.status = 'untried'
        nic_spec.device.wakeOnLanEnabled = True
        nic_spec.device.addressType = 'assigned'
        add_nic_spec = vim.vm.ConfigSpec(deviceChange=[nic_spec])
        add_nic_task = vm_obj.ReconfigVM_Task(spec=add_nic_spec)
        self._wait_for_tasks([add_nic_task], "add nic")
    except vmodl.MethodFault as error:
        logger.error("Exception raised {}".format(error.msg))
def get_mac(self, interface='eth1'):
    """
    Helper method to get the mac address of an interface.

    Returns:
        :str of an mac address on success or None on failure:
    """
    output = self.show_interface(interface)
    try:
        hw_addr = output[0][3]
    except IndexError:
        logger.error('Failed to get HWaddr: {}'.format(output))
        return None
    logger.info('HWaddr: {}'.format(hw_addr))
    return hw_addr
def stop(self):
    """
    Stop the ssh traffic process spawned by the start method.

    Kills all sshpass processes on the client and confirms that the
    session owner no longer appears in the command output.

    Returns:
        tuple: (True, None) when closed, (False, output) otherwise.
    """
    session = LinuxShell(host=self._client, user=self._ssh_userid,
                         pswd=self._ssh_passwd)
    output = session.exec_command("killall sshpass")
    logger.info('Output :' + str(output))
    if self._ssh_userid in str(output):
        logger.error('SSH NOT closed successfully' + str(output))
        return False, output
    logger.info('SSH closed successfully')
    return True, None
def check_cmd_existence(self, cmd):
    """
    Check whether a command exists on the remote host via 'whereis'.

    param:
        :cmd (str): command name

    example:
        check_cmd_existence('lftp')

    return:
        True when the command is found, False otherwise.
    """
    logger.info("\nIn subroutine: " + sys._getframe().f_code.co_name)
    cmd_info = self._access.shell("whereis {}".format(cmd))
    # 'whereis cmd' prints 'cmd: /path1 /path2'; an empty path list
    # means the command is absent.
    parts = cmd_info[0].strip('\n').split(':')
    # BUG FIX: guard against output without a ':' separator, which
    # previously raised IndexError on parts[1].
    if len(parts) < 2 or len(parts[1]) == 0:
        logger.error('Not found command: {}'.format(cmd))
        return False
    return True
def va_reset_director(self):
    """
    Reset system configuration for each director.

    Kwargs:
        None

    Return:
        True : Succeed to reset director according to topo file.
        False : Failed to reset director according to topo file,
                or no director device was found.

    Examples:
        vatpobj = VaTopo(test_param_file, topo_file)
        vatpobj.va_reset_director()
    """
    logger.info("Start to reset system configuration for directos")
    result, devobjs = self.get_dir_object()
    if result:
        for devobj in devobjs:
            logger.info('reset system configuration for director')
            # Skip devices logged in with the no-cli user - they
            # cannot run the reset commands.
            if devobj._access._resource.get_user().get(
                    'name') == 'varmour_no_cli':
                continue
            devobj.va_cli('show system')
            result = devobj.va_reset_all()
            if self.add_nocli_user:
                # Re-create the no-cli admin user wiped by the reset.
                devobj.va_add_user(
                    **{
                        'name': 'varmour_no_cli',
                        'password':
                            devobj._access._resource.get_user().get(
                                'password'),
                        'role': 'admin',
                        'is_commit': True
                    })
            if not result:
                return False
    else:
        logger.error('Not found any director devices')
        return False
    logger.info("Completed to reset system configuration for director")
    return True