def udp_server(servers, port, **kwargs):
    """
    Helper API to start a UDP listener on the given servers and port

    Args:
        :param servers: list of server handles to listen on
        :type servers: list
        :param port: listener port
        :type port: int/str

    Raises:
        KeyError
    """
    if not servers:
        servers = get_all_server_handle()
    elif isinstance(servers, basestring):
        servers = [servers]
    for server_handle in servers:
        server = get_server_by_handle(server_handle)
        server_vm = server.vm()
        server_ip = server.ip()
        cmd = 'python /root/common/scripts/udp_server.py --ip %s --p %s &> /tmp/udp_server_out_%s &' % (
            server_ip, port, server_ip)
        logger.info("udp_server command: %s" % cmd)
        server_vm.execute_command(cmd, log_error=False)
        sleep(10)
        out = server_vm.execute_command(
            'ps aux | grep \'udp_server.py\' | grep -v grep ')
        if not out:
            fail("UDP Server process not started .. %s " % out)
        out = server_vm.execute_command('cat /tmp/udp_server_out_%s' % server_ip)
        if 'starting' not in ''.join(out):
            error("UDP Server not started .. %s " % out)

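# Minimal usage sketch (hypothetical server handles; assumes the framework's
# server objects and logger are already set up by the suite):
#     udp_server(['server1', 'server2'], 8000)   # start listeners on port 8000
#     udp_server(None, 8000)                     # falls back to all known servers
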
def test_switch_mode_session(self):
    '''
    Switch mode session and test if the requests on the session go
    through correctly
    '''
    switch_mode(user='******', password='******')
    session = rest.get_session()
    clear_session()
    switch_mode(password='******')
    try:
        rest.get('serviceengine')
    except Exception as e:
        logger.info("authentication error expected: " + str(e))
    else:
        assert 0, "Session is not getting updated based on password"
    switch_mode(session=session)
    rest.get('serviceengine')
    # REVIEW: Once session in mode is set, no further switch_mode works
    switch_mode(user='******', password='******')
    try:
        rest.get('serviceengine')
    except Exception as e:
        logger.info("authentication error expected: " + str(e))
    else:
        assert 0, "Once switched on session, no other switch mode works"
    clear_session(all_sessions=True)

def make_follower_ready_for_cluster(ctrl_vm, **kwargs):
    """ Resets the controller password to admin/admin """
    config = AviConfig.get_instance()
    mode = config.get_mode()
    logger.debug("Current Default Mode %s" % mode)
    username = mode['user']
    current_password = mode['password']
    logger.info('Reset controller password for %s' % ctrl_vm.ip)
    try:
        config.switch_mode(password=ctrl_vm.password)
        session = create_session(ctrl_vm)
        config.switch_mode(session=session)
        # REVIEW: password should be the original default password
        reset_admin_user(username=username, password='******',
                         old_password=ctrl_vm.password, **kwargs)
    except Exception as e:
        logger.debug("Trying with admin/admin")
        config.switch_mode(password='******')
        session = create_session(ctrl_vm)
        config.switch_mode(session=session)
        # REVIEW: password should be the original default password
        reset_admin_user(username=username, password='******',
                         old_password='******', **kwargs)
    config.switch_mode(session=None, password=current_password)

def get_all_vnic_flows_created_on_all_secondary_se(virtualservice):
    se_name_list = get_vs_secondary_se_list(virtualservice)
    logger.info('get dispatcher stats on secondary: se_name_list %s'
                % se_name_list)
    c = 0
    for se_name in se_name_list:
        se_info = get_se_info(se_name, connected=True)
        d_stats = []
        for vnic in se_info['data_vnics']:
            if_name = vnic['if_name']
            params = {'intfname': if_name}
            resp_code, json_data = rest.get('serviceengine', name=se_name,
                                            path='flowtablestat',
                                            params=params)
            for dsr in json_data:
                if 'dispatch' in dsr:
                    d_stats.append(dsr['dispatch'][0])
        if infra_utils.get_cloud_context_type() == 'baremetal':
            vnic = se_info['mgmt_vnic']
            if_name = vnic['if_name']
            params = {'intfname': if_name}
            resp_code, json_data = rest.get('serviceengine', name=se_name,
                                            path='flowtablestat',
                                            params=params)
            for dsr in json_data:
                if 'dispatch' in dsr:
                    d_stats.append(dsr['dispatch'][0])
        for stats in d_stats:
            c = c + stats['flow_rx_create']
    return c

def update_healthmonitor(hm_name, **kwargs):
    """
    :param hm_name:
    :param kwargs:
    :return:
    """
    logger.info('update healthmonitor %s' % hm_name)
    status_code, json_hm_data = rest.get('healthmonitor', name=hm_name)
    if kwargs.get('type'):
        json_hm_data['type'] = kwargs.get('type')
    if kwargs.get('send_interval'):
        json_hm_data['send_interval'] = kwargs.get('send_interval')
    if kwargs.get('receive_timeout'):
        json_hm_data['receive_timeout'] = kwargs.get('receive_timeout')
    if kwargs.get('successful_checks'):
        json_hm_data['successful_checks'] = kwargs.get('successful_checks')
    if kwargs.get('failed_checks'):
        json_hm_data['failed_checks'] = kwargs.get('failed_checks')
    rest.put('healthmonitor', name=hm_name, data=json_hm_data)

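# Minimal usage sketch (hypothetical monitor name; assumes a controller
# session has already been established via the framework's rest module):
#     update_healthmonitor('hm-http-1', send_interval=10, failed_checks=3)
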
def delete_servers(pool_name, how_many, prefix, cleanup_backend=True):
    """
    :param pool_name:
    :param how_many:
    :param prefix:
    :param cleanup_backend:
    :return:
    """
    logger.info('delete servers from pool %s' % pool_name)
    config = infra_utils.get_config()
    context_key = config.get_context_key()
    pool = config.site_objs[context_key]['pool'].get(pool_name)
    for count in range(int(how_many)):
        handle = '%s%s' % (prefix, count + 1)
        _delete_server_backend(handle, pool, cleanup_backend)
    for count in range(int(how_many)):
        handle = '%s%s' % (prefix, count + 1)
        _delete_server_model(handle, pool)
    st, pool_json_ctrl = rest.get('pool', name=pool_name)
    pool_server_json = []
    pool_server_dict = pool.servers
    for pool_server in pool_server_dict.values():
        pool_server_json.append(pool_server.get_json())
    pool_json_ctrl['servers'] = pool_server_json
    rest.put('pool', name=pool_name, data=pool_json_ctrl)

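# Minimal usage sketch (hypothetical pool/handle names; assumes servers were
# created with the same prefix, e.g. 's1', 's2', 's3'):
#     delete_servers('pool-web', 3, 's')                         # remove s1..s3
#     delete_servers('pool-web', 2, 's', cleanup_backend=False)  # keep backend VMs
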
def negative_update_server(pool_name, handle, **kwargs):
    """
    :param pool_name:
    :param handle:
    :param kwargs:
    :return:
    """
    server = infra_utils.get_server_by_handle(handle)
    response_code, json_pool_data = rest.get('pool', name=pool_name)
    if kwargs.get('port'):
        for index, rest_server in enumerate(json_pool_data.get('servers')):
            json_server_data = server.get_json()
            server_ip = json_server_data.get('ip')
            if server_ip and server_ip.get(
                    'addr') == rest_server['ip']['addr']:
                json_pool_data['servers'][index]['port'] = kwargs.get('port')
    try:
        rest.put('pool', name=pool_name, data=json_pool_data)
        logger_utils.fail('No exception was raised in negative test case')
    except Exception as e:
        logger.info('Field port must be in the range 1-65535')
    return True

def azure_init(self):
    """ AZURE connection clients init """
    if not self.credentials:
        try:
            self.credentials = ServicePrincipalCredentials(
                client_id=self.application_id,
                secret=self.secret_key,
                tenant=self.tenant_id)
        except Exception as e:
            fail('AZURE credentials are wrong and failed with error:%s'
                 % str(e))
    if not self.compute_client:
        logger.info('Connecting to AZURE Compute client')
        try:
            self.compute_client = ComputeManagementClient(
                self.credentials, self.subscription_id)
        except Exception as e:
            fail('AZURE Compute client failed with error:%s' % str(e))
    if not self.network_client:
        logger.info('Connecting to AZURE Network client')
        try:
            self.network_client = NetworkManagementClient(
                self.credentials, self.subscription_id)
        except Exception as e:
            fail('AZURE Network client failed with error:%s' % str(e))

def get_vm_ip_for_name(self, vm_name=None, public_ip_address=False):
    """ Get the IP address for the given vm """
    vm_ip_addr = None
    if not vm_name:
        vm_name = self.vm_json.get('name')
    try:
        vm_obj = self.compute_client.virtual_machines.get(
            self.resource_group, vm_name)
        for interface in vm_obj.network_profile.network_interfaces:
            logger.debug('Got interface details ..: %s' % interface.id)
            nic_name = " ".join(interface.id.split('/')[-1:])
            # sub = "".join(interface.id.split('/')[4])
            ip_addr_objs = self.network_client.network_interfaces.get(
                self.resource_group, nic_name).ip_configurations
            for ip_obj in ip_addr_objs:
                logger.info(
                    " Private IP Address: %s , for Nic: %s, IP Config obj:%s"
                    % (ip_obj.private_ip_address, nic_name, ip_obj.name))
                if ip_obj.primary:
                    if public_ip_address:
                        vm_ip_addr = ip_obj.public_ip_address
                    else:
                        vm_ip_addr = ip_obj.private_ip_address
        logger.info("IP Address: %s , for Nic: %s" % (vm_ip_addr, nic_name))
        return vm_ip_addr
    except Exception as e:
        fail('Error while getting the ip address for vm, exp: %s' % e.message)

def dns_get_ports_for_fqdn(dns_resolver, qname):
    """
    :param dns_resolver:
    :param qname:
    :return:
    """
    try:
        rsp = dns_resolver.query(qname, 'SRV')
    except Exception as e:
        # Try with a different port 8053, in case
        # of controller running as a container
        dns_resolver.port = 8053
        try:
            rsp = dns_resolver.query(qname, 'SRV')
        except Exception as e:
            # Reset the port for every query fail
            dns_resolver.port = 53
            logger.info("DNS get Ports returned: %s" % str(e))
            return []
    ports = []
    for record in rsp:
        ports.append(record.port)
    return sorted(ports)

def validate_vs_dns_deleted(vs_dns_name, retries=5, dns_vs_vip=None, **kwargs):
    """
    :param vs_dns_name:
    :param retries:
    :param dns_vs_vip:
    :param kwargs:
    :return:
    """
    if not dns_vs_vip:
        logger.info("[SKIPPING] DNS check for VS as no DNS vip provided. Note,"
                    " controller based DNS is not supported anymore.")
        return True
    count = retries
    while count:
        ipl, portl = dns_get_ip_ports_for_fqdn(
            dns_get_resolver(dns_vs_vip=dns_vs_vip), vs_dns_name)
        if ipl or portl:
            count -= 1
            logger_utils.asleep(delay=5)
        else:
            return True
    logger_utils.fail("Unexpected[%s]: DNS entries %s, %s found"
                      % (vs_dns_name, ipl, portl))

def dns_get_ip_ports_for_fqdn(dns_resolver, qname):
    ips = dns_get_ips_for_fqdn(dns_resolver, qname)
    if not ips:
        logger.info("No IPs found for '%s'" % qname)
        return [], []
    ports = dns_get_ports_for_fqdn(dns_resolver, qname)
    return ips, ports

def dns_get_ips_for_fqdn(dns_resolver, qname):
    """
    :param dns_resolver:
    :param qname:
    :return:
    """
    try:
        rsp = dns_resolver.query(qname, 'A')
    except Exception as e:
        # Try with a different port 8053, in case
        # of controller running as a container
        dns_resolver.port = 8053
        try:
            rsp = dns_resolver.query(qname, 'A')
        except Exception as e:
            # Reset the port for every query fail
            dns_resolver.port = 53
            logger.info("DNS get IPs returned: %s:%s" % (type(e), str(e)))
            return []
    ips = []
    for record in rsp:
        ips.append(record.address)
    return sorted(ips)

def dns_get_resolver(ns_list=[], append=False, use_controller=True,
                     dns_vs_vip=None):
    """
    :param ns_list:
    :param append:
    :param use_controller:
    :param dns_vs_vip:
    :return:
    """
    dns_resolver = resolver.Resolver()
    # Review: reduce the default timeout of 30 seconds
    # so checking deleted entries will return faster.
    # Checking for existing entries may time out spuriously, but generally we
    # have a separate retry mechanism on them.
    dns_resolver.lifetime = 5
    if not append:
        dns_resolver.nameservers = []
    dns_resolver.nameservers.extend(ns_list)
    if dns_vs_vip:
        # prefer dns vs
        dns_resolver.nameservers.append(dns_vs_vip)
    elif use_controller:
        # fallback to controller
        dns_resolver.nameservers.append(controller_lib.get_controller_ip())
    logger.info("DNS resolvers: %s" % dns_resolver.nameservers)
    return dns_resolver

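# Self-contained sketch of the dnspython (1.x) calls used by the DNS helpers
# above; the nameserver IP and FQDN are made-up values for illustration only:
#
#     from dns import resolver
#
#     r = resolver.Resolver()
#     r.lifetime = 5                     # fail fast instead of the 30s default
#     r.nameservers = ['10.10.30.40']    # hypothetical DNS VS VIP
#     try:
#         answer = r.query('app1.example.com', 'A')      # A records
#         ips = sorted(rec.address for rec in answer)
#         answer = r.query('app1.example.com', 'SRV')    # SRV records
#         ports = sorted(rec.port for rec in answer)
#     except Exception as e:
#         ips, ports = [], []            # NXDOMAIN/timeout treated as "not found"
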
def get_core_filenames():
    # N/A
    return {}
    se_list = get_vm_of_type('se', state="OPER_UP")
    ctrlr_list = get_vm_of_type('controller')
    all_vms = se_list + ctrlr_list
    if all_vms:
        result = {}
        for vm in all_vms:
            try:
                file_list = vm.execute_command(
                    'sudo ls -l /var/lib/avi/archive/*.tar.gz',
                    log_error=False)
            except Exception as e:
                logger.info(
                    "Unable to execute command on VM. Vm may be down %s"
                    % str(e.msg))
                if not vm.ip:
                    logger.warning('vm is not there , vm name: %s' % vm.name)
                    return {}
                if vm in ctrlr_list and vm.ip:
                    fail("Controller not in right state")
            if file_list:
                result[vm.ip] = str(file_list).splitlines()
    else:
        result = {}
    return result

def delete_oldest_vhd(self, vhd_name):
    blob_list = []
    vhd = vhd_name.split('-')
    try:
        block_blob_service = PageBlobService(
            account_name=self.storage_account,
            account_key=self.storage_account_key)
        generator = block_blob_service.list_blobs(self.container_name)
        delete_vhd = "%s-%s" % (vhd[0], vhd[1])
        for blob in generator:
            # get specific version vhd files
            if delete_vhd in blob.name:
                blob_list.append(blob)
    except Exception as e:
        fail('Error while getting vhd list: %s' % e.message)
    # keep last 4 controller vhd
    if len(blob_list) < 4:
        return
    old_date = blob_list[0].properties.last_modified
    oldest = blob_list[0]
    for blob in blob_list:
        if blob.properties.last_modified < old_date:
            old_date = blob.properties.last_modified
            oldest = blob
    try:
        logger.info('Deleting %s' % oldest.name)
        block_blob_service.delete_blob(self.container_name, oldest.name)
    except Exception as e:
        fail('Error while deleting vhd file: %s' % e.message)

def negative_update_pool(pool_name, expected_error=None, **kwargs):
    """
    :param pool_name:
    :param expected_error:
    :param kwargs:
    :return:
    """
    logger.info('update pool %s, fields: %s' % (pool_name, kwargs))
    _, json_pool_data = rest.get('pool', name=pool_name)
    if kwargs.get('name') or kwargs.get('name') == '':
        json_pool_data['name'] = kwargs.get('name')
    if kwargs.get('default_server_port'):
        json_pool_data['default_server_port'] = kwargs.get(
            'default_server_port')
    if kwargs.get('graceful_disable_timeout'):
        json_pool_data['graceful_disable_timeout'] = kwargs.get(
            'graceful_disable_timeout')
    if kwargs.get('connection_ramp_duration'):
        json_pool_data['connection_ramp_duration'] = kwargs.get(
            'connection_ramp_duration')
    try:
        rest.put('pool', name=pool_name, data=json_pool_data)
        logger_utils.fail('No exception was raised in negative test case')
    except Exception as e:
        if expected_error:
            if expected_error.lower() not in str(e).lower():
                logger_utils.fail('Expected error %s did not occur\n%s'
                                  % (expected_error, str(e)))
    return True

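# Minimal usage sketch (hypothetical pool name and error text; the expected
# error substring is matched case-insensitively against the raised exception):
#     negative_update_pool('pool-web', expected_error='must be in the range',
#                          default_server_port=70000)
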
def run_commands(cli_instructs, ctlr_vm, **kwargs):
    config = infra_utils.get_config()
    cli_exe_path = suite_vars.workspace + (
        "/python/bin/cli/bin/shell_client.py --user %s --password %s "
        % (ctlr_vm.user, ctlr_vm.password))
    cli_instructs_path = '/tmp/%s' % uuid.uuid1()
    with open(cli_instructs_path, 'w') as fh:
        fh.write(string.join(cli_instructs, '\n'))
    cli_exec = '%s --address %s' % (cli_exe_path, ctlr_vm.ip)
    env = {}
    env['PYTHONPATH'] = suite_vars.workspace + '/python/lib'
    try:
        cat_proc = subprocess.Popen(['cat', cli_instructs_path],
                                    stdout=subprocess.PIPE, env=env)
        cli_proc = subprocess.Popen(cli_exec.split(), stdin=cat_proc.stdout,
                                    stdout=subprocess.PIPE, env=env)
        cat_proc.stdout.close()
        output = cli_proc.communicate()[0]
        logger.info(output)
    except OSError:
        logger_utils.fail('Could not open the shell at %s' % cli_exe_path)
    except subprocess.CalledProcessError:
        raise
    os.system('rm %s' % cli_instructs_path)

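# Self-contained sketch of the Popen pipeline pattern used above (feeding one
# process's stdout into another's stdin); 'sort' stands in for the CLI shell:
#
#     import subprocess
#
#     producer = subprocess.Popen(['cat', '/etc/hosts'],
#                                 stdout=subprocess.PIPE)
#     consumer = subprocess.Popen(['sort'], stdin=producer.stdout,
#                                 stdout=subprocess.PIPE)
#     producer.stdout.close()   # so the producer sees SIGPIPE if consumer exits
#     output = consumer.communicate()[0]
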
def get_all_servers_of_pool(pool_name):
    status_code, rsp = rest.get('pool', name=pool_name)
    ret = []
    for server in rsp['servers']:
        ret.append(server['ip']['addr'] + ':%s' % server['port'])
    logger.info('return list: %s' % ret)
    return ret

def reconfigure_ip(self, port, new_ip):
    '''Reconfigure the original ip of the vm (name taken from self.vm_json)

    Arguments:
        port: interface you wish to change
        new_ip: new ip address you are changing the vm to
    '''
    vm_name = self.vm_json.get('name')
    # get original ip
    logger.info('configIP \n')
    vm = self.server.get_vm_by_name(vm_name, self.datacenter)
    ssh_ip = vm.get_properties()['ip_address']
    ssh = paramiko.SSHClient()
    ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    ssh.connect(ssh_ip, username='******', password='******')
    stdint, stdout, stderr = ssh.exec_command('ifconfig')
    output = stdout.readlines()
    found_port = False
    mask = ''
    for line in output:
        if found_port:
            tmp = re.search('Mask:(\S+)', line)
            if tmp:
                mask = tmp.group(1)
            break
        if port in line:
            found_port = True
            mask = '255.255.255.0'
    command = "echo aviuser | sudo -S ifconfig " + \
        port + " " + new_ip + " netmask " + mask + " up"
    ssh.exec_command(command)
    logger.info("Successfully updated ip address")

def start_httperf_on_client(client_handles, vs_name, vs_port, uri, rate,
                            num_conns, requests_per_session, **kwargs):
    """
    Runs httperf against the VS VIP from the main interface of the client vm
    :param client_handles:
    :param vs_name:
    :param vs_port:
    :param uri:
    :param rate:
    :param num_conns:
    :param requests_per_session:
    :param kwargs:
    :return:
    """
    method = kwargs.get('method', 'GET')
    client_handles = traffic_manager.get_client_handles_from_range(
        client_handles)
    vip = kwargs.get('vip', None)
    if not vip:
        vip = vs_lib.get_vip(vs_name)
    command = 'httperf '  # --timeout 0.5
    command += '--hog --server %s --port %s --wsess %s,%s,0 --rate %s --uri "%s" --method %s --recv-buffer 1240000 --send-buffer 1240000 &> /tmp/httperf.txt &' % (
        vip, vs_port, num_conns, requests_per_session, rate, uri, method)
    logger.info('start_httperf_on_client:' + command)
    for client_handle in client_handles:
        vm, ip = traffic_manager.get_client_by_handle(client_handle)
        vm.execute_command(command)

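# Minimal usage sketch (hypothetical handles and VS name; with these values
# the composed command is roughly:
#     httperf --hog --server <vip> --port 80 --wsess 100,10,0 --rate 50
#             --uri "/index.html" --method GET ... &> /tmp/httperf.txt &
# ):
#     start_httperf_on_client('client1', 'vs-web', 80, '/index.html',
#                             rate=50, num_conns=100, requests_per_session=10)
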
def restart(self, retry=False):
    '''
    Restart VM

    Restarts the vm whose name is taken from self.vm_json
    '''
    vm_name = self.vm_json.get('name')
    count = 1
    if retry:
        count = 3
    i = 0
    vm = None
    # TODO: We could use @aretry here, waiting to finalize on its usage
    # within an exception.
    while i < count:
        i = i + 1
        try:
            vm = self.server.get_vm_by_name(vm_name, self.datacenter)
            break
        except VIException as e:
            logger.info('%s VIException: %s' % (time.asctime(), e.message))
            self.disconnect()
            self.reconnect_with_vcenter()
            time.sleep(2)
            continue
    if not vm:
        raise Exception('Could not find VM %s after %d retries'
                        % (vm_name, count))
    vm.power_off()
    vm.power_on()

def get_dst_mac(client_vm, client_ip, vip):
    """
    API helps to get destination MAC Address

    Args:
        :param client_vm: Client VM Object
        :type client_vm: Object
        :param client_ip: Client VM IP
        :type client_ip: str
        :param vip: VIP IP address
        :type vip: str

    Return:
        Destination MAC Address for VIP
    """
    if ":" in client_ip:
        return get_ip_mac_addr(vip, client_vm, 60)
    else:
        cip_mask = '.'.join(client_ip.split('.')[0:3])
        vip_mask = '.'.join(vip.split('.')[0:3])
        gateway_ip = cip_mask + '.1'
        logger.info("cip_mask: %s, vip_mask: %s" % (cip_mask, vip_mask))
        if cip_mask == vip_mask:
            return get_ip_mac_addr(vip, client_vm, 60)

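# Illustration of the /24 prefix comparison above, with made-up addresses:
#     client_ip, vip = '10.10.20.5', '10.10.20.100'
#     '.'.join(client_ip.split('.')[0:3])   # -> '10.10.20'
#     '.'.join(vip.split('.')[0:3])         # -> '10.10.20' (same /24: ARP the VIP)
#     # for vip = '10.10.30.100' the prefixes differ; the next hop would be
#     # the assumed gateway 10.10.20.1
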
def poweroff(self, vm_name=None):
    '''
    Power off a VM

    Arguments:
        vm_name: vm you wish to power off
    '''
    if not vm_name:
        vm_name = self.vm_json.get('name')

    @aretry(retry=10, delay=2, period=2)
    def getVm():
        vm = self.server.get_vm_by_name(vm_name)
        return vm

    vm = getVm()
    if vm:
        vm.power_off()
        if vm.get_status() != "POWERED OFF":
            fail("VM could not power off. It is in " + vm.get_status() +
                 " state")
        logger.info('vm : %s powered off, status: %s'
                    % (vm_name, vm.get_status()))
    else:
        logger.debug('DEBUG: all_vms: %s' % self.server.get_registered_vms())
        fail("Can't find the vm %s" % vm_name)

def test_controller_goes_down(self):
    '''
    Test requests on sessions when the controller goes down, and verify that
    requests on the sessions work again when the controller comes back up
    '''
    switch_mode(user='******', password='******')
    logger.info('Configuring cloud, this may take some time..')
    setup_cloud(wait_for_cloud_ready=True)
    config = get_config()
    mode = config.get_mode()
    controller = config.get_vm_of_type('controller')[0]
    session = rest.get_session()
    data_1 = rest.get('serviceengine')
    cloud_obj = config.testbed[mode['site_name']].cloud_obj[mode['cloud']]
    controller_name = controller.name
    cloud_obj.powerOffVM(controller_name)
    try:
        rest.get('serviceengine')
    except Exception as e:
        logger.info('Expected ReadTimeout: ' + str(e))
    cloud_obj.powerOnVM(controller_name)
    wait_until_n_cluster_nodes_ready()
    data_2 = rest.get('serviceengine')
    assert data_1 == data_2

def poweron(self, vm_name=None):
    '''
    Power on a VM

    Arguments:
        vm_name: vm you wish to power on
    '''
    if not vm_name:
        vm_name = self.vm_json.get('name')

    @aretry(retry=10, delay=2, period=2)
    def getVm():
        vm = self.server.get_vm_by_name(vm_name)
        return vm

    vm = getVm()
    try:
        vm.power_on()
        if vm.get_status() != "POWERED ON":
            fail("VM could not power on. It is in " + vm.get_status() +
                 " state")
        logger.info('vm : %s powered on, status: %s'
                    % (vm_name, vm.get_status()))
    except VIException as e:
        # If the VM is already powered on, ignore the error
        if 'current state (Powered on)' in str(e):
            logger.info('vm: %s already is powered on ..' % vm_name)
        else:
            fail(e)

def create_microservice_group(msg_name, ms_name=[], **kwargs):
    """
    :param msg_name:
    :param ms_name:
    :param kwargs:
    :return:
    """
    tenant = infra_utils.get_config().get_mode(key='tenant')
    msg = {
        'name': msg_name,
        'tenant_uuid': rest.get_uuid_by_name('tenant', tenant),
        'service_uuids': []
    }
    for ms in ms_name:
        msg['service_uuids'].append(rest.get_uuid_by_name('microservice', ms))
    try:
        rest.post('microservicegroup', name=msg_name, data=msg)
        # REVIEW why was this tenant_uuid?
    except Exception as e:
        if 'Micro service group with this Name and Tenant ref ' \
                'already exists' in str(e):
            logger.info('microservice group already exists, ignoring error')
        else:
            raise
    return msg_name

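# Minimal usage sketch (hypothetical group/microservice names; assumes the
# microservices already exist on the controller in the current tenant):
#     create_microservice_group('msg-web', ms_name=['ms-app1', 'ms-app2'])
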
def fin():
    logger.info('*' * 5 + " TEST CASE TEARDOWN: "
                + request.function.__name__ + '*' * 5)
    logger.report_errors()
    logger.info(sep + "TEST CASE END: " + request.function.__name__ + sep)
    logger.logger.removeHandler(logger.file_tc_handle)
    logger.logger.removeHandler(logger.file_robot_handle)

def get_elements_set(ip_set_output):
    '''
    :param ip_set_output:
    :return:
    '''
    ip_set_elements = set()
    ip_set = cleanup_and_parse_xml(ip_set_output)
    if ip_set and 'ipsets' in ip_set:
        ip_sets = ip_set['ipsets']
        if ip_sets and 'ipset' in ip_sets:
            ipsets_set = ip_sets['ipset']
            if ipsets_set and 'members' in ipsets_set:
                members = ipsets_set['members']
                if members and 'member' in members:
                    member = members['member']
                    if member:
                        # it doesn't loop through for a single element
                        if len(member) > 1:
                            for elem in member:
                                ip_set_elements.add(elem['elem'])
                        else:
                            ip_set_elements.add(member['elem'])
    logger.info("Elements::" + str(ip_set_elements))
    return ip_set_elements

def change_role_privileges(role_name, resource_name, access, **kwargs):
    """
    API to Change/Update the User Role Privileges.

    Args:
        :param role_name: User access role name
        :type role_name: str
        :param resource_name: Privilege/Resource name
        :type resource_name: str
        :param access: User Access Mode
        :type access: str
    """
    check_status_code = kwargs.pop('check_status_code', True)
    role_node = ApiNode('role', name=role_name)
    status_code, role = role_node.get()
    logger.info("status_code %s , resp %s" % (status_code, role))
    change = False
    for privilege in role['privileges']:
        if privilege['resource'] == resource_name.upper():
            privilege['type'] = access.upper()
            change = True
    if not change:
        data = {'resource': resource_name.upper(), 'type': access.upper()}
        role['privileges'].append(data)
    status_code, resp = role_node.put(data=json.dumps(role),
                                      check_status_code=check_status_code)
    logger.info("Change Role Privileges, status_code: %s " % status_code)
    return (status_code, resp)

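# Minimal usage sketch (hypothetical role/resource/access values; the helper
# upper-cases resource and access before sending them to the controller):
#     change_role_privileges('Tenant-Admin', 'permission_virtualservice',
#                            'write_access')
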