def wait_for_intelligent_scalein(vs_name):
    """
    Wait until autoscale scale-in shrinks the pool behind the given VS down
    to the autoscale policy's min_size.

    :param vs_name: name of the virtual service whose pool is autoscaled
    :return: None; fails if the pool is still above min_size after ~30 polls
    """
    _, vs_data = rest.get('virtualservice', name=vs_name)
    pool_url = pool_lib._get_pool_from_vs(vs_data)
    pool_id = pool_url.split('pool/')[1].split('#')[0]
    _, pool_data = rest.get('pool', uuid=pool_id)
    policy_url = pool_data['autoscale_policy_ref']
    policy_id = policy_url.split('serverautoscalepolicy/')[1].split('#')[0]
    _, policy = rest.get('serverautoscalepolicy', uuid=policy_id)
    floor = policy['min_size']
    # Poll until the pool has at most min_size servers.
    for _ in xrange(30):
        _, pool_data = rest.get('pool', uuid=pool_id)
        if len(pool_data['servers']) <= floor:
            return
        logger_utils.asleep(delay=15)
    logger_utils.fail('scalein did not succeed pool %s' %
                      (str(pool_data['servers'])))
def udp_server(servers, port, **kwargs):
    """
    API Helps to start UDP Listen on given Server and Port

    Args:
        :param servers: server handle or list of handles; all servers when empty
        :type servers: List
        :param port: listener port
        :type port: int/str

    Raises:
        KeyError
    """
    if not servers:
        servers = get_all_server_handle()
    elif isinstance(servers, basestring):
        servers = [servers]
    for handle in servers:
        srv = get_server_by_handle(handle)
        vm = srv.vm()
        ip = srv.ip()
        cmd = ('python /root/common/scripts/udp_server.py --ip %s --p %s '
               '&> /tmp/udp_server_out_%s &' % (ip, port, ip))
        logger.info("udp_server command: %s" % cmd)
        vm.execute_command(cmd, log_error=False)
        # Give the listener a moment to come up before checking.
        sleep(10)
        running = vm.execute_command(
            'ps aux | grep \'udp_server.py\' | grep -v grep ')
        if not running:
            fail("UDP Server Process not started .. %s " % running)
        console = vm.execute_command('cat /tmp/udp_server_out_%s' % ip)
        if 'starting' not in ''.join(console):
            error("UDP Server not started .. %s " % console)
def get_event_id_based_log_should_increase_v2(event_id, prev_event,
                                              log_type=2, page_size='20',
                                              retry_count=6,
                                              increase_count_by=1,
                                              vs_name=None, start_time=None):
    """
    Poll the event log until the count for event_id has grown by exactly
    increase_count_by relative to prev_event, failing after retry_count tries.

    :param event_id: event id to query
    :param prev_event: baseline event dict (supplies 'start' and 'count')
    :param log_type: log type forwarded to the query
    :param page_size: page size forwarded to the query
    :param retry_count: number of polls before giving up
    :param increase_count_by: expected exact growth of the count
    :param vs_name: optional virtual service name for the query
    :param start_time: query window start; defaults to prev_event['start']
    :return: last query response, after validating its description
    """
    if not start_time:
        start_time = prev_event['start']
    expected = int(increase_count_by)
    for _ in range(0, int(retry_count)):
        resp = get_event_id_based_log_v2(event_id, log_type, page_size,
                                         vs_name, start_time=start_time)
        delta = abs(json_utils.json_diff(prev_event, resp, 'count'))
        if delta == expected:
            break
        logger.info("T-diff : %d " % delta)
    if delta != expected:
        msg = ('vs[%s] event[%s] error, count expected[%d] != got[%d] '
               'after retrying %d times'
               % (vs_name, event_id, expected, delta, int(retry_count)))
        logger.info(msg)
        logger_utils.fail(msg)
    validate_event_description(resp)
    return resp
def poweron(self, vm_name=None):
    '''
    Power on a VM
    arguments:
    vm_name: vm you wish to resume
    '''
    if not vm_name:
        vm_name = self.vm_json.get('name')

    # Retry the lookup: the VM may not be visible in vSphere immediately.
    @aretry(retry=10, delay=2, period=2)
    def getVm():
        vm = self.server.get_vm_by_name(vm_name)
        return vm

    vm = getVm()
    try:
        vm.power_on()
        # Verify the power state actually transitioned.
        if vm.get_status() != "POWERED ON":
            fail("VM could not power on. It is in " + vm.get_status() +
                 " state")
        logger.info('vm : %s powered on, status: %s' %
                    (vm_name, vm.get_status()))
    except VIException as e:
        # If VM is already is powered on, ignore error
        if 'current state (Powered on)' in str(e):
            logger.info('vm: %s already is powered on ..' % vm_name)
        else:
            fail(e)
def verify_placement_vs_properties(vs_name):
    """
    Check that placement (RM) and the VS config agree on VIP and SE group.

    :param vs_name: virtual service name
    :return: True when the VS is unknown to placement; otherwise None,
        failing on any mismatch
    """
    if not placement_get_vs_by_name(vs_name):
        return True
    rm_vip = placement_get_vs_vip(vs_name)
    cfg_vip = vs_get_vip(vs_name)
    if rm_vip != cfg_vip:
        msg = ('VS %s VIP %s and '
               'RM VIP %s dont match' % (vs_name, cfg_vip, rm_vip))
        logger.trace(msg)
        logger_utils.fail(msg)
    rm_se_grp = placement_get_vs_se_grp(vs_name)
    cfg_se_grp = vs_get_se_grp(vs_name)
    if rm_se_grp != cfg_se_grp:
        msg = ('VS %s SE Grp %s and '
               'RM SE Grp %s dont match' % (vs_name, cfg_se_grp, rm_se_grp))
        logger.trace(msg)
        logger_utils.fail(msg)
def block_ips_on_network(network, ip_last_octet_start, ip_last_octet_end):
    """
    Block bunch on IP's on this network as used

    :param network: network name to look up in the site's networks_json
    :param ip_last_octet_start: first last-octet value (inclusive)
    :param ip_last_octet_end: last last-octet value (inclusive)
    :return: None; fails when the network name is not found
    """
    config = infra_utils.get_config()
    mode = config.get_mode()
    site_name = mode['site_name']
    for network_name, network_data in config.testbed[
            site_name].networks_json.iteritems():
        if network == network_data.get('name'):
            ip = network_data.get('ip')
            # FIX: compare octets numerically.  split() yields a string and
            # comparing a string against int bounds never matches under py2
            # cross-type ordering, so the block below was unreachable.
            ip_last_octet = int(ip.split('.')[-1])
            if int(ip_last_octet_start) <= ip_last_octet \
                    <= int(ip_last_octet_end):
                config.testbed[site_name]['networks_json']['ip'] = ""
            break
    else:
        logger_utils.fail(
            'Network name not in network address dict of vcenter: %s' %
            network)
def network_create(name, configured_subnet, static_range_list=None,
                   check_status_code=True):
    """
    Create a network object with an optional single configured subnet.

    :param name: network name (also used as the uuid)
    :param configured_subnet: subnet prefix; mandatory
    :param static_range_list: optional list of static V4 addresses
    :param check_status_code: forwarded to rest.post
    :return: result of rest.post on the network endpoint
    """
    if not configured_subnet:
        logger_utils.fail('Must specify configured subnet')
    payload = {
        'name': name,
        'uuid': name,
        'configured_subnets': [],
    }
    if static_range_list:
        static_ips = [{'type': 'V4', 'addr': addr}
                      for addr in static_range_list]
        payload['configured_subnets'].append({
            'prefix': configured_subnet,
            'static_ips': static_ips,
        })
    return rest.post('network', data=json.dumps(payload),
                     check_status_code=check_status_code)
def delete_oldest_vhd(self, vhd_name):
    """
    Delete the oldest blob of vhd_name's version, keeping the last four.

    :param vhd_name: vhd blob name; its first two '-'-separated fields form
        the version prefix used to match candidate blobs
    :return: None; returns early when fewer than four matches exist
    """
    matching = []
    parts = vhd_name.split('-')
    try:
        svc = PageBlobService(
            account_name=self.storage_account,
            account_key=self.storage_account_key)
        generator = svc.list_blobs(self.container_name)
        version_prefix = "%s-%s" % (parts[0], parts[1])
        # get specific version vhd files
        for blob in generator:
            if version_prefix in blob.name:
                matching.append(blob)
    except Exception as e:
        fail('Error while getting vhd list: %s' % e.message)
    # keep last 4 controller vhd
    if len(matching) < 4:
        return
    oldest = min(matching, key=lambda b: b.properties.last_modified)
    try:
        logger.info('Deleting %s' % oldest.name)
        svc.delete_blob(self.container_name, oldest.name)
    except Exception as e:
        fail('Error while deleting vhd file: %s' % e.message)
def run_commands(cli_instructs, ctlr_vm, **kwargs):
    """
    Run a list of CLI instructions against a controller via the shell client.

    :param cli_instructs: list of CLI command strings
    :param ctlr_vm: controller VM object (supplies ip/user/password)
    :return: None; shell client output is logged
    """
    config = infra_utils.get_config()
    cli_exe_path = suite_vars.workspace + (
        "/python/bin/cli/bin/shell_client.py --user %s --password %s " %
        (ctlr_vm.user, ctlr_vm.password))
    cli_instructs_path = '/tmp/%s' % uuid.uuid1()
    with open(cli_instructs_path, 'w') as fh:
        fh.write(string.join(cli_instructs, '\n'))
    cli_exec = '%s --address %s' % (cli_exe_path, ctlr_vm.ip)
    env = {'PYTHONPATH': suite_vars.workspace + '/python/lib'}
    try:
        cat_proc = subprocess.Popen(['cat', cli_instructs_path],
                                    stdout=subprocess.PIPE, env=env)
        cli_proc = subprocess.Popen(cli_exec.split(), stdin=cat_proc.stdout,
                                    stdout=subprocess.PIPE, env=env)
        # Close our copy of the pipe so cli_proc sees EOF when cat exits.
        cat_proc.stdout.close()
        output = cli_proc.communicate()[0]
        logger.info(output)
    except OSError:
        logger_utils.fail('Could not open the shell at %s' % cli_exe_path)
    except subprocess.CalledProcessError:
        raise
    finally:
        # FIX: remove the temp instruction file on every path (it used to be
        # leaked when fail() raised) and use os.remove instead of shelling
        # out via os.system.
        try:
            os.remove(cli_instructs_path)
        except OSError:
            pass
def delete_vhd_by_name(self, **kwargs):
    """
    Delete a VHD blob whose name is derived from the given build directory.

    :keyword build_dir: build directory used to derive the vhd url and name
    :return: True when the blob was found and deleted, None otherwise
    """
    build_dir = kwargs.get('build_dir')
    vhd_url, vhd_name = self.get_vhd_url(build_dir)
    logger.info('Got Requests to delete VHD Name:%s' % vhd_name)
    try:
        svc = PageBlobService(
            account_name=self.storage_account,
            account_key=self.storage_account_key)
        seen_names = list()
        for blob in svc.list_blobs(self.container_name):
            seen_names.append(blob.name)
            if vhd_name == blob.name:
                logger.info('vhd_name: %s Found going to delete it' %
                            vhd_name)
                svc.delete_blob(self.container_name, vhd_name)
                return True
        logger.info('Did not found VHD Name in Storage Blob VHD Name: %s' %
                    vhd_name)
        logger.info('List of VHD Name in Storage Blob VHD Name: %s' %
                    ', '.join(seen_names))
    except Exception as e:
        fail('Error while deleting the VHD name:%s ,exp: %s' %
             (vhd_name, e.message))
def create_vm(self, **kwargs):
    """
    Create Azure VM with given build

    :keyword build_dir: build directory used to locate/upload the vhd
    """
    self.vm_deployment()
    build_dir = kwargs.get('build_dir')
    vm_name = self.vm_json.get('name')
    # Delete VMs and NIcs if already exists
    self.delete_instance(vm_name=vm_name, raise_error=False)
    vhd_url, vhd_name = self.get_vhd_url(build_dir)
    logger.info("Got VHD URL : %s \n VHD name: %s " % (vhd_url, vhd_name))
    # Upload the build's vhd only when it is not already in storage.
    # NOTE(review): the log text says "Deleting previous controller vhd"
    # while the call uploads — confirm the intended message.
    if not self.vhd_exists(vhd_name):
        logger.info('Deleting previous controller vhd ..')
        self.upload_vhd_to_azure(build_dir, vhd_name)
    # vNetwork name + Subnet
    controller_ip = self.create_nic(vm_name=vm_name)
    nic_id = self.get_nic_id(vm_name=vm_name)
    try:
        result = self.compute_client.virtual_machines.create_or_update(self.resource_group, \
            vm_name, self.vm_parameters(vhd_url, nic_id, vhd_name))
        asleep(
            msg='Creating Virtual in-progress .. internal wait is there.',
            delay=10)
    except Exception as e:
        fail('Error while creating Controller Virtual Machine: %s' %
             e.message)
    # Block until the async create completes (presumably an Azure operation
    # poller — confirm; `result` is only bound when the try succeeded).
    result.wait()
    logger.info('Controller : %s Created withIP address: %s' %
                (vm_name, controller_ip))
def get_vm_ip_for_name(self, vm_name=None, public_ip_address=False):
    """
    Get IP address for given vm

    :param vm_name: vm name; defaults to self.vm_json['name']
    :param public_ip_address: when True return the public address of the
        primary ip-configuration instead of the private one
    :return: address of the primary ip-configuration, or None when no
        primary configuration is found
    """
    vm_ip_addr = None
    if not vm_name:
        vm_name = self.vm_json.get('name')
    try:
        vm_obj = self.compute_client.virtual_machines.get(
            self.resource_group, vm_name)
        for interface in vm_obj.network_profile.network_interfaces:
            logger.debug('Got interface details ..: %s' % interface.id)
            # NIC name is the last path segment of the interface id.
            nic_name = " ".join(interface.id.split('/')[-1:])
            #sub="".join(interface.id.split('/')[4])
            ip_addr_objs = self.network_client.network_interfaces.get(
                self.resource_group, nic_name).ip_configurations
            for ip_obj in ip_addr_objs:
                logger.info(
                    " Private IP Address: %s , for Nic: %s, IP Config obj:%s"
                    % (ip_obj.private_ip_address, nic_name, ip_obj.name))
                if ip_obj.primary:
                    if public_ip_address:
                        vm_ip_addr = ip_obj.public_ip_address
                    else:
                        vm_ip_addr = ip_obj.private_ip_address
                    logger.info("IP Address: %s , for Nic: %s" %
                                (vm_ip_addr, nic_name))
                    # Return as soon as the primary ip-config is found.
                    return vm_ip_addr
    except Exception as e:
        fail('Error while getting the ip address for vm, exp: %s' %
             e.message)
def poweroff(self, vm_name=None):
    '''
    Power off a VM
    arguments:
    vm_name: vm you wish to resume
    '''
    if not vm_name:
        vm_name = self.vm_json.get('name')

    # Retry the lookup: the VM may not be visible in vSphere immediately.
    @aretry(retry=10, delay=2, period=2)
    def getVm():
        return self.server.get_vm_by_name(vm_name)

    vm = getVm()
    if not vm:
        logger.debug('DEBUG: all_vms: %s' % self.server.get_registered_vms())
        fail("Can't find the vm %s" % vm_name)
    else:
        vm.power_off()
        if vm.get_status() != "POWERED OFF":
            fail("VM could not power off. It is in " + vm.get_status() +
                 " state")
        logger.info('vm : %s powered off, status: %s' %
                    (vm_name, vm.get_status()))
def get_core_filenames():
    """
    Collect core-archive filenames from SE and controller VMs.

    :return: dict mapping vm ip -> archive listing lines; currently stubbed
        to always return {} (see the early return below).
    """
    # N/A
    return {}
    # NOTE(review): everything below is unreachable — the helper appears to
    # have been disabled by the early return above.  Confirm before
    # re-enabling; as written, file_list may be referenced while unbound
    # when execute_command raises, and any controller with an ip hits
    # the unconditional fail() below.
    se_list = get_vm_of_type('se', state="OPER_UP")
    ctrlr_list = get_vm_of_type('controller')
    all_vms = se_list + ctrlr_list
    if all_vms:
        result = {}
        for vm in all_vms:
            try:
                file_list = vm.execute_command(
                    'sudo ls -l /var/lib/avi/archive/*.tar.gz',
                    log_error=False)
            except Exception as e:
                logger.info(
                    "Unable to execute command on VM. Vm may be down %s"
                    % str(e.msg))
            if not vm.ip:
                logger.warning('vm is not there , vm name: %s' % vm.name)
                return {}
            if vm in ctrlr_list and vm.ip:
                fail("Controller not in right state")
            if file_list:
                result[vm.ip] = str(file_list).splitlines()
    else:
        result = {}
    return result
def get_cloud_sdkconn():
    """
    Build an sdk connection for the cloud named in the current test mode.

    The cloud name is looked up first under 'Cloud', then under 'VmCloud'
    in the testbed json; fails when neither contains it.

    :return: cloud sdk connection object
    """
    config = get_config()
    site_name = config.get_mode(key='site_name')
    cloud_name = config.get_mode(key='cloud')
    tb_json = config.testbed[site_name].tb_json
    cloud_json = None
    logger.info('get_cloud_sdkconn for %s' % cloud_name)
    try:
        matches = [entry for entry in tb_json.get('Cloud')
                   if entry.get('name') == cloud_name]
        cloud_json = matches[0]
    except TypeError:
        logger.info('Must be no-access cloud?')
    except IndexError:
        logger.info("Couldn't find a cloud matching name: %s" % cloud_name)
    if not cloud_json:
        cloud_json = None  # an empty match list also means "not found"
        try:
            # Check in vm clouds
            vm_matches = [entry for entry in tb_json.get('VmCloud')
                          if entry.get('name') == cloud_name]
            cloud_json = vm_matches[0]
        except TypeError:
            logger.info('no VmCloud defined in the testbed')
    if not cloud_json:
        fail('cloud_json None, No Cloud defined in Testbed file')
    return get_vm_cloud_sdk(cloud_json=cloud_json)
def verify_iptables_rules(access, ip_address, type, **kwargs):
    '''
    Verify the controllers' AVI_INPUT iptables chain and matching ipset
    cover the given access rule and addresses.

    :param access: access rule / ipset name to look for
    :param ip_address: ip address(es) the rules should cover
    :param type: address type forwarded to append_rules_to_rules_ipset
    :param kwargs: unused
    :return: True as soon as one controller's ipset covers the input set;
        fails otherwise
    '''
    controllers = infra_utils.get_vm_of_type('controller')
    for each_controller in controllers:
        cmd = "iptables -L AVI_INPUT"
        output = each_controller.execute_command(cmd)
        # The chain must mention the access rule or at least a DROP rule.
        if access not in str(output) and 'DROP' not in str(output):
            logger_utils.fail('IP Tables Rules not configured Properly')
        cmd = "ipset --list %s -o xml" % access
        ip_set_output = each_controller.execute_command(cmd)
        if ip_set_output:
            ip_set_list = get_elements_set("".join(ip_set_output))
            system_ip_set = IPSet(ip_set_list)
            input_ip_set = append_rules_to_rules_ipset(ip_address, type)
            # NOTE(review): returns after the first controller whose ipset
            # covers the input set — confirm the remaining controllers do
            # not need to be verified as well.
            if input_ip_set.issubset(system_ip_set):
                return True
            logger_utils.fail(
                'IP Set Rules not configured Properly for Controller '
                '%s' % each_controller.ip)
def remove_ip_addr_group(name, access, **kwargs):
    '''
    Remove an IP address group reference from the given management-access
    rule of the system configuration and push the update.

    :param name: name of the ipaddrgroup object to remove
    :param access: management access key under mgmt_ip_access_control
    :param kwargs: unused
    :return: None; the updated systemconfiguration is PUT back
    '''
    access_object, systemconfiguration, mgmt_ip_access_control = \
        get_mgmt_access_objects(access)
    mgmt_ip_access_control = systemconfiguration.get('mgmt_ip_access_control')
    if not mgmt_ip_access_control:
        systemconfiguration['mgmt_ip_access_control'] = {}
    access_object = systemconfiguration.get('mgmt_ip_access_control').get(
        access)
    status_code, response = rest.get('ipaddrgroup', name=name)
    if status_code >= 300:
        logger_utils.fail('Error in retrieving IP address group')
    ip_addr_group_ref = response.get("url")
    # FIX: iterate over a copy — removing from the list while iterating it
    # skips the element that follows each removal.
    for group_ref in list(access_object['group_refs']):
        if ip_addr_group_ref == group_ref:
            access_object['group_refs'].remove(group_ref)
    rest.put('systemconfiguration', data=systemconfiguration)
def error_counters_should_be_under_threshold(shm_runtime, threshold=0):
    """
    Fail when any current error counter of a health-monitor runtime exceeds
    the given threshold.

    :param shm_runtime: health monitor runtime dict
    :param threshold: maximum allowed value per error counter
    :return: None; fails listing every offending counter
    """
    monitored_types = (
        'HEALTH_MONITOR_TCP', 'HEALTH_MONITOR_HTTP', 'HEALTH_MONITOR_HTTPS',
        'HEALTH_MONITOR_EXTERNAL', 'HEALTH_MONITOR_UDP',
        'HEALTH_MONITOR_DNS', 'HEALTH_MONITOR_PING')
    if shm_runtime['health_monitor_type'] not in monitored_types:
        return
    logger.debug('shm_runtime %s' % shm_runtime)
    offenders = []
    if 'curr_count' in shm_runtime and len(shm_runtime['curr_count']) > 0:
        for counter in shm_runtime['curr_count']:
            if int(counter['count']) > threshold:
                offenders.append("%s: %s" % (counter['type'],
                                             counter['count']))
    if len(offenders):
        logger_utils.fail('ERROR! Non zero bad connects: %s' %
                          "".join(offenders))
def wait_until_cluster_ready(detailed_state_str="", **kwargs): """ Blocks until the controller cluster state is up or if a detailed_state_str was passed, then returns when the cluster reaches that state """ # uses site_name or config config = AviConfig.get_instance() ctrl_vm = config.get_vm_of_type('controller')[0].ip logger.debug('controller used in wait until cluster ready: %s' % ctrl_vm) rsp = None try: session = get_session() session.password = '******' session.reset_session() status_code, rsp = get('cluster', path='runtime') except Exception as e: fail('Cluster api runtime exception %s' % str(e)) if rsp and status_code == 200: # REVIEW do we need this logic implicitly checking status code still? cluster_state = rsp.get('cluster_state', {}) if ('CLUSTER_UP' in cluster_state.get('state', '') and not 'HA_NOT_READY' in cluster_state.get('state', '')): logger.info('Controller cluster is ready with cluster_state %s' % cluster_state) elif cluster_state.get('reason'): if (detailed_state_str and detailed_state_str in cluster_state.get('reason')): logger.info('Controller cluster is ready with %s' % detailed_state_str) else: fail('cluster state[%s]: %s' % (ctrl_vm, cluster_state.get('state', 'unknown'))) else: fail('cluster state[%s]: %s' % (ctrl_vm, cluster_state.get('state', 'unknown'))) elif rsp is None: fail('Cluster api runtime exception: no response.') else: fail('Cluster api runtime returned %d' % status_code)
def negative_update_server(pool_name, handle, **kwargs):
    """
    Negative test: push an invalid server port into a pool and expect the
    PUT to be rejected.

    :param pool_name: pool whose server entry is updated
    :param handle: handle of the server whose pool entry gets the bad port
    :keyword port: invalid port value to set
    :return: True once the PUT has been attempted; fails when no exception
        was raised
    """
    server = infra_utils.get_server_by_handle(handle)
    response_code, json_pool_data = rest.get('pool', name=pool_name)
    if kwargs.get('port'):
        for index, rest_server in enumerate(json_pool_data.get('servers')):
            server_ip = server.get_json().get('ip')
            # Patch only the entry whose address matches our server.
            if server_ip and server_ip.get('addr') == \
                    rest_server['ip']['addr']:
                json_pool_data['servers'][index]['port'] = kwargs.get('port')
    try:
        rest.put('pool', name=pool_name, data=json_pool_data)
        logger_utils.fail('No exception was raised in negative test case')
    except Exception as e:
        logger.info('Field port must be in the range 1-65535')
    return True
def is_pool_servers_in_state(pool_name, down_servers=None,
                             disabled_servers=None, error_string=None):
    """
    Collect the oper state of every server in a pool.

    :param pool_name: pool whose server runtime is inspected
    :param down_servers: server string of servers expected down
    :param disabled_servers: server string of servers expected disabled
    :param error_string: when set, collect oper-down reasons per server
    :return: None

    NOTE(review): parsed_down_servers / parsed_disabled_servers are built
    but never used in the visible body — confirm whether the comparison
    logic was truncated or lives in a sibling helper.
    """
    parsed_down_servers = parse_server_string(down_servers)
    parsed_disabled_servers = parse_server_string(disabled_servers)
    servers = {}
    err_str_arr = {}
    try:
        server_detail = get_server_runtime(pool_name)
        for server in server_detail:
            server_key = '%s:%s' % (server['ip_addr']['addr'],
                                    server['port'])
            state = server['oper_status']['state']
            # Two pool servers must never share the same ip:port.
            if server_key in servers:
                logger_utils.fail(
                    'Multiple servers with same ip:port combination: %s' %
                    server_key)
            servers[server_key] = state
            if error_string and state == 'OPER_DOWN':
                err_str_arr[server_key] = server['oper_status']['reason']
    except KeyError, Argument:
        logger_utils.fail('Rest result did not have required field: %s' %
                          Argument)
def negative_update_pool(pool_name, expected_error=None, **kwargs):
    """
    Negative test: update pool fields with invalid values and expect the
    PUT to fail, optionally matching an expected error substring.

    :param pool_name: pool to update
    :param expected_error: substring expected in the raised error, if any
    :param kwargs: fields to set (name, default_server_port, ...)
    :return: True once the PUT has been attempted
    """
    logger.info('update pool %s, fileds: %s' % (pool_name, kwargs))
    _, json_pool_data = rest.get('pool', name=pool_name)
    # 'name' may legitimately be set to the empty string in negative tests.
    if kwargs.get('name') or kwargs.get('name') == '':
        json_pool_data['name'] = kwargs.get('name')
    for field in ('default_server_port', 'graceful_disable_timeout',
                  'connection_ramp_duration'):
        if kwargs.get(field):
            json_pool_data[field] = kwargs.get(field)
    try:
        rest.put('pool', name=pool_name, data=json_pool_data)
        logger_utils.fail('No exception was raised in negative test case')
    except Exception as e:
        if expected_error:
            if expected_error.lower() not in str(e).lower():
                logger_utils.fail('Expected error %s did not occur\n%s' %
                                  (expected_error, str(e)))
    return True
def validate_vs_dns_deleted(vs_dns_name, retries=5, dns_vs_vip=None,
                            **kwargs):
    """
    Verify that DNS entries for vs_dns_name are gone, retrying while they
    still resolve.

    :param vs_dns_name: fqdn expected to no longer resolve
    :param retries: number of resolution attempts (5s apart)
    :param dns_vs_vip: DNS VS vip to resolve against; skip check when unset
    :param kwargs: unused
    :return: True when the entries are gone (or the check is skipped);
        fails when entries are still present after all retries
    """
    if not dns_vs_vip:
        logger.info("[SKIPPING] DNS check for VS as no DNS vip provided. Note,"
                    " controller based DNS is not supported anymore.")
        return True
    remaining = retries
    while remaining:
        resolver = dns_get_resolver(dns_vs_vip=dns_vs_vip)
        ipl, portl = dns_get_ip_ports_for_fqdn(resolver, vs_dns_name)
        if not ipl and not portl:
            return True
        remaining -= 1
        logger_utils.asleep(delay=5)
    logger_utils.fail("Unexpected[%s]: DNS entries %s, %s found" %
                      (vs_dns_name, ipl, portl))
def validate_vs_dns_info(vs_name, retries=5, **kwargs):
    """
    Validate that the VS's DNS record resolves to its vip and service ports.

    :param vs_name: virtual service name
    :param retries: number of resolution attempts (5s apart)
    :param kwargs: dns_vs_vip (required for the check), tenant
    :return: True on success or skip; False when an SNI child's fqdn is
        missing from its parent; fails when resolution never matches
    """
    dns_vs_vip = kwargs.get('dns_vs_vip', '')
    if not dns_vs_vip:
        logger.info(
            "[SKIPPING] DNS check for VS as no DNS vip provided. Note, "
            "controller based DNS is not supported anymore.")
        return True
    import lib.vs_lib as vs_lib
    vs_json = vs_lib.get_vs(vs_name, tenant=kwargs.get('tenant', 'admin'))
    if vs_json['type'] == 'VS_TYPE_VH_CHILD':
        # SNI children carry no own DNS record; resolve via the parent VS.
        if rest.get_cloud_type() != 'openshift':
            logger.info("[SKIPPING] DNS check for VS as SNI child are not "
                        "currently supported for non-openshift clouds")
            return True
        parent_ref = vs_json['vh_parent_vs_ref']
        parent_uuid = parent_ref.split('/')[-1]
        _, parent_vs = rest.get('virtualservice', uuid=parent_uuid)
        parent_vs_name = parent_vs['name']
        logger.info('SNI child VS detected; doing DNS on parent VS %s' %
                    parent_vs_name)
        child_fqdn = vs_json['vh_domain_name']
        parent_fqdns = [t['fqdn'] for t in parent_vs['dns_info']]
        # The child's domain must be registered on the parent.
        if child_fqdn not in parent_fqdns:
            return False
        vs_name = parent_vs_name  # REVIEW should it be parent or child name?
        vs_json = parent_vs
        dns_name = child_fqdn
    else:
        dns_name = vs_json['ipam_dns_records'][0]['fqdn']
    logger.trace('vs_json: %s' % vs_json)
    # Prefer the floating ip when one is configured.
    # NOTE(review): the key is tested on vs_json but read under vip[0] —
    # confirm this matches the object schema.
    if 'floating_ip' in vs_json:
        ip = vs_json['vip'][0]['floating_ip']['addr']
    else:
        ip = vs_json['vip'][0]['ip_address']['addr']
    ports = sorted([srv['port'] for srv in vs_json['services']])
    logger.info("VS [%s]: IP %s, DNS %s, Ports: %s" %
                (vs_name, ip, dns_name, ports))
    count = retries
    while count:
        if vs_lib.vs_check_ip_ports(vs_name, ip, dns_name, ports,
                                    dns_vs_vip=dns_vs_vip):
            return True
        count -= 1
        logger_utils.asleep(delay=5)
    logger_utils.fail("DNS check failed!!")
def verify_no_traffic_errors_on_client_side():
    """
    Check every client VM for httptest io-error marker files; fail when any
    are present.
    """
    for client in infra_utils.get_vm_of_type('client'):
        # Clear old markers, give in-flight traffic a moment, then recount.
        client.execute_command('rm -rf /tmp/httptest_io_*')
        time.sleep(10)
        resp = client.execute_command(
            'ls -ltr /tmp/httptest_io_error_* | wc -l')
        logger.debug('response is %s' % resp)
        if int(resp[0]) > 0:
            logger_utils.fail('Errors are generated on client side %s' %
                              resp)
def get_name_from_ref(url_ref, **kwargs):
    """
    Helps to get the Name from given URL Reference

    :param url_ref: url of the form .../<obj_type>/<uuid>
    :param kwargs: forwarded to the GET (e.g. tenant)
    :return: the 'name' field of the referenced object
    """
    if not url_ref:
        logger_utils.fail("URL ref is None")
    segments = str(url_ref).split('/')
    status_code, resp = get(segments[-2], uuid=segments[-1], **kwargs)
    return resp['name']
def set_key_rotate_period(**kwargs):
    """
    Set the key rotation period via controller properties, then wait out one
    rotation interval.

    :keyword should_pass: when False the set is expected to raise
    :return: True when an expected failure was observed
    """
    try:
        set_controller_properties(**kwargs)
    except Exception as e:
        if kwargs.get('should_pass', True):
            logger_utils.fail("set_controller_properties should fail")
        # do not sleep if expected error is caught
        return True
    logger_utils.asleep(msg='wait', delay=61)  # sleeping for 1 min, so that
def get_client_vm(vm_id=None, **kwargs):
    '''Returns VM object for client vm'''
    try:
        # Specific vm when an id is given, otherwise the first client vm.
        return get_vm_by_id(vm_id) if vm_id else get_vm_of_type('client')[0]
    except Exception:
        fail("ERROR! Did not find a client vm")
def persistence_keys_should_rotate(profile_name, old_keys, count=1):
    """
    Verify the http-cookie persistence keys rotated by `count`, re-checking
    up to three times before giving up.

    :param profile_name: persistence profile whose keys are fetched
    :param old_keys: previously captured keys to compare against
    :param count: expected rotation count
    :return: None; fails (and raises) when rotation is not observed
    """
    attempts = 3
    for attempt in range(attempts):
        fresh_keys = get_http_cookie_persistence_keys(profile_name)
        ok, msg = keys_should_rotate(old_keys, fresh_keys, int(count))
        # Hard failure fails immediately; a soft message only fails on the
        # final attempt.
        if not ok or (attempt == attempts - 1 and msg):
            logger_utils.fail(msg)
            raise RuntimeError(msg)
def get_uuid_from_ref(url_ref=None):
    """
    Helps to get the UUID from given URL Reference

    :param url_ref: url whose last path segment is '<uuid>' or '<uuid>#<name>'
    :return: the uuid string with any '#<name>' suffix stripped
    """
    if not url_ref:
        logger_utils.fail("URL ref is None")
    tail = str(url_ref).split('/')[-1]
    # Some refs carry a '#<name>' suffix; keep only the uuid part.
    return tail.split('#')[0] if '#' in tail else tail