def get_all_vnic_flows_created_on_all_secondary_se(virtualservice):
    """Return the total dispatcher 'flow_rx_create' count summed over every
    data vnic (and, on baremetal, the mgmt vnic) of all secondary SEs of a VS.

    :param virtualservice: VS name whose secondary SEs are inspected.
    :return: int sum of 'flow_rx_create' counters.
    """
    se_name_list = get_vs_secondary_se_list(virtualservice)
    logger.info('get dispatcher stats on secondary: se_name_list %s'
                % se_name_list)
    total = 0
    for se_name in se_name_list:
        se_info = get_se_info(se_name, connected=True)
        # Baremetal SEs also dispatch flows on the management vnic, so the
        # old copy-pasted mgmt-vnic block is folded into one vnic list.
        vnics = list(se_info['data_vnics'])
        if infra_utils.get_cloud_context_type() == 'baremetal':
            vnics.append(se_info['mgmt_vnic'])
        for vnic in vnics:
            params = {'intfname': vnic['if_name']}
            resp_code, json_data = rest.get('serviceengine', name=se_name,
                                            path='flowtablestat',
                                            params=params)
            for dsr in json_data:
                if 'dispatch' in dsr:
                    total += dsr['dispatch'][0]['flow_rx_create']
    return total
def wait_for_intelligent_scalein(vs_name):
    """Poll the pool behind *vs_name* until its server count has shrunk to
    the autoscale policy's min_size; fail after ~30 polls (15s apart).

    :param vs_name: virtualservice whose default pool is watched.
    """
    _, vs_obj = rest.get('virtualservice', name=vs_name)
    pool_ref = pool_lib._get_pool_from_vs(vs_obj)
    pool_uuid = pool_ref.split('pool/')[1].split('#')[0]
    _, pool_obj = rest.get('pool', uuid=pool_uuid)
    asp_ref = pool_obj['autoscale_policy_ref']
    as_policy_uuid = asp_ref.split('serverautoscalepolicy/')[1].split('#')[0]
    _, autoscale_policy = rest.get('serverautoscalepolicy',
                                   uuid=as_policy_uuid)
    min_size = autoscale_policy['min_size']
    # Poll until the configured floor is reached or attempts run out.
    for _ in xrange(30):
        _, pool_obj = rest.get('pool', uuid=pool_uuid)
        num_servers = len(pool_obj['servers'])
        if num_servers <= min_size:
            break
        logger_utils.asleep(delay=15)
    if num_servers > min_size:
        logger_utils.fail('scalein did not succeed pool %s'
                          % (str(pool_obj['servers'])))
def get_pool_default_server_name(vs_name, **kwargs):
    """Return 'hostname:port' of the first server in the VS's default pool.

    :param vs_name: virtualservice to inspect.
    """
    _, vs_obj = rest.get('virtualservice', name=vs_name)
    pool_ref = pool_lib._get_pool_from_vs(vs_obj, **kwargs)
    pool_uuid = pool_ref.split('pool/')[1].split('#')[0]
    _, pool_obj = rest.get('pool', uuid=pool_uuid)
    first_server = pool_obj['servers'][0]
    return '%s:%s' % (first_server['hostname'], first_server['port'])
def test_controller_goes_down(self):
    '''
    Test the request on sessions if the controller goes down
    Does the request on the sessions work when the controller comes back up
    '''
    switch_mode(user='******', password='******')
    logger.info('Configuring cloud, This may take sometime..')
    setup_cloud(wait_for_cloud_ready=True)
    config = get_config()
    mode = config.get_mode()
    controller = config.get_vm_of_type('controller')[0]
    session = rest.get_session()
    # Baseline SE inventory while the controller is up.
    data_1 = rest.get('serviceengine')
    cloud_obj = config.testbed[mode['site_name']].cloud_obj[mode['cloud']]
    controller_name = controller.name
    cloud_obj.powerOffVM(controller_name)
    try:
        # With the controller powered off this request is expected to fail.
        rest.get('serviceengine')
    except Exception as e:
        logger.info('Expected ReadTimeout: ' + str(e))
    cloud_obj.powerOnVM(controller_name)
    wait_until_n_cluster_nodes_ready()
    data_2 = rest.get('serviceengine')
    # Inventory must match the pre-outage snapshot after recovery.
    assert data_1 == data_2
def add_autoscale_alerts_v2(pool_name, alert_name, scaleout='true', **kwargs):
    """Attach an alertconfig to a pool's server autoscale policy.

    :param pool_name: pool name/uuid whose autoscale policy is updated.
    :param alert_name: name of the alertconfig to attach.
    :param scaleout: 'true' to attach as a scale-out trigger, anything else
        attaches it as a scale-in trigger.
    """
    _, pool_obj = rest.get('pool', uuid=pool_name)
    asp_ref = pool_obj['autoscale_policy_ref']
    asp_uuid = asp_ref.split('serverautoscalepolicy/')[1].split('#')[0]
    _, asp_obj = rest.get('serverautoscalepolicy', uuid=asp_uuid)
    _, acfg_obj = rest.get('alertconfig', name=alert_name)
    alert_ref = acfg_obj['url']
    # BUG FIX: scaleout arrives as the *string* 'true'/'false'; the old
    # truthiness test ('if scaleout:') treated 'false' as true. Booleans
    # still behave as before (str(True).lower() == 'true').
    if str(scaleout).lower() == 'true':
        asp_obj.setdefault('scaleout_alertconfig_refs', []).append(alert_ref)
    else:
        asp_obj.setdefault('scalein_alertconfig_refs', []).append(alert_ref)
    logger.info('updating as_policy %s' % asp_obj)
    rc, asp_obj = rest.put('serverautoscalepolicy', name=asp_obj['name'],
                           data=asp_obj)
    as_info = get_autoscale_info(pool_name)
    assert as_info
def set_systemconfiguration(**kwargs):
    """Disable password-strength checking and (optionally) configure DNS
    entries on the controller systemconfiguration.

    :kwarg dns_configuration: 'True' (default) installs 10.10.0.100 as the
        sole DNS server.
    :kwarg dns_virtualservice_uuids: VS *name* to register as the DNS VS
        (looked up to a uuid before being stored).
    """
    logger.info('-- set_systemconfiguration -- \n')
    _, data = get('systemconfiguration')
    data['portal_configuration']['password_strength_check'] = False
    logger.info('Data after: %s---' % data)
    if kwargs.get('dns_configuration', 'True') == 'True':
        ip_addr = {'addr': '10.10.0.100', 'type': 'V4'}
        data['dns_configuration']['server_list'] = []
        data['dns_configuration']['server_list'].append(ip_addr)
    data['dns_virtualservice_uuids'] = []
    if kwargs.get('dns_virtualservice_uuids'):
        vs_uuid = get_uuid_by_name('virtualservice',
                                   kwargs.get('dns_virtualservice_uuids'))
        # REVIEW how does this work?
        data['dns_virtualservice_uuids'].append(vs_uuid)
    try:
        put('systemconfiguration', data=json.dumps(data))
    except Exception as e:
        # Best-effort single retry; the first PUT occasionally fails.
        logger.info('put failed %s. Retrying' % e)
        put('systemconfiguration', data=json.dumps(data))
    try:
        _, r = get('systemconfiguration')
    except Exception as e:
        logger.info('get systemconfiguration failed with %s. Retrying' % e)
        _, r = get('systemconfiguration')
    logger.info('\n --- Get system configuration: %s --- \n' % r)
def set_ha_mode_best_effort():
    """Switch every serviceenginegroup to shared best-effort HA placement:
    HA_MODE_SHARED, distributed algo, 1 buffer SE, min scaleout 1.

    Each group is PUT back individually with one best-effort retry.
    """
    logger.info('-- set_ha_mode_best_effort -- \n')
    # NOTE: removed a dead, never-invoked inner @aretry helper that simply
    # wrapped get('serviceenginegroup').
    _, data = get('serviceenginegroup')
    for _data in data['results']:
        _data['ha_mode'] = 'HA_MODE_SHARED'
        _data['algo'] = 'PLACEMENT_ALGO_DISTRIBUTED'
        _data['buffer_se'] = 1
        _data['min_scaleout_per_vs'] = 1
        logger.info('Data after changing, before post: %s---' % _data)
        try:
            r = put('serviceenginegroup', uuid=_data['uuid'],
                    data=json.dumps(_data))
        except Exception as e:
            logger.info('put failed %s. Retrying' % e)
            r = put('serviceenginegroup', uuid=_data['uuid'],
                    data=json.dumps(_data))
    try:
        _, r = get('serviceenginegroup')
    except Exception as e:
        # BUG FIX: this retries the GET; the old message said 'put failed'.
        logger.info('get failed %s. Retrying' % e)
        _, r = get('serviceenginegroup')
    logger.info('\n --- Get serviceenginegroup: %s --- \n' % r)
def get_ip_addresses_assigned(self):
    """Collect every configured address: all VS VIPs (v4 and v6) plus every
    pool-server IP, scanned across all tenants.

    :return: list of address strings.
    """
    import avi_objects.rest as rest
    from avi_objects.avi_config import AviConfig
    config = AviConfig.get_instance()
    mode = config.get_mode()
    saved_tenant = mode['tenant']
    saved_cloud = mode['cloud']
    # Widen visibility to every tenant for the scan.
    config.switch_mode(tenant='*', cloud=None)
    addresses = []
    # Virtualservice VIPs, both address families.
    st, vs_rsp = rest.get('virtualservice?page_size=1000')
    for vs in vs_rsp['results']:
        for vip in vs.get('vip', []):
            for key in ('ip_address', 'ip6_address'):
                if key in vip:
                    addresses.append(vip[key]['addr'])
    # Pool server addresses.
    st, pool_rsp = rest.get('pool?page_size=1000')
    for pool in pool_rsp['results']:
        for server in pool.get('servers', []):
            addresses.append(server['ip']['addr'])
    config.switch_mode(tenant=saved_tenant, cloud=saved_cloud)
    logger.trace('Configured IP Addresses %s' % addresses)
    return addresses
def get_all_vnic_flow_create_on_primary_se(vs_name):
    """Return the total dispatcher 'flow_rx_create' count summed over every
    data vnic (and, on baremetal, the mgmt vnic) of the primary SE of a VS.

    :param vs_name: VS name whose primary SE is inspected.
    :return: int sum of 'flow_rx_create' counters.
    """
    se_name = vs_get_primary_se_name(vs_name)
    logger.debug('get_all_dispatcher_stats_on_primary_se: %s' % se_name)
    se_info = get_se_info(se_name, connected=True)
    # Baremetal SEs also dispatch flows on the management vnic, so the old
    # copy-pasted mgmt-vnic block is folded into one vnic list.
    vnics = list(se_info['data_vnics'])
    if infra_utils.get_cloud_context_type() == 'baremetal':
        vnics.append(se_info['mgmt_vnic'])
    total = 0
    for vnic in vnics:
        params = {'intfname': vnic['if_name']}
        resp_code, json_data = rest.get('serviceengine', name=se_name,
                                        path='flowtablestat', params=params)
        for dsr in json_data:
            if 'dispatch' in dsr:
                total += dsr['dispatch'][0]['flow_rx_create']
    return total
def vinfra_request_api(obj_type, **kwargs):
    """GET *obj_type* from the controller.

    If a truthy 'name' kwarg is present it is used as the sole lookup key;
    otherwise all kwargs are passed straight through to rest.get.

    :return: the response data (status code is discarded).
    """
    name = kwargs.get("name", None)
    if not name:
        resp_code, resp_data = rest.get(obj_type, **kwargs)
    else:
        resp_code, resp_data = rest.get(obj_type, name=name)
    return resp_data
def autoscale_lower_max_size(pool_name, **kwargs):
    """Lower the pool's autoscale max_size to (current servers - 1), wait
    for the resulting scale-in, then restore the original policy sizes.

    Extra autoscale-policy fields may be passed through kwargs.

    :param pool_name: name of the pool whose policy is tweaked.
    :return: the restored (original) max_size.
    """
    _, pool_obj = rest.get('pool', name=pool_name)
    asp_ref = pool_obj['autoscale_policy_ref']
    as_policy_uuid = asp_ref.split('serverautoscalepolicy/')[1].split('#')[0]
    _, autoscale_policy = rest.get('serverautoscalepolicy',
                                   uuid=as_policy_uuid)
    logger.info('received asp %s type %s' % (autoscale_policy,
                                             type(autoscale_policy)))
    orig_max_size = autoscale_policy['max_size']
    orig_min_size = autoscale_policy['min_size']
    num_servers = len(pool_obj['servers'])
    for k, v in kwargs.iteritems():
        logger.info('k,v %s,%s' % (k, v))
        autoscale_policy[k] = v
    if num_servers < 2:
        logger_utils.fail('Number of servers is less than required %d'
                          % num_servers)
    autoscale_policy['max_size'] = num_servers - 1
    # Keep the policy self-consistent: min_size may not exceed max_size.
    autoscale_policy['min_size'] = min(autoscale_policy['min_size'],
                                       autoscale_policy['max_size'])
    asp_json = json.dumps(autoscale_policy)
    logger.info('json: %s' % asp_json)
    rc, result = rest.put('serverautoscalepolicy', uuid=as_policy_uuid,
                          data=asp_json)
    logger.info('updating as_policy %s' % autoscale_policy)
    as_info = get_autoscale_info(pool_name)
    assert as_info
    logger_utils.asleep(delay=AS_WAIT_TIME)
    # Wait (up to 2 minutes) for the server count to actually drop.
    # BUG FIX: the old check 'new_num_servers <= num_servers' was true on
    # the first poll and never really waited for the scale-in.
    for _ in xrange(12):
        logger_utils.asleep(delay=10)
        _, pool_obj = rest.get('pool', name=pool_name)
        new_num_servers = len(pool_obj['servers'])
        if new_num_servers < num_servers:
            break
    # Restore the original policy limits.
    _, autoscale_policy = rest.get('serverautoscalepolicy',
                                   uuid=as_policy_uuid)
    autoscale_policy['max_size'] = orig_max_size
    autoscale_policy['min_size'] = orig_min_size
    asp_json = json.dumps(autoscale_policy)
    rc, result = rest.put('serverautoscalepolicy', uuid=as_policy_uuid,
                          data=asp_json)
    logger.info('json: %s rc: %s results: %s' % (asp_json, rc, result))
    return autoscale_policy['max_size']
def manual_vs_autoscale(vs_name, action):
    """
    :param vs_name: pool name for which autoscaling is requested
    :param action: SCALEOUT or SCALEIN
    :return:
    """
    # Resolve the VS down to its backing pool, then trigger the action.
    _, vs_data = rest.get('virtualservice', name=vs_name)
    ref = pool_lib._get_pool_from_vs(vs_data)
    uuid = ref.split('pool/')[1].split('#')[0]
    _, pool_data = rest.get('pool', uuid=uuid)
    manual_autoscale(pool_data['name'], action)
def vrf_get(vrf_name='admin', **kwargs):
    """GET a vrfcontext, optionally at a sub-path.

    :param vrf_name: VRF context name (default 'admin').
    :kwarg uri_specific: optional sub-path appended to the vrfcontext GET.
    :return: the JSON body of the response.
    """
    uri_specific = kwargs.get('uri_specific', None)
    if uri_specific:
        # BUG FIX: the requested sub-path was ignored and the literal
        # string 'uri_specific' was sent as the path instead.
        resp_code, json_data = rest.get('vrfcontext', name=vrf_name,
                                        path=uri_specific)
    else:
        resp_code, json_data = rest.get('vrfcontext', name=vrf_name)
    return json_data
def get_vs_default_pool_name(vs_name, **kwargs):
    """Return the name of the pool backing the given virtualservice.

    :param vs_name: virtualservice to inspect.
    :param kwargs: passed through to the pool-from-VS lookup.
    """
    _, vs_json = rest.get('virtualservice', name=vs_name)
    ref = pool_lib._get_pool_from_vs(vs_json, **kwargs)
    _, pool_json = rest.get('pool', uuid=rest.get_uuid_from_ref(ref))
    return pool_json['name']
def autoscale_raise_min_size(pool_name, **kwargs):
    """Raise the pool's autoscale min_size to (current servers + 1), wait
    for the scale-out, then restore the original policy sizes.

    Extra autoscale-policy fields may be passed through kwargs.

    :param pool_name: name of the pool whose policy is tweaked.
    :return: the restored (original) min_size.
    """
    _, pool_obj = rest.get('pool', name=pool_name)
    asp_ref = pool_obj['autoscale_policy_ref']
    as_policy_uuid = asp_ref.split('serverautoscalepolicy/')[1].split('#')[0]
    _, autoscale_policy = rest.get('serverautoscalepolicy',
                                   uuid=as_policy_uuid)
    logger.info('received asp %s type %s ' % (autoscale_policy,
                                              type(autoscale_policy)))
    # NOTE: removed an unused deepcopy of the policy (dead code).
    orig_min_size = autoscale_policy['min_size']
    orig_max_size = autoscale_policy['max_size']
    num_servers = len(pool_obj['servers'])
    for k, v in kwargs.iteritems():
        logger.info('k,v %s,%s' % (k, v))
        autoscale_policy[k] = v
    autoscale_policy['min_size'] = num_servers + 1
    # Keep the policy self-consistent: max_size must cover min_size.
    autoscale_policy['max_size'] = max(autoscale_policy['max_size'],
                                       autoscale_policy['min_size'])
    asp_json = json.dumps(autoscale_policy)
    logger.info(' json: %s' % asp_json)
    rc, result = rest.put('serverautoscalepolicy', uuid=as_policy_uuid,
                          data=asp_json)
    logger.info('updating as_policy %s %s %s' % (autoscale_policy, rc,
                                                 result))
    logger_utils.asleep(delay=AS_WAIT_TIME)
    get_autoscale_info(pool_name)
    _, pool_obj = rest.get('pool', name=pool_name)
    num_servers = len(pool_obj['servers'])
    if num_servers == 0:
        logger_utils.fail('Pool %s has no up servers' % pool_name)
    # Restore the original policy limits.
    _, autoscale_policy = rest.get('serverautoscalepolicy',
                                   uuid=as_policy_uuid)
    autoscale_policy['min_size'] = orig_min_size
    autoscale_policy['max_size'] = orig_max_size
    asp_json = json.dumps(autoscale_policy)
    logger.info('json: %s' % asp_json)
    rc, result = rest.put('serverautoscalepolicy', uuid=as_policy_uuid,
                          data=asp_json)
    logger.info('rc: %s result: %s' % (rc, result))
    return autoscale_policy['min_size']
def get_and_delete_all_configs(skip_cloud=False, check_status_code=False,
                               tenant_list=None, fix_url=True, **kwargs):
    """Delete every deletable config object in every tenant (or only the
    given tenants), demoting Default-Cloud to no-access first.

    BUG FIX: tenant_list previously used a mutable default argument ([]),
    which is shared across calls; None is used as the sentinel instead
    (falsy, so the 'fetch all tenants' path is unchanged).

    :param skip_cloud: skip cloud and tenant object types.
    :param check_status_code: passed through to GETs.
    :param tenant_list: explicit tenants to clean; falsy means all tenants.
    :param fix_url: unused; kept for caller compatibility.
    """
    move_all_se_to_group('Default-Group')
    session = get_session()
    config = AviConfig.get_instance()
    defaults = get('default-values').json()
    logger.info(defaults)
    tenant_resp = get('tenant').json()
    if not tenant_list:
        tenants = [str(entry['name'])
                   for entry in tenant_resp.get('results', [])]
    else:
        tenants = tenant_list
    for _tenant in tenants:
        switch_mode(tenant=_tenant)
        # Delete in reverse dependency order.
        for obj_type in reversed(obj_order):
            if (((obj_type == 'cloud' or obj_type == 'tenant') and skip_cloud)
                    or (obj_type in ['sslcertificaterequest', 'staticroute'])):
                continue
            status_code, data = get(obj_type,
                                    check_status_code=check_status_code)
            if status_code > 400:
                continue
            for d in data['results']:
                if obj_type == 'cloud' and d['name'] == 'Default-Cloud':
                    if d['vtype'] != 'CLOUD_NONE':
                        logger.info('Update Default-Cloud from %s to '
                                    'no-access' % d['vtype'])
                        # Strip whichever cloud-specific config is present
                        # (at most one, matching the old elif chain).
                        for cfg_key in ('vcenter_configuration',
                                        'openstack_configuration',
                                        'aws_configuration',
                                        'cloudstack_configuration',
                                        'vca_configuration',
                                        'apic_configuration'):
                            if d.get(cfg_key):
                                d.pop(cfg_key)
                                break
                        d['vtype'] = 'CLOUD_NONE'
                        # review can we use uuid=d['uuid']?
                        put('cloud', name=d['name'], data=json.dumps(d))
                # Never delete system default objects.
                if obj_type in defaults.get('default', []) and \
                        d['uuid'] in defaults['default'][obj_type]:
                    continue
                logger.info('Deleting: %s:%s' % (obj_type, d['name']))
                if obj_type in ['sslcertificaterequest',
                                'sslkeyandcertificate_import']:
                    delete('sslkeyandcertificate', name=d['name'],
                           check_status_code=False)
                else:
                    delete(obj_type, name=d['name'], check_status_code=False)
def test_switch_mode_user(self, create_new_user):
    '''
    Switch mode user and test if the session is updated correctly based on mode user
    '''
    switch_mode(user='******', password='******')
    se_1 = rest.get('serviceengine')
    session = rest.get_session()
    # Session reflects the first mode's credentials.
    assert session.username == 'admin' and session.password == 'avi123'
    switch_mode(user='******')
    se_2 = rest.get('serviceengine')
    session = rest.get_session()
    # Same data regardless of which user issued the GET.
    assert se_1 == se_2
    # Session now reflects the switched user.
    assert session.username == 'test-user-1' and session.password == 'avi123'
    clear_session(all_sessions=True)
def test_clear_session_basic(self):
    '''
    Basic usage of clear_session
    '''
    switch_mode(user='******', password='******')
    rest.get('serviceengine')
    config = get_config()
    session = rest.get_session()
    switch_mode(session=session)
    # A session exists both as the current one and in the session cache.
    assert config.sessions and config.session
    clear_session()
    config = get_config()
    context_key = config.get_context_key()
    # After clearing: no current session, and its cache entry is gone.
    assert config.session is None and context_key not in config.sessions
def test_session_expiry(self):
    '''
    Tests requests on expired sessions
    '''
    switch_mode(user='******', password='******')
    # Shrink the controller's API idle timeout to 1 minute.
    data = json.dumps({'api_idle_timeout': 1})
    rest.put('controllerproperties', data=data)
    clear_session()
    data_1 = rest.get('serviceengine')
    # Wait past the idle timeout; the follow-up GET is expected to still
    # succeed and return identical data.
    time.sleep(2 * 60)
    data_2 = rest.get('serviceengine')
    assert data_1 == data_2
    # Much longer wait; behavior should be the same.
    time.sleep(21 * 60)
    data_1 = rest.get('serviceengine')
    assert data_1 == data_2
def _get_pool_from_vs(vs_obj, **kwargs): """ Check if there is a poolgroup on this VS, if so get pool from the poolgroup For containers, poolgroup will be there by default :param vs_obj: :param kwargs: :return: """ pool_ref = vs_obj.get('pool_ref', None) if pool_ref: return pool_ref pg_ref = vs_obj.get('pool_group_ref', None) if not pg_ref: logger.info("Didnot find pool or poolgroup on this VS, very strange!!") return None pg_uuid = rest.get_uuid_from_ref(pg_ref) _, pg_obj = rest.get('poolgroup', uuid=pg_uuid) # Pick first member (there will most likely be only one member) pg_mem = pg_obj['members'][0] pool_ref = pg_mem.get('pool_ref', None) if not pool_ref: logger.info("Could not find pool on the VS or on the poolgroup " "associated with the VS") logger.info("vs %s: %s\n; poolgroup %s: %s" % (vs_obj['name'], vs_obj, pg_uuid, pg_obj)) return None return pool_ref
def remove_ip_addr_group(name, access, **kwargs):
    '''
    Detach the named ipaddrgroup from a management-access control list and
    PUT the updated systemconfiguration back.

    :param name: ipaddrgroup name to remove.
    :param access: which mgmt access list to edit.
    :param kwargs: unused; kept for caller compatibility.
    '''
    access_object, systemconfiguration, mgmt_ip_access_control = \
        get_mgmt_access_objects(access)
    mgmt_ip_access_control = systemconfiguration.get('mgmt_ip_access_control')
    if not mgmt_ip_access_control:
        systemconfiguration['mgmt_ip_access_control'] = {}
    access_object = systemconfiguration.get('mgmt_ip_access_control').get(
        access)
    status_code, response = rest.get('ipaddrgroup', name=name)
    if status_code >= 300:
        logger_utils.fail('Error in retrieving IP address group')
    ip_addr_group_ref = response.get("url")
    # BUG FIX: the old code called list.remove() while iterating the same
    # list, which skips the element after each removal. Rebuild instead.
    access_object['group_refs'] = [
        ref for ref in access_object['group_refs']
        if ref != ip_addr_group_ref]
    rest.put('systemconfiguration', data=systemconfiguration)
def update_ip_addr_group(name, access, **kwargs):
    '''
    Attach the named ipaddrgroup to a management-access control list and
    PUT the updated systemconfiguration back.

    :param name: ipaddrgroup name to attach.
    :param access: which mgmt access list to edit.
    :param kwargs: unused; kept for caller compatibility.
    '''
    access_object, systemconfiguration, mgmt_ip_access_control = \
        get_mgmt_access_objects(access)
    mgmt_ip_access_control = systemconfiguration.get('mgmt_ip_access_control')
    if not mgmt_ip_access_control:
        systemconfiguration['mgmt_ip_access_control'] = {}
    access_object = systemconfiguration.get('mgmt_ip_access_control').get(
        access)
    status_code, response = rest.get('ipaddrgroup', name=name)
    # Build a fresh control entry if none exists yet.
    if not access_object:
        access_object = {'match_criteria': 0}
    if not access_object.get('group_refs'):
        access_object['group_refs'] = []
    access_object['group_refs'].append(response.get('url'))
    systemconfiguration['mgmt_ip_access_control'][access] = access_object
    rest.put('systemconfiguration', data=systemconfiguration)
def wait_until_cluster_ready(detailed_state_str="", **kwargs): """ Blocks until the controller cluster state is up or if a detailed_state_str was passed, then returns when the cluster reaches that state """ # uses site_name or config config = AviConfig.get_instance() ctrl_vm = config.get_vm_of_type('controller')[0].ip logger.debug('controller used in wait until cluster ready: %s' % ctrl_vm) rsp = None try: session = get_session() session.password = '******' session.reset_session() status_code, rsp = get('cluster', path='runtime') except Exception as e: fail('Cluster api runtime exception %s' % str(e)) if rsp and status_code == 200: # REVIEW do we need this logic implicitly checking status code still? cluster_state = rsp.get('cluster_state', {}) if ('CLUSTER_UP' in cluster_state.get('state', '') and not 'HA_NOT_READY' in cluster_state.get('state', '')): logger.info('Controller cluster is ready with cluster_state %s' % cluster_state) elif cluster_state.get('reason'): if (detailed_state_str and detailed_state_str in cluster_state.get('reason')): logger.info('Controller cluster is ready with %s' % detailed_state_str) else: fail('cluster state[%s]: %s' % (ctrl_vm, cluster_state.get('state', 'unknown'))) else: fail('cluster state[%s]: %s' % (ctrl_vm, cluster_state.get('state', 'unknown'))) elif rsp is None: fail('Cluster api runtime exception: no response.') else: fail('Cluster api runtime returned %d' % status_code)
def delete_servers(pool_name, how_many, prefix, cleanup_backend=True):
    """Remove servers '<prefix>1'..'<prefix>N' from a pool: tear down their
    backends, drop them from the model, then push the trimmed server list
    to the controller.

    :param pool_name: pool to shrink.
    :param how_many: number of servers to delete.
    :param prefix: server-handle prefix.
    :param cleanup_backend: also clean up the backend.
    """
    logger.info('delete servers from pool %s' % pool_name)
    config = infra_utils.get_config()
    context_key = config.get_context_key()
    pool = config.site_objs[context_key]['pool'].get(pool_name)
    handles = ['%s%s' % (prefix, idx + 1) for idx in range(int(how_many))]
    # All backends first, then all model entries (matches original order).
    for handle in handles:
        _delete_server_backend(handle, pool, cleanup_backend)
    for handle in handles:
        _delete_server_model(handle, pool)
    st, pool_json_ctrl = rest.get('pool', name=pool_name)
    pool_json_ctrl['servers'] = [srv.get_json()
                                 for srv in pool.servers.values()]
    rest.put('pool', name=pool_name, data=pool_json_ctrl)
def get_all_servers_of_pool(pool_name):
    """Return ['addr:port', ...] for every server configured on the pool.

    :param pool_name: pool to inspect.
    :return: list of 'addr:port' strings (empty when the pool has none).
    """
    status_code, rsp = rest.get('pool', name=pool_name)
    # ROBUSTNESS: a pool with no servers may omit the 'servers' key
    # entirely; treat that as an empty list instead of raising KeyError.
    ret = ['%s:%s' % (server['ip']['addr'], server['port'])
           for server in rsp.get('servers', [])]
    logger.info('return list: %s' % ret)
    return ret
def negative_update_pool(pool_name, expected_error=None, **kwargs):
    """Negative test: apply the given pool field updates and expect the
    controller to reject them.

    :param pool_name: pool to update.
    :param expected_error: substring that must appear in the raised error
        (case-insensitive); if None any error is accepted.
    :param kwargs: fields to set -- name, default_server_port,
        graceful_disable_timeout, connection_ramp_duration.
    :return: True when the update failed as expected.
    """
    # Typo fixed in the log message ('fileds' -> 'fields').
    logger.info('update pool %s, fields: %s' % (pool_name, kwargs))
    _, json_pool_data = rest.get('pool', name=pool_name)
    # 'name' may deliberately be the empty string in negative tests.
    if kwargs.get('name') or kwargs.get('name') == '':
        json_pool_data['name'] = kwargs.get('name')
    if kwargs.get('default_server_port'):
        json_pool_data['default_server_port'] = kwargs.get(
            'default_server_port')
    if kwargs.get('graceful_disable_timeout'):
        json_pool_data['graceful_disable_timeout'] = kwargs.get(
            'graceful_disable_timeout')
    if kwargs.get('connection_ramp_duration'):
        json_pool_data['connection_ramp_duration'] = kwargs.get(
            'connection_ramp_duration')
    try:
        rest.put('pool', name=pool_name, data=json_pool_data)
        # NOTE(review): if put() succeeds, fail() raises and is caught by
        # the except below -- verify logger_utils.fail's exception is not
        # masked by the expected_error matching.
        logger_utils.fail('No exception was raised in negative test case')
    except Exception as e:
        if expected_error:
            if expected_error.lower() not in str(e).lower():
                logger_utils.fail('Expected error %s did not occur\n%s'
                                  % (expected_error, str(e)))
    return True
def update_healthmonitor(hm_name, **kwargs):
    """Update selected fields of a healthmonitor and PUT it back.

    Supported kwargs: type, send_interval, receive_timeout,
    successful_checks, failed_checks. Falsy values are skipped, matching
    the original if-chain behavior.

    :param hm_name: healthmonitor name to update.
    """
    logger.info('update healthmonitor %s' % hm_name)
    status_code, json_hm_data = rest.get('healthmonitor', name=hm_name)
    # IDIOM: replaced five copy-pasted 'if kwargs.get(...)' blocks with a
    # single loop over the updatable field names.
    for field in ('type', 'send_interval', 'receive_timeout',
                  'successful_checks', 'failed_checks'):
        if kwargs.get(field):
            json_hm_data[field] = kwargs.get(field)
    rest.put('healthmonitor', name=hm_name, data=json_hm_data)
def get_hmon_stats(pool_name, hm_name, handle, field1='', field2=''):
    """Fetch health-monitor runtime stats for one server of a pool.

    :param pool_name: pool whose hmonstat runtime is queried.
    :param hm_name: health monitor name to match.
    :param handle: server handle, or an 'ip:port' name used directly.
    :param field1: key in the per-server record holding the hm list.
    :param field2: optional sub-field of the matched hm entry.
    :return: hm[field2] if field2 is given, else the whole hm entry;
        None when nothing matches.
    """
    resp_code, resp_data = rest.get('pool', name=pool_name,
                                    path='/runtime/server/hmonstat')
    common.check_response_for_errors(resp_data)
    # Check if server is in handle format or name
    if ':' in handle:
        name = handle
    else:
        server = infra_utils.get_server_by_handle(handle)
        name = server.ip() + ':' + str(server.port())
    # BUG FIX: logger.debug was called with a stray positional argument and
    # no format placeholder; interpolate the name into the message.
    logger.debug('server_name %s' % name)
    shm = resp_data[0].get('server_hm_stat')
    for server in shm:
        if name == server.get('server_name'):
            for hm in server[field1]:
                if hm_name == hm.get('health_monitor_name'):
                    if field2:
                        return hm[field2]
                    else:
                        return hm
def resize_subnet(name, **kwargs):
    """Edit a configured subnet on a network object: change its mask and/or
    add or delete a static IP, then PUT the network back.

    :param name: network object name.
    :kwarg subnet_ip: address of the subnet prefix to match.
    :kwarg subnet_mask: current mask of the subnet to match (required).
    :kwarg new_subnet_mask: if set, replace the prefix mask.
    :kwarg static_ip: if set, append this V4 static IP.
    :kwarg delete_stat_ip: if set, remove matching static IP entries.
    """
    _, json_data = rest.get('network', name=name)
    subnet_ip = kwargs.get('subnet_ip', None)
    s_mask = kwargs.get('subnet_mask', None)
    new_subnet_mask = kwargs.get('new_subnet_mask', None)
    stat_ip = kwargs.get('static_ip', None)
    del_stat_ip = kwargs.get('delete_stat_ip', None)
    subnet_mask = int(s_mask)
    for index, subnet in enumerate(json_data['configured_subnets']):
        if (subnet['prefix']['mask'] == subnet_mask and
                subnet['prefix']['ip_addr']['addr'] == subnet_ip):
            if new_subnet_mask:
                json_data['configured_subnets'][index]['prefix'][
                    'mask'] = new_subnet_mask
            if stat_ip:
                stat_ip_dict = {'type': "V4", 'addr': stat_ip}
                if 'static_ips' in subnet.keys():
                    json_data['configured_subnets'][index][
                        'static_ips'].append(stat_ip_dict)
                else:
                    json_data['configured_subnets'][index]['static_ips'] = [
                        stat_ip_dict]
            if del_stat_ip:
                # BUG FIX: iterate over a copy -- removing from the list
                # being iterated skips the element after each removal.
                for static_ip in list(subnet['static_ips']):
                    if static_ip['addr'] == del_stat_ip:
                        json_data['configured_subnets'][index][
                            'static_ips'].remove(static_ip)
    rest.put('network', name=json_data['name'], data=json_data)
def negative_update_server(pool_name, handle, **kwargs):
    """
    Negative test: set an out-of-range port on one pool server and expect
    the controller to reject the pool update.

    :param pool_name: pool containing the server.
    :param handle: server handle used to resolve the server's IP.
    :param kwargs: 'port' -- the (invalid) port value to set.
    :return: True when the update was rejected as expected.
    """
    server = infra_utils.get_server_by_handle(handle)
    response_code, json_pool_data = rest.get('pool', name=pool_name)
    if kwargs.get('port'):
        # Locate the matching server entry by IP and overwrite its port.
        for index, rest_server in enumerate(json_pool_data.get('servers')):
            json_server_data = server.get_json()
            server_ip = json_server_data.get('ip')
            if server_ip and server_ip.get(
                    'addr') == rest_server['ip']['addr']:
                json_pool_data['servers'][index]['port'] = kwargs.get('port')
    try:
        rest.put('pool', name=pool_name, data=json_pool_data)
        # NOTE(review): fail() raises here and is caught by the except
        # below, which then returns True -- confirm this is intended.
        logger_utils.fail('No exception was raised in negative test case')
    except Exception as e:
        logger.info('Field port must be in the range 1-65535')
        return True