def wait_until_n_cluster_nodes_ready_inner():
    """Poll the cluster runtime API once; return when `n` nodes are active
    and the cluster state matches the expected HA state for that `n`.

    Relies on `n`, `get`, `fail` and `logger` from the enclosing scope.
    Reaching the final `fail(...)` is the not-ready path; this assumes
    `fail` raises/aborts rather than returning -- TODO confirm.
    """
    rsp = None
    try:
        # st: HTTP status code, rsp: decoded JSON response body
        st, rsp = get('cluster/runtime')
    except Exception as ex:
        fail('Cluster api runtime exception: %s' % ex)
    # NOTE(review): if get() raised, `st` is unbound here; this only works
    # if fail() above does not return -- confirm.
    if rsp and st == 200:
        node_states = rsp.get('node_states', [])
        cluster_state = rsp.get('cluster_state', {})
        cl_state = cluster_state.get('state', 'unknown')
        # Count nodes currently reporting CLUSTER_ACTIVE.
        up_nodes = 0
        for node in node_states:
            if node.get('state') == 'CLUSTER_ACTIVE':
                up_nodes += 1
        if (up_nodes != n):
            logger.debug('Cluster (status:%s) expects %d active nodes '
                         'but contains %d active nodes' % (cl_state, n, up_nodes))
        # Expected cluster state depends on the requested node count:
        # 1 -> no HA, 2 -> HA compromised, 3 -> HA active.
        elif (n == 1 and cl_state == 'CLUSTER_UP_NO_HA'):
            logger.info('Cluster is ready! Cluster state is %s' % cluster_state)
            return
        elif (n == 2 and cl_state == 'CLUSTER_UP_HA_COMPROMISED'):
            logger.info('Cluster is ready! Cluster state is %s' % cluster_state)
            return
        elif (n == 3 and cl_state == 'CLUSTER_UP_HA_ACTIVE'):
            logger.info('Cluster is ready! Cluster state is %s' % cluster_state)
            return
    # Not ready (or unexpected response): report and let the caller retry.
    fail('Cluster runtime response not as expected %s' % (rsp if rsp else 'None'))
def data_merge(a, b): """merges b into a and return merged result NOTE: tuples and arbitrary objects are not handled as it is totally ambiguous what should happen""" key = None try: if a is None or any( isinstance(a, tp) for tp in [str, unicode, int, long, float]): a = b elif isinstance(a, list): # lists can be only appended if isinstance(b, list): # merge lists a.extend(b) else: # append to list a.append(b) elif isinstance(a, dict): # dicts must be merged if isinstance(b, dict): for key in b: if key in a: a[key] = data_merge(a[key], b[key]) else: a[key] = b[key] else: raise fail('Cannot merge non-dict "%s" into dict "%s"' % (b, a)) else: raise fail('NOT IMPLEMENTED "%s" into "%s"' % (b, a)) except TypeError, e: raise fail('TypeError "%s" in key "%s" when merging "%s" into "%s"' % (e, key, b, a))
def get_vm_by_id(self, vm_id):
    """Look up a VM in the current site by its name or its ip.

    :param vm_id: vm name or ip address to search for
    :return: the matching vm object; calls fail() when nothing matches
    """
    site_vms = self.vm_list[self.site_name]
    for candidate in site_vms:
        try:
            matches = candidate.name == vm_id or candidate.ip == vm_id
        except AttributeError:
            # Entry is missing a name/ip attribute -- skip it.
            continue
        if matches:
            return candidate
    fail("ERROR! get_vm_by_id " + str(vm_id))
def get_vm_by_ip(self, vm_ip):
    """Return the VM in the current site whose name or ip matches vm_ip.

    FixMe: AV-34041: infra should provide vm object with ip(dhcp case)
    :param vm_ip: vm name or ip address to search for
    :return: the matching vm object; calls fail() when nothing matches
    """
    for vm in self.vm_list[self.site_name]:
        try:
            if vm.name == vm_ip or vm.ip == vm_ip:
                return vm
        except AttributeError:
            # Entry without name/ip attributes; ignore it.
            pass
    # Bug fix: the message previously said "get_vm_by_id" (copy-paste),
    # which misattributed the failure to the wrong helper.
    fail("ERROR! get_vm_by_ip " + str(vm_ip))
def get_ip_for_network(self, ip_host=None, vm=None, iter_count=1):
    """Rotate through the network queue and return an ip from the first
    network usable by `vm` (any network when no vm is given).

    :param ip_host: forwarded to the chosen network's get_ip_for_network
    :param vm: optional vm; restricts the choice to its 'data' networks
    :param iter_count: number of networks already tried (internal)
    :return: tuple of (ip, str(network))
    """
    # Iterative form of the original recursion: at most one pass over the
    # queue, rotating it so successive calls spread across networks.
    while iter_count <= self.size():
        net = self.dequeue()
        self.enqueue(net)
        usable = (not vm) or net.network in vm.networks['data']
        if usable:
            ip = net.get_ip_for_network(ip_host=ip_host)
            return (ip, str(net))
        iter_count += 1
    fail('No Networks found for VM %s' % (vm.name if vm else ''))
def get_parsed_json(config_file, config_vars=None, config_vars_file=None):
    """Parse a json config file, optionally layering variables on top.

    :param config_file: config file name/path to parse
    :param config_vars: dict of variables; overrides those from
        config_vars_file (default: empty)
    :param config_vars_file: optional json file of base variables
    :return: tuple (json_data, config_vars)
    """
    # Bug fix: avoid a mutable default argument.
    if config_vars is None:
        config_vars = {}
    if config_vars_file:
        config_vars_file = get_config_abs_path(config_vars_file)
        try:
            # Bug fix: json.load() needs a file object; previously the
            # path string itself was passed, which always raised.
            with open(config_vars_file) as vars_fh:
                json_data = json.load(vars_fh)
        except Exception as e:
            # Bug fix: report the vars file that failed, not config_file.
            fail('Json load failed for config file %s with Exception %s' %
                 (config_vars_file, e))
        # Explicit config_vars take precedence over the file contents.
        json_data.update(config_vars)
        config_vars = json_data
    abs_path = get_config_abs_path(config_file)
    logger.info("Parsing Config %s" % abs_path)
    json_data = resolve_sourced_config_files(abs_path)
    return json_data, config_vars
def get_config_abs_path(config_file):
    """Resolve config_file to an absolute path.

    Resolution order: an already-absolute/relative resolvable path, then a
    cloud-type-specific configs directory, then the generic configs
    directory; calls fail() if none of these yields an existing file.

    :param config_file: config file name or path to resolve
    :return: absolute path to the config file
    """
    resolved = resolve_abs_relative_path(config_file)
    if resolved:
        return resolved
    configs_root = os.path.split(suite_vars.module_path)[0]
    cloud_type = get_cloud_type()
    logger.debug('get_config_abs_path: cloud_type %s' % cloud_type)
    # Prefer a cloud-specific config when one exists.
    if cloud_type:
        candidate = os.path.join(configs_root, 'configs', cloud_type,
                                 config_file)
        if os.path.isfile(candidate):
            logger.info("Found %s specific config file" % cloud_type)
            return candidate
    candidate = os.path.join(configs_root, 'configs', config_file)
    if os.path.isfile(candidate):
        logger.info("Found config file under configs directory")
        return candidate
    fail('Cannot find the config file %s' % config_file)
def resolve_sourced_config_files(config_file):
    """Load a json config file, recursively resolving `source: "<path>"`
    lines by merging the referenced config files into the result.

    :param config_file: absolute path of the json config file
    :return: dict with all sourced configs merged in
    """
    ret_json_data = {}
    lines = ''
    with open(config_file) as f:
        for line in f.readlines():
            match = re.search(r'^source\s*:\s*"(\S+)"', line)
            if match:
                logger.debug('Found another source json path: %s'
                             % match.group(1))
                abs_path = get_config_abs_path(match.group(1))
                sourced_data = resolve_sourced_config_files(abs_path)
                ret_json_data = data_merge(ret_json_data, sourced_data)
            else:
                # Accumulate the non-source lines as this file's own json.
                lines += line
    # Bug fix: the parse/merge was previously outside this guard, so with a
    # blank remainder `json_data` was either unbound or stale from a sourced
    # file above (merging it a second time). Parse and merge only when there
    # is actual non-source content.
    if lines.strip():
        try:
            json_data = json.loads(lines)
        except Exception as e:
            fail('Json load failed for config file %s with Exception %s' %
                 (config_file, e))
        # data_merge mutates ret_json_data (dict) in place.
        data_merge(ret_json_data, json_data)
    return ret_json_data
def get_node_config_retries(**kwargs):
    """Fetch the cluster node list from the controller's 'cluster' API.

    Falls back to default admin credentials when the configured password is
    rejected, and restores the caller's original mode before returning.

    :return: the 'nodes' list from the cluster API response
    """
    config = AviConfig.get_instance()
    # Remember the current mode so credentials can be restored on exit.
    mode = config.get_mode()
    rsp = None
    try:
        st, rsp = get('cluster', check_status_code=False)
    except Exception as ex:
        fail("get_node_config: sdk-exception %s" % str(ex))
    logger.info('get_node_config: Got cluster nodes %s' % str(rsp))
    if re.search('Authentication credentials were not provided.', str(rsp)):
        # NOTE(review): the '%s' below has no format argument applied and is
        # emitted literally -- looks like a missing arg; confirm intent.
        fail('Controller %s is not running basic auth!', force=True)
    if re.search('Invalid username', str(rsp)):
        # Configured password rejected; retry once with default credentials.
        logger.info(
            'get_node_config: invalid username/password admin/%s.Try admin/admin'
            % config.password)
        config.switch_mode(user='******', password='******')
        st, rsp = get('cluster', check_status_code=False)
    # NOTE(review): if the first get() raised, `st` is unbound here; this
    # relies on fail() above not returning -- confirm.
    if st != 200:
        fail("Non 200 status code received %s %s" % (st, rsp))
    # Restore the caller's original credentials/mode.
    config.switch_mode(**mode)
    return rsp['nodes']