Example #1
def check_pool_state(pool_name, **kwargs):
    """

    :param pool_name:
    :param kwargs:
    :return:
    """
    down_servers = kwargs.get('down_servers')
    disabled_servers = kwargs.get('disabled_servers')
    error_string = kwargs.get('error_string')
    retry_timeout = int(kwargs.get('retry_timeout', 10))
    retry_interval = float(kwargs.get('retry_interval', 1))
    try:

        @logger_utils.aretry(delay=retry_interval, period=retry_timeout)
        def retry_action():
            return is_pool_servers_in_state(pool_name, down_servers,
                                            disabled_servers, error_string)

        return retry_action()

    except Exception as e:
        stats = get_all_pool_stats(pool_name)
        logger.debug("Failure case : All pool stats %s" % stats)
        logger_utils.error('Did not find pool in expected state after retry '
                           'timeout of %s, down servers: %s, failed with '
                           'error: %s' % (retry_timeout, down_servers, e))
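
Note: `logger_utils.aretry` is not shown in these snippets. A minimal sketch of such a retry decorator, assuming `delay` is the pause between attempts, `period` is the overall timeout in seconds, and `retry` is an optional attempt cap (the real signature may differ):

import functools
import time

def aretry(delay=1, period=10, retry=None):
    # Hypothetical stand-in: re-invoke the wrapped function until it returns
    # a truthy value; raise the last error once the attempt cap or the
    # period deadline is exceeded.
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            deadline = time.time() + period
            attempts = 0
            last_exc = None
            while True:
                attempts += 1
                try:
                    result = func(*args, **kwargs)
                    if result:
                        return result
                except Exception as exc:
                    last_exc = exc
                if (retry is not None and attempts >= retry) \
                        or time.time() >= deadline:
                    raise last_exc or RuntimeError('retry timeout exceeded')
                time.sleep(delay)
        return wrapper
    return decorator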
Example #2
def get_all_vnic_flow_create_on_primary_se(vs_name):
    se_name = vs_get_primary_se_name(vs_name)
    logger.debug('get_all_vnic_flow_create_on_primary_se: %s' % se_name)
    se_info = get_se_info(se_name, connected=True)
    d_stats = []
    for vnic in se_info['data_vnics']:
        if_name = vnic['if_name']
        params = {'intfname': if_name}
        resp_code, json_data = rest.get('serviceengine',
                                        name=se_name,
                                        path='flowtablestat',
                                        params=params)
        for dsr in json_data:
            if 'dispatch' in dsr:
                d_stats.append(dsr['dispatch'][0])

    if infra_utils.get_cloud_context_type() == 'baremetal':
        vnic = se_info['mgmt_vnic']
        if_name = vnic['if_name']
        params = {'intfname': if_name}
        resp_code, json_data = rest.get('serviceengine',
                                        name=se_name,
                                        path='flowtablestat',
                                        params=params)
        for dsr in json_data:
            if 'dispatch' in dsr:
                d_stats.append(dsr['dispatch'][0])

    c = 0
    for stats in d_stats:
        c = c + stats['flow_rx_create']
    return c
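
The per-interface fetch above is duplicated verbatim for the management vNIC on baremetal. A sketch of the same logic with the repetition factored into a hypothetical helper (assumes the same module-level `rest` client used in the snippet):

def _dispatch_stats_for_intf(se_name, if_name):
    # Fetch flowtablestat for one interface and collect the first
    # 'dispatch' entry of each record, exactly as the loops above do.
    _, json_data = rest.get('serviceengine', name=se_name,
                            path='flowtablestat',
                            params={'intfname': if_name})
    return [dsr['dispatch'][0] for dsr in json_data if 'dispatch' in dsr]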
Example #3
    def poweroff(self, vm_name=None):
        '''
        Power off a VM
        arguments:
        vm_name: vm you wish to power off
        '''
        if not vm_name:
            vm_name = self.vm_json.get('name')

        @aretry(retry=10, delay=2, period=2)
        def getVm():
            vm = self.server.get_vm_by_name(vm_name)
            return vm

        vm = getVm()
        if vm:
            vm.power_off()
            if vm.get_status() != "POWERED OFF":
                fail("VM could not power off. It is in " + vm.get_status() +
                     " state")
            logger.info('vm : %s powered off, status: %s' %
                        (vm_name, vm.get_status()))
        else:
            logger.debug('DEBUG: all_vms: %s' %
                         self.server.get_registered_vms())
            fail("Can't find the vm %s" % vm_name)
Example #4
def server_allow_icmp_pkts(se, svr_vm_hdl, svr_ip):
    # Get server side IP for the SE
    se_back_ip = se_get_ip_for_server(se, svr_ip)
    server_vm = get_server_by_handle(svr_vm_hdl).vm()
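    # 'iptables -D' deletes the matching DROP rule, so ICMP traffic from the
    # SE's back-end IP is allowed again.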
    cmd = 'iptables -D INPUT -p icmp -s %s -j DROP' % se_back_ip
    out = server_vm.execute_command(cmd)
    logger.debug('Cmd %s Output: %s' % (cmd, out))
Example #5
def make_follower_ready_for_cluster(ctrl_vm, **kwargs):
    """
    Resets Controller password to admin/admin
    """
    config = AviConfig.get_instance()
    mode = config.get_mode()
    logger.debug("Current Default Mode %s" % mode)
    username = mode['user']
    current_password = mode['password']
    logger.info('Reset controller password for %s' % ctrl_vm.ip)
    try:
        config.switch_mode(password=ctrl_vm.password)
        session = create_session(ctrl_vm)
        config.switch_mode(session=session)
        # REVIEW password should be the original default password
        reset_admin_user(username=username,
                         password='******',
                         old_password=ctrl_vm.password,
                         **kwargs)
    except Exception as e:
        logger.debug("Trying with admin/admin")
        config.switch_mode(password='******')
        session = create_session(ctrl_vm)
        config.switch_mode(session=session)
        # REVIEW password should be the original default password
        reset_admin_user(username=username,
                         password='******',
                         old_password='******',
                         **kwargs)
    config.switch_mode(session=None, password=current_password)
Example #6
def get_hmon_stats(pool_name, hm_name, handle, field1='', field2=''):
    """

    :param pool_name:
    :param hm_name:
    :param handle:
    :param field1:
    :param field2:
    :return:
    """
    resp_code, resp_data = rest.get('pool',
                                    name=pool_name,
                                    path='/runtime/server/hmonstat')
    common.check_response_for_errors(resp_data)

    # Check if server is in handle format or name
    if ':' in handle:
        name = handle
    else:
        server = infra_utils.get_server_by_handle(handle)
        name = server.ip() + ':' + str(server.port())
        logger.debug('server_name: %s' % name)

    shm = resp_data[0].get('server_hm_stat')
    for server in shm:
        if name == server.get('server_name'):
            for hm in server[field1]:
                if hm_name == hm.get('health_monitor_name'):
                    if field2:
                        return hm[field2]
                    else:
                        return hm
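
The fields read above imply a response shaped roughly like the following (illustrative values only; the real payload carries more fields, and the field1/field2 key names depend on the caller):

resp_data = [{
    'server_hm_stat': [{
        'server_name': '10.1.1.5:80',
        'shm': [{                      # looked up via field1
            'health_monitor_name': 'hm-http',
            'curr_count': [],          # field2 selects one of these keys
        }],
    }],
}]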
Example #7
 def get_vm_ip_for_name(self, vm_name=None, public_ip_address=False):
     """ Get IP address for given vm """
     vm_ip_addr = None
     if not vm_name:
         vm_name = self.vm_json.get('name')
     try:
         vm_obj = self.compute_client.virtual_machines.get(
             self.resource_group, vm_name)
         for interface in vm_obj.network_profile.network_interfaces:
             logger.debug('Got interface details ..: %s' % interface.id)
             nic_name = " ".join(interface.id.split('/')[-1:])
             #sub="".join(interface.id.split('/')[4])
             ip_addr_objs = self.network_client.network_interfaces.get(
                 self.resource_group, nic_name).ip_configurations
             for ip_obj in ip_addr_objs:
                 logger.info(
                     " Private IP Address: %s , for Nic: %s, IP Config obj:%s"
                     % (ip_obj.private_ip_address, nic_name, ip_obj.name))
                 if ip_obj.primary:
                     if public_ip_address:
                         vm_ip_addr = ip_obj.public_ip_address
                     else:
                         vm_ip_addr = ip_obj.private_ip_address
                         logger.info("IP Address: %s , for Nic: %s" %
                                     (vm_ip_addr, nic_name))
             return vm_ip_addr
     except Exception as e:
         fail('Error while getting the ip address for vm, exp: %s' %
              e.message)
Example #8
def error_counters_should_be_under_threshold(shm_runtime, threshold=0):
    """

    :param shm_runtime:
    :param threshold:
    :return:
    """
    hm_type = shm_runtime['health_monitor_type']
    if hm_type in [
            'HEALTH_MONITOR_TCP', 'HEALTH_MONITOR_HTTP',
            'HEALTH_MONITOR_HTTPS', 'HEALTH_MONITOR_EXTERNAL',
            'HEALTH_MONITOR_UDP', 'HEALTH_MONITOR_DNS', 'HEALTH_MONITOR_PING'
    ]:
        logger.debug('shm_runtime %s' % shm_runtime)

        bad_counters = []
        if 'curr_count' in shm_runtime and \
                        len(shm_runtime['curr_count']) > 0:
            error_list = shm_runtime['curr_count']
            for error in error_list:
                if int(error['count']) > threshold:
                    bad_counters.append("%s: %s" %
                                        (error['type'], error['count']))
            if bad_counters:
                logger_utils.fail('ERROR! Non-zero bad connects: %s' %
                                  ', '.join(bad_counters))
Example #9
 def create_keystone_user(self, username, password, tenant=None):
     logger.debug('Creating keystone user %s' % username)
     if tenant is None:
         tenant_id = self.ks_client.auth_ref.project_id
     else:
         tenant_id = self.ks_client.tenants.find(name=tenant).id
     self.ks_client.users.create(username, password, tenant_id=tenant_id)
Example #10
 def wait_until_n_cluster_nodes_ready_inner():
     rsp = None
     try:
         st, rsp = get('cluster/runtime')
     except Exception as ex:
         fail('Cluster api runtime exception: %s' % ex)
     if rsp and st == 200:
         node_states = rsp.get('node_states', [])
         cluster_state = rsp.get('cluster_state', {})
         cl_state = cluster_state.get('state', 'unknown')
         up_nodes = 0
         for node in node_states:
             if node.get('state') == 'CLUSTER_ACTIVE':
                 up_nodes += 1
         if (up_nodes != n):
             logger.debug('Cluster (status:%s) expects %d active nodes '
                          'but contains %d active nodes' %
                          (cl_state, n, up_nodes))
         elif (n == 1 and cl_state == 'CLUSTER_UP_NO_HA'):
             logger.info('Cluster is ready! Cluster state is %s' %
                         cluster_state)
             return
         elif (n == 2 and cl_state == 'CLUSTER_UP_HA_COMPROMISED'):
             logger.info('Cluster is ready! Cluster state is %s' %
                         cluster_state)
             return
         elif (n == 3 and cl_state == 'CLUSTER_UP_HA_ACTIVE'):
             logger.info('Cluster is ready! Cluster state is %s' %
                         cluster_state)
             return
     fail('Cluster runtime response not as expected %s' %
          (rsp if rsp else 'None'))
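
The three `elif` arms encode a fixed mapping from node count to expected cluster state; a self-contained, table-driven equivalent of that check (states taken from the snippet):

EXPECTED_CLUSTER_STATE = {
    1: 'CLUSTER_UP_NO_HA',           # single node, no HA
    2: 'CLUSTER_UP_HA_COMPROMISED',  # two nodes, degraded HA
    3: 'CLUSTER_UP_HA_ACTIVE',       # full three-node HA
}

def cluster_ready(n, cl_state, up_nodes):
    # True when all n nodes are active and the state matches the table.
    return up_nodes == n and EXPECTED_CLUSTER_STATE.get(n) == cl_state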
Example #11
def verify_no_traffic_errors_on_client_side():
    for vm in infra_utils.get_vm_of_type('client'):
        vm.execute_command('rm -rf /tmp/httptest_io_*')
        time.sleep(10)
        resp = vm.execute_command('ls -ltr /tmp/httptest_io_error_* | wc -l')
        logger.debug('response is %s' % resp)
        if int(resp[0]) > 0:
            logger_utils.fail('Errors are generated on client side %s' % resp)
Example #12
def get_interface_mim_stats_for_vs(vs):
    se = vs_get_primary_se_name(vs)
    logger.debug('primary: %s' % se)
    c = get_interface_mim_stats_for_se(se)
    se_list = get_vs_secondary_se_list(vs)
    for se in se_list:
        logger.info('secondary: %s' % se)
        c += get_interface_mim_stats_for_se(se)
    return c
Example #13
    def create_instance(self, wait=True, **kwargs):
        """ Create an instance from AMI """
        instance_type = self.vm_json.get('type')
        networks = self.vm_json.get('networks')
        offset = kwargs.pop('offset', 10)
        version_tag = kwargs.pop('version_tag', None)
        iam_role = None
        if version_tag is None and instance_type in ['se', 'controller']:
            logger_utils.fail(
                'Please specify version tag of AMI for creating SE/Controller')

        if instance_type == 'controller':
            ami_name = 'Avi-Controller-%s' % version_tag
            iam_role = kwargs.get('iam_role', None)
        elif instance_type == 'se':
            ami_name = 'Avi-SE-%s' % version_tag
        elif instance_type == 'client':
            ami_name = 'Jenkins Client'
        elif instance_type == 'server':
            ami_name = 'jenkins-server'
        else:
            logger_utils.fail('Unknown instance type: %s' % instance_type)

        image_id = self._get_ami_id(ami_name)
        sec_grps = self._get_sec_grp_ids(SEC_GROUP[instance_type])
        interfaces = self._set_mgmt_interfaces(networks, offset, sec_grps)

        try:
            # http://boto.readthedocs.org/en/latest/ref/ec2.html#boto.ec2.connection.EC2Connection.run_instances
            self.reservation = self.ec2.run_instances(
                #placement=REGION + 'a',
                image_id=image_id,
                instance_type=INSTANCE_SIZE[instance_type],
                network_interfaces=interfaces,
                instance_profile_name=iam_role)  #,
            #**kwargs)
        except Exception as e:
            logger_utils.fail(e)
        logger.info('Created reservation for instance: %s' % self.reservation)
        if wait:
            instance = self.reservation.instances[0]
            logger_utils.asleep(delay=10)
            logger.info('Wait until instance %s goes to running state' %
                        instance.id)
            ip_addr = self._wait_until_instance_in_state(instance)
            try:
                vm_name = self.vm_json.get('name')
                instance.add_tag('Name', vm_name)
                instance.add_tag('Owner', vm_name)
                wait_until_vm_ready([ip_addr])
                logger.debug('Set data interfaces for instance: %s' %
                             instance.id)
                #offset = self.__set_data_interfaces(
                #instance, vm, networks, offset, sec_grps)
            except Exception as e:
                logger_utils.fail(e)
            return instance, offset
Example #14
def verify_cluster_mode(result, **kwargs):
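    # Note: returns True when some node IP in `result` is missing from the
    # configured controller IPs (i.e. the cluster is not yet in the desired
    # mode), and False when every node matched.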
    controllers = infra_utils.get_vm_of_type('controller')
    for node in result:
        ip_addr = node['ip']['addr']
        ctrl_ips = [ctrl.ip for ctrl in controllers]
        if ip_addr not in ctrl_ips:
            logger.debug(
                'Cluster is not up in the desired mode (ip): could not find '
                '%s in controller ips %s' % (ip_addr, ctrl_ips))
            return True
    return False
Example #15
def set_workspace():
    workspace = ''
    thispath = os.path.realpath(__file__)
    workspace = dirname(dirname(
        dirname(thispath)))  # $workspace/test/avitest/conftest.py
    #workspace = subprocess.check_output(
    #    'git rev-parse --show-toplevel', shell=True)
    #workspace = workspace.strip(' \n\t')
    if not workspace:
        raise Exception('ERROR! Could not setup workspace')
    logger.debug('Setting workspace:: %s' % workspace)
    return workspace
Example #16
    def sdk_connect(self):

        auth_url = self.configuration['auth_url']
        user = self.configuration['username']
        password = self.configuration['password']
        logger.debug('password from config = %s' % password)
        # FIXME read from rc
        password = '******'
        project_id = user
        nova = NovaClient('2', user, password,
                          project_id, auth_url)
        return nova
Example #17
def check_return_code(request_type, rsp, should_pass, ignore_bad_code=False):
    if ignore_bad_code and rsp.status_code >= 300:
        print('Return bad response: %s - %s , Ignore set to %r' %
              (rsp.status_code, rsp.content, ignore_bad_code))
        return
    logger.debug("%d:: %s :: %r" % (rsp.status_code, rsp.content, should_pass))
    if rsp.status_code >= 300 and should_pass and not ignore_bad_code:
        fail('%s return bad response: code %s, content %s' %
             (request_type, rsp.status_code, rsp.content))

    elif rsp.status_code < 300 and not should_pass and not ignore_bad_code:
        fail('%s should have failed: code %s, content %s' %
             (request_type, rsp.status_code, rsp.content))
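
The pass/fail decision above reduces to a small predicate: when `ignore_bad_code` is set nothing ever fails, otherwise success (`status_code < 300`) must agree with `should_pass`. A standalone sketch of that logic:

def response_matches_expectation(status_code, should_pass,
                                 ignore_bad_code=False):
    # Mirrors check_return_code's branches without the logging and fail().
    if ignore_bad_code:
        return True
    return (status_code < 300) == should_pass

assert response_matches_expectation(200, should_pass=True)
assert not response_matches_expectation(404, should_pass=True)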
Example #18
def get_internal(obj_type,
                 obj_name,
                 core=0,
                 ret_all=False,
                 disable_aggregate=None,
                 **kwargs):
    """

    :param obj_type:
    :param obj_name:
    :param core:
    :param ret_all:
    :param disable_aggregate:
    :param kwargs:
    :return:
    """
    # REVIEW: re-examine where this should go

    path = '/runtime/internal'
    if disable_aggregate:
        path += '?disable_aggregate=%s' % disable_aggregate

    resp_code, resp_data = rest.get(obj_type,
                                    name=obj_name,
                                    path=path,
                                    params=kwargs.get('params', {}))

    if disable_aggregate or ret_all:
        logger.debug('disable_aggregate or ret_all set, returning the full '
                     'response')
        return resp_data

    for json_data in resp_data:
        proc_id_from_get_data = json_data.get('proc_id')
        if not proc_id_from_get_data:
            continue
        if re.search('C' + str(core), proc_id_from_get_data):
            return json_data

        # Shared memory (not per core)
        if obj_type == 'pool':
            if re.search('so_pool', proc_id_from_get_data):
                return json_data
            else:
                logger_utils.fail(
                    'ERROR! internal data NULL for %s proc_id: so_pool' %
                    obj_type)

    logger_utils.fail('ERROR! internal data NULL for %s core %s' %
                      (path, str(core)))
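
The per-core record is selected by a substring search for 'C<core>' in `proc_id`. A small self-contained illustration (these proc_id values are made up; the real format is not shown in the snippet):

import re

core = 0
for proc_id in ['PROC_C0', 'PROC_C1', 'so_pool']:
    print('%s -> %s' % (proc_id, bool(re.search('C' + str(core), proc_id))))
# PROC_C0 matches core 0; 'so_pool' is the shared-memory case handled above.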
Example #19
def get_random_slave_vm():
    """ Returns a list of VMs with current role of slave """
    slave_vms = []
    for vm in infra_utils.get_vm_of_type('controller'):
        if not suite_vars.dns:
            vm_ip = vm.ip
        else:
            vm_ip = vm.name
        role = get_node_role(vm_ip)
        if role == 'CLUSTER_FOLLOWER':
            slave_vms.append(vm)
        logger.debug('Role of vm %s is %s' % (vm_ip, role))
    if not slave_vms:
        return None
    return random.choice(slave_vms)
Example #20
 def poweroff(self, vm_name=None):
     """ Power off openstack vm """
     if not vm_name:
         vm_name = self.vm_json.get('name')
     vm = None
     try:
         vm = self.nova.servers.find(name='%s' % vm_name)
         logger.info('vm state %s' % vm.status)
         if vm.status == 'SHUTOFF':
             logger.info('vm is already powered off, return')
             return True
     except Exception as e:
         error("can't find vm %s in openstack, exp: %s" % (vm_name, e))
     if vm:
         logger.debug('Found vm: %s to poweroff' % vm_name)
         vm.stop()
     return (self.check_vm_status(vm_name, 'SHUTOFF'))
Example #21
 def wait_until_vm_is_up(self):
     instance = self.reservation.instances[0]
     logger_utils.asleep(delay=10)
     logger.info('Wait until instance %s goes to running state' %
                 instance.id)
     ip_addr = self._wait_until_instance_in_state(instance)
     try:
         vm_name = self.vm_json.get('name')
         instance.add_tag('Name', vm_name)
         instance.add_tag('Owner', vm_name)
         wait_until_vm_ready([ip_addr])
         logger.debug('Set data interfaces for instance: %s' % instance.id)
         #offset = self.__set_data_interfaces(
         #instance, vm, networks, offset, sec_grps)
     except Exception as e:
         logger_utils.fail(e)
     return instance
Example #22
def is_there_IO_error(client_range,
                      log_file='httptest_io_error*',
                      raise_exception=False):
    """
        While traffic genearation IO errors are generally logged at
        /tmp/httptest_<timestamp>.log. The function checks if
        the log file is present or not.
    """

    if isinstance(log_file, basestring):
        logs = [log_file]
    else:
        logger_utils.fail(
            'HttpTest failed. Error - log file should be of type string, '
            'but got: %s' % log_file)

    for _log_file in logs:

        logger.info('is_there_IO_error: %s\n' % _log_file)

        clients = get_clients_from_range(client_range)
        vm, ip = traffic_manager.get_client_by_handle(clients[0])
        logger.debug('VM IP, NAME, CLIENT: %s, %s, %s' % (vm.ip, vm.name, ip))
        cmd = 'tail -5 %s' % _log_file
        resp = vm.execute_command(cmd)
        if len(resp) > 0 and raise_exception:
            error_msg = 'Get request failed\n'
            for error in resp:
                try:
                    msg = json.loads(error)
                except Exception:
                    # When httptest fails, it doesn't write error log in json
                    # format.
                    logger_utils.error('HttpTest failed. Error - %s' % error)
                error_msg += 'Client: %s\nValidation: %s\nExpected: %s\nActual: ' \
                             '%s\n\n' % (msg['client'], msg['error_code'],
                                         msg['expected'], msg['actual'])
            # Cleaning up before raising exception
            vm.execute_command('rm %s &> /tmp/httptest' % _log_file)
            logger_utils.error(error_msg)
        else:
            if len(resp) == 0:
                return 'False'
            else:
                logger.info('Failures: %s' % resp)
                return 'True'
Example #23
def get_node_role(ip, **kwargs):
    logger.debug('get_node_role for ip %s' % ip)

    nodes = cluster.get_node_config(**kwargs)
    nodes_by_ip = {}
    # AV-18689 this workaround is for the mesos AWS testbed,
    # where the single cluster node has internal ip not matching the vm.ip
    if len(nodes) == 1:
        nodes_by_ip[ip] = nodes[0]
    else:
        for node in nodes:
            nodes_by_ip[node['ip']['addr']] = node
    @aretry(retry=10, delay=5, period=5)
    def get_cluster_runtime():
        code, rsp = get('cluster', path='runtime')
        return rsp
    rsp = get_cluster_runtime()
    try:
        node_runtimes = rsp['node_states']
    except Exception:
        logger.info('Error getting the cluster runtime. Got %s from the API' %
                    rsp)
        fail('Error getting the cluster runtime. Got %s from the API' % rsp)

    if len(nodes_by_ip) != len(node_runtimes):
        logger.info('There are %d configured nodes, but %d active nodes '
                    'in the cluster runtime.' %
                    (len(nodes_by_ip), len(node_runtimes)))
        fail('There are %d configured nodes, but %d active '
             'nodes in the cluster runtime.' %
             (len(nodes_by_ip), len(node_runtimes)))

    for node in node_runtimes:
        if (node['name'] == nodes_by_ip[ip]['name'] or
                node['name'] == nodes_by_ip[ip]['vm_uuid']):
            logger.info('role for %s is %s' % (ip, node['role']))
            return node['role']

    fail('get_node_role: Could not find an active node with name '
         '%s, so could not get its role' % ip)
Example #24
def verify_persistence_entries(map):
    if not map:
        raise RuntimeError('Empty persistence map')
    for key in map.keys():
        for se, val in map.iteritems():
            if key == se:
                continue
            if map[key] != val:
                logger.info('Entries present in %s and not in %s::' % (key, se))
                logger.info('%s' % str(set(val.keys()) - set(map[key].keys())))

                logger.info('Entries present in %s and not in %s::' % (se, key))
                logger.info('%s' % str(set(map[key].keys()) - set(val.keys())))

                diff = [(k, val[k], v)
                        for k, v in map[key].iteritems() if k in val and val[k] != v]
                logger.info('Client IPs having different server IPs::')
                logger.info(diff)
                logger.debug('Comparing: %s\nAND\n%s' % (map[key], val))
                logger_utils.fail('Persistence entries do not match for %s and %s' % (key, se))
Example #25
 def retry_wait():
     rsp = None
     try:
         status_code, rsp = get('cluster', path='runtime')
     except Exception as ex:
         fail('Cluster api runtime exception: %s' % ex)
     if rsp:
         cluster_state = rsp.get('cluster_state', {})
         if (detailed_state and
                 detailed_state in cluster_state.get('reason', '')):
             return True
         elif (not detailed_state and
               'CLUSTER_UP' in cluster_state.get('state', '')):
             logger.info('Controller cluster is ready. It is in %s state' %
                         cluster_state)
             return True
         elif ('CLUSTER_UP' in cluster_state.get('state', '') and
               'HA_NOT_READY' not in cluster_state.get('state', '')):
             return True
         logger.debug('cluster state[%s]: %s' %
                      (rsp['node_states'],
                       cluster_state.get('state', 'unknown')))
     else:
         fail('Cluster api runtime exception: no response.')
Example #26
def configure_cluster(wait_time=600, **kwargs):
    ignore_follower_reset = kwargs.pop('ignore_follower_reset', False)
    logger.debug("::ignore_follower_reset:: %s" % ignore_follower_reset)
    nodes = []
    controllers = infra_utils.get_vm_of_type('controller')
    ctrl_vm = controllers[0]  # the first controller acts as the leader
    for vm in controllers:
        nodes.append({'ip': {'addr': vm.ip, 'type': 'V4'}, 'name': vm.ip})
        if vm.ip != ctrl_vm.ip and not ignore_follower_reset:
            make_follower_ready_for_cluster(vm, **kwargs)
    cluster_obj = {'name': DEFAULT_CLUSTER_NAME, 'nodes': nodes}
    logger.info("configure_cluster with nodes %s" % (nodes))
    st, rsp = put('cluster', data=json.dumps(cluster_obj))
    wait_until_n_cluster_nodes_ready(len(controllers),
                                     wait_time=wait_time,
                                     **kwargs)
Example #27
    def _set_mgmt_interfaces(self, networks, offset, sec_grps):
        """ Return a NetworkInterfaceCollection object for an instance """
        interfaces = []
        logger.debug('networks: %s' % networks)
        device_index = 0
        vm_ip = self.vm_json.get('ip', None)
        for key, network in networks.iteritems():
            key_var = '${' + str(key) + '}'
            subnet = self._get_subnet(network)
            if key in ('management', 'mgmt'):
                logger.debug('Adding management network on device: %s' %
                             device_index)
                # Assumption: Everything is DHCP in management network.
                logger.debug('subnet: %s, subnet.id: %s' % (subnet, subnet.id))
                if vm_ip:
                    interface = boto.ec2.networkinterface.NetworkInterfaceSpecification(
                        subnet_id=subnet.id,
                        groups=sec_grps,
                        device_index=device_index,
                        private_ip_address=vm_ip)
                else:
                    interface = boto.ec2.networkinterface.NetworkInterfaceSpecification(
                        subnet_id=subnet.id,
                        groups=sec_grps,
                        description='Mgmt',
                        device_index=device_index)
                interfaces.append(interface)

        interfaces = boto.ec2.networkinterface.NetworkInterfaceCollection(
            *interfaces)
        return interfaces
Example #28
def vrf_add_ibgp_profile_peer(vrf_name, asnum, nw_name, md5):
    """

    :param vrf_name:
    :param asnum:
    :param nw_name:
    :param md5:
    :return:
    """
    json_data = vrf_get(vrf_name)
    if 'bgp_profile' not in json_data:
        json_data['bgp_profile'] = dict()
    json_data['bgp_profile']['local_as'] = int(asnum)
    json_data['bgp_profile']['ibgp'] = True
    peer = dict()
    peer['remote_as'] = int(asnum)

    config = infra_utils.get_config()
    import lib.network_lib as network_lib
    peer_ip = network_lib.get_ip_for_last_octet(nw_name, '1')
    peer['peer_ip'] = {'addr': peer_ip, 'type': 'V4'}
    peer['subnet'] = {'ip_addr': {'addr': peer_ip, 'type': 'V4'},
                      'mask': int(network_lib.get_mask_for_network(nw_name))}
    peer['network_ref'] = '/api/network?name=' + nw_name
    peer['md5_secret'] = md5
    peer['bfd'] = True

    if 'peers' not in json_data['bgp_profile']:
        json_data['bgp_profile']['peers'] = []

    for peer_data in json_data['bgp_profile']['peers']:
        if peer_data['peer_ip']['addr'] == peer_ip:
            logger.debug('Peer %s already configured' % peer_ip)
            return

    json_data['bgp_profile']['peers'].append(peer)
    rest.put('vrfcontext', name=vrf_name, data=json_data)
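
For reference, with `asnum=65001`, a hypothetical /24 network whose gateway last octet is 1, and `md5='secret'`, the peer object appended above would look like:

peer = {
    'remote_as': 65001,
    'peer_ip': {'addr': '10.10.10.1', 'type': 'V4'},
    'subnet': {'ip_addr': {'addr': '10.10.10.1', 'type': 'V4'}, 'mask': 24},
    'network_ref': '/api/network?name=net-10-10-10',
    'md5_secret': 'secret',
    'bfd': True,
}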
Example #29
def vrf_del_ibgp_profile_peers(vrf_name, nw_names):
    """

    :param vrf_name:
    :param nw_names:
    :return:
    """
    json_data = vrf_get(vrf_name)
    if 'bgp_profile' not in json_data:
        logger.debug('No BGP Profile configured')
        return
    if 'peers' not in json_data['bgp_profile']:
        logger.debug('No BGP Peers configured')
        return
    config = infra_utils.get_config()
    import lib.network_lib as network_lib
    for nw_name in nw_names:
        peer_ip = network_lib.get_ip_for_last_octet(nw_name, '1')
        for i, peer_data in enumerate(json_data['bgp_profile']['peers']):
            if peer_data['peer_ip']['addr'] == peer_ip:
                logger.debug('Peer %s found for delete' % peer_ip)
                json_data['bgp_profile']['peers'].pop(i)
                break
    rest.put('vrfcontext', name=vrf_name, data=json_data)
Example #30
def get_node_role(ip, **kwargs):
    """

    :param ip:
    :param kwargs:
    :return:
    """

    logger.debug('get_node_role for ip %s' % ip)
    if rest.get_cloud_type() == 'gcp':
        ctrl_vm = infra_utils.get_vm_of_type('controller')[0]
        vm_public_ip = ctrl_vm.vm_public_ip
    else:
        vm_public_ip = ip
    if not suite_vars.dns:
        vm_public_ip = ip
    else:
        ctrl_vms = infra_utils.get_vm_of_type('controller')
        for vm in ctrl_vms:
            if ip == vm.ip:
                vm_public_ip = vm.name
                ip = vm.name
                break
    nodes = cluster.get_node_config(vm_public_ip, **kwargs)
    nodes_by_ip = {}
    # AV-18689 this workaround is for the mesos AWS testbed,
    # where the single cluster node has internal ip not matching the vm.ip
    if len(nodes) == 1:
        nodes_by_ip[vm_public_ip] = nodes[0]
    else:
        for x in nodes:
            nodes_by_ip[x['ip']['addr']] = x
    for _ in xrange(10):
        path = 'cluster/runtime'
        try:
            status_code, rsp = rest.get(path)
            break
        except Exception:
            logger.info('Could not get cluster runtime')
            logger_utils.asleep(delay=5)
    try:
        node_runtimes = rsp['node_states']
    except Exception:
        logger.info('Error getting the cluster runtime. Got %s from the API' %
                    rsp)
        logger_utils.fail('Error getting the cluster runtime. Got %s from '
                          'the API' % rsp)

    if len(nodes_by_ip) != len(node_runtimes):
        logger.info('There are %d configured nodes, but %d active nodes '
                    'in the cluster runtime.' %
                    (len(nodes_by_ip), len(node_runtimes)))
        logger.info('Waiting for 5 min to try and fix things...')
        logger_utils.asleep(delay=60 * 5)
        logger_utils.fail('There are %d configured nodes, but %d active '
                          'nodes in the cluster runtime.' %
                          (len(nodes_by_ip), len(node_runtimes)))

    for node in node_runtimes:
        try:
            if (node['name'] == nodes_by_ip[ip]['name'] or
                    node['name'] == nodes_by_ip[ip]['vm_uuid']):
                logger.info('role for %s is %s' % (ip, node['role']))
                return node['role']
        except Exception as e:
            # Sometimes get_node_config on a DNS name returns nodes keyed by
            # IP when set to default, so the nodes_by_ip dict has IPs as keys
            # instead of DNS names, and vice versa.
            if suite_vars.dns:
                ctrl_vms = infra_utils.get_vm_of_type('controller')
                for vm in ctrl_vms:
                    if ip == vm.ip:
                        ip = vm.name
                        break
                    elif ip == vm.name:
                        ip = vm.ip
                        break
            if (node['name'] == nodes_by_ip[ip]['name'] or
                    node['name'] == nodes_by_ip[ip]['vm_uuid']):
                logger.info('role for %s is %s' % (ip, node['role']))
                return node['role']

    logger_utils.fail('get_node_role: Could not find an active node with name '
                      '%s, so could not get its role' % ip)