Example #1
def wait_for_intelligent_scalein(vs_name):
    """

    :param vs_name:
    :return:
    """

    _, vs_obj = rest.get('virtualservice', name=vs_name)
    pool_ref = pool_lib._get_pool_from_vs(vs_obj)
    pool_uuid = pool_ref.split('pool/')[1].split('#')[0]
    _, pool_obj = rest.get('pool', uuid=pool_uuid)

    asp_ref = pool_obj['autoscale_policy_ref']
    as_policy_uuid = asp_ref.split('serverautoscalepolicy/')[1].split('#')[0]

    _, autoscale_policy = rest.get('serverautoscalepolicy',
                                   uuid=as_policy_uuid)

    min_size = autoscale_policy['min_size']
    # now wait for time such that num_servers == min_size
    for _ in xrange(30):
        _, pool_obj = rest.get('pool', uuid=pool_uuid)
        num_servers = len(pool_obj['servers'])
        if num_servers <= min_size:
            break
        logger_utils.asleep(delay=15)

    if num_servers > min_size:
        logger_utils.fail('scalein did not succeed pool %s' %
                          (str(pool_obj['servers'])))
Example #2
    def create_vm(self, **kwargs):
        """ Create Azure VM with given build
            
        """
        self.vm_deployment()
        build_dir = kwargs.get('build_dir')
        vm_name = self.vm_json.get('name')

        # Delete VM and NICs if they already exist
        self.delete_instance(vm_name=vm_name, raise_error=False)

        vhd_url, vhd_name = self.get_vhd_url(build_dir)
        logger.info("Got VHD URL : %s \n VHD name: %s " % (vhd_url, vhd_name))
        if not self.vhd_exists(vhd_name):
            logger.info('VHD %s not found in Azure; uploading it ..' % vhd_name)
            self.upload_vhd_to_azure(build_dir, vhd_name)

        # vNetwork name + Subnet
        controller_ip = self.create_nic(vm_name=vm_name)
        nic_id = self.get_nic_id(vm_name=vm_name)
        try:
            result = self.compute_client.virtual_machines.create_or_update(
                self.resource_group, vm_name,
                self.vm_parameters(vhd_url, nic_id, vhd_name))
            asleep(
                msg='VM creation in progress; the API call has its own '
                    'internal wait.',
                delay=10)
        except Exception as e:
            fail('Error while creating Controller Virtual Machine: %s' %
                 e.message)
        result.wait()
        logger.info('Controller : %s created with IP address: %s' %
                    (vm_name, controller_ip))
Example #3
def validate_vs_dns_deleted(vs_dns_name, retries=5, dns_vs_vip=None, **kwargs):
    """

    :param vs_dns_name:
    :param retries:
    :param dns_vs_vip:
    :param kwargs:
    :return:
    """

    if not dns_vs_vip:
        logger.info("[SKIPPING] DNS check for VS as no DNS vip provided. Note,"
                    " controller based DNS is not supported anymore.")
        return True

    count = retries
    while count:
        ipl, portl = dns_get_ip_ports_for_fqdn(
            dns_get_resolver(dns_vs_vip=dns_vs_vip), vs_dns_name)
        if ipl or portl:
            count -= 1
            logger_utils.asleep(delay=5)
        else:
            return True
        logger_utils.fail("Unexpected[%s]: DNS entries %s, %s found" %
                          (vs_dns_name, ipl, portl))
Example #4
def retry_action_detail(action, retry_count=0, retry_interval=0.1):
    """

    :param action:
    :param retry_count:
    :param retry_interval:
    :return:
    """

    if retry_interval <= 0:
        logger_utils.fail('retry_interval <= 0 is not allowed, was: %s' %
                          retry_interval)
    if retry_count < 0:
        logger_utils.fail('retry_count < 0 is not allowed, was %s' %
                          retry_count)
    retry_count = int(retry_count)

    tries = 1
    success, dbg_str = action()
    if success:
        return dbg_str

    for x in xrange(1, retry_count):
        tries += 1
        success, dbg_str = action()
        if success:
            return dbg_str
        else:
            logger_utils.asleep(delay=retry_interval)

    logger.trace('Last Retry result: %s' % dbg_str)
    logger_utils.fail('%s, failed after %s tries' % (dbg_str, tries))
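A hypothetical call showing the contract retry_action_detail expects: `action` is a zero-argument callable returning a (success, dbg_str) tuple. The ping_controller helper below is made up for illustration:

def ping_controller():
    ok = True  # stand-in for a real reachability probe
    return ok, 'controller reachable' if ok else 'controller unreachable'

dbg = retry_action_detail(ping_controller, retry_count=5, retry_interval=2)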
Example #5
def main():
    args = parse_arguments()
    testbed_path = args.testbed_name
    testbed_abspath, testbed_dir, testbed_name = __testbed_find(testbed_path)
    logger.info("Cleanup Processing for testbed %s" %testbed_name)
    try:
        testbed_data = open(testbed_abspath).read()
        tb_json = simplejson.loads(testbed_data)
        logger.info('test_bed loaded')
    except Exception as e:
        abort('Json load failed for testbed file %s with Exception %s' % (testbed_abspath, e))

    aws_clouds = get_aws_clouds_tb_json(tb_json)
    if not aws_clouds:
        abort("No AWS Clouds Found. Aborting")

    for aws_cloud in aws_clouds:
        cloud_name = aws_cloud.get('name')
        sdk_conn = Aws(cloud_configuration_json=aws_cloud.get('aws_configuration'))
        logger.info("AWS SDK connection successful for cloud %s" % cloud_name)
        filters = {'tag-key':'avitest_tb_tag', 'tag-value': testbed_name}
        terminate_instances(sdk_conn, filters)
        asleep(msg="Settling with Instances delete" , delay = 10)
        delete_images(sdk_conn, filters)
        delete_network_interfaces(sdk_conn, filters)
        asleep(msg="Settling with Network Interfaces Deletion" , delay = 30)
        delete_security_groups(sdk_conn, filters)
        sdk_conn.disconnect()

    clean_secondary_ips_on_client_server(tb_json)
Example #6
def check_cloud_state(expected_status='CLOUD_STATE_PLACEMENT_READY', **kwargs):
    cloud_name = kwargs.get('cloud_name', None)
    # config = get_config()
    # ctrl_clouds =  kwargs.get('clouds', config.testbed[config.site_name].cloud)
    asleep(msg='waiting for cloud state', delay=10)
    status_code, resp_json = get('cloud-inventory')
    #resp_json = resp.json()
    #if len(ctrl_clouds) != int(resp_json['count']):
    #    error("Number of Configured Clouds not as Received. Configured=%s Received=%s" %(len(ctrl_clouds), resp_json['count']))
    for cloud_obj in resp_json['results']:
        if cloud_name and cloud_name != cloud_obj['config']['name']:
            continue
        if 'error' in cloud_obj['status']:
            error('Received Error in cloud status %s' %
                  cloud_obj['status']['error'])
        cloud_status = cloud_obj['status']['state']
        last_reason = cloud_obj['status'].get('reason', '')
        # Handle the special case where the cloud is up but about to be
        # reconfigured.
        # REVIEW any other reasons that we need to account for?
        if cloud_status != expected_status or 'Pending re-config' in last_reason:
            if cloud_obj['config']['vtype'] == 'CLOUD_AWS':
                asleep("additional delay for AWS cloud", delay=30)
            error('Cloud status is not as expected or reason is not empty. '
                  'Expected=%s Received=%s, reason=%s' %
                  (expected_status, cloud_status, last_reason))
    return True
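A minimal hypothetical invocation; the cloud name is illustrative and follows the 'Default-Cloud' naming used elsewhere in these examples:

check_cloud_state(expected_status='CLOUD_STATE_PLACEMENT_READY',
                  cloud_name='Default-Cloud')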
Example #7
def validate_vs_dns_info(vs_name, retries=5, **kwargs):
    """

    :param vs_name:
    :param retries:
    :param kwargs:
    :return:
    """

    dns_vs_vip = kwargs.get('dns_vs_vip', '')
    if not dns_vs_vip:
        logger.info(
            "[SKIPPING] DNS check for VS as no DNS vip provided. Note, "
            "controller based DNS is not supported anymore.")
        return True
    import lib.vs_lib as vs_lib
    vs_json = vs_lib.get_vs(vs_name, tenant=kwargs.get('tenant', 'admin'))
    if vs_json['type'] == 'VS_TYPE_VH_CHILD':
        if rest.get_cloud_type() != 'openshift':
            logger.info("[SKIPPING] DNS check for VS as SNI child are not "
                        "currently supported for non-openshift clouds")
            return True
        parent_ref = vs_json['vh_parent_vs_ref']
        parent_uuid = parent_ref.split('/')[-1]
        _, parent_vs = rest.get('virtualservice', uuid=parent_uuid)
        parent_vs_name = parent_vs['name']

        logger.info('SNI child VS detected; doing DNS on parent VS %s' %
                    parent_vs_name)
        child_fqdn = vs_json['vh_domain_name']
        parent_fqdns = [t['fqdn'] for t in parent_vs['dns_info']]
        if child_fqdn not in parent_fqdns:
            return False
        vs_name = parent_vs_name  # REVIEW should it be parent or child name?
        vs_json = parent_vs
        dns_name = child_fqdn
    else:
        dns_name = vs_json['ipam_dns_records'][0]['fqdn']

    logger.trace('vs_json: %s' % vs_json)
    if 'floating_ip' in vs_json['vip'][0]:
        ip = vs_json['vip'][0]['floating_ip']['addr']
    else:
        ip = vs_json['vip'][0]['ip_address']['addr']

    ports = sorted([srv['port'] for srv in vs_json['services']])
    logger.info("VS [%s]: IP %s, DNS %s, Ports: %s" %
                (vs_name, ip, dns_name, ports))
    count = retries
    while count:
        if vs_lib.vs_check_ip_ports(vs_name,
                                    ip,
                                    dns_name,
                                    ports,
                                    dns_vs_vip=dns_vs_vip):
            return True
        count -= 1
        logger_utils.asleep(delay=5)
        logger_utils.fail("DNS check failed!!")
Example #8
def set_key_rotate_period(**kwargs):
    try:
        set_controller_properties(**kwargs)
    except Exception:
        if kwargs.get('should_pass', True):
            logger_utils.fail("set_controller_properties was expected to "
                              "succeed but raised an exception")
        return True   # do not sleep if the expected error is caught
    logger_utils.asleep(msg='wait', delay=61)  # sleeping for 1 min, so that
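A hedged sketch of the negative path; the keyword forwarded to set_controller_properties is hypothetical, not a confirmed field name:

# expect the property update to be rejected; the helper then returns
# without sleeping (key_rotate_period is an illustrative kwarg)
set_key_rotate_period(key_rotate_period=-1, should_pass=False)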
Example #9
def wait_for_scale_set_success(ss_name):
    """
    :param ss_name: Azure scale set name
    """
    asleep('Dummy waiting since scale set autoscaling does not take effect instantly', delay=10)
    cloud_type, configuration = rest.get_cloud_type(get_configuration=True)
    azure = Azure(configuration, 'Default-Cloud')
    azure.check_scale_set_status(ss_name)
Example #10
def autoscale_lower_max_size(pool_name, **kwargs):
    """
    pass the autoscale policy settings into the kwargs
    :param pool_name:
    :param kwargs:
    :return:
    """

    _, pool_obj = rest.get('pool', name=pool_name)
    asp_ref = pool_obj['autoscale_policy_ref']
    as_policy_uuid = asp_ref.split('serverautoscalepolicy/')[1].split('#')[0]

    _, autoscale_policy = rest.get('serverautoscalepolicy',
                                   uuid=as_policy_uuid)
    logger.info('received asp %s   type %s' %
                (autoscale_policy, type(autoscale_policy)))
    orig_max_size = autoscale_policy['max_size']
    orig_min_size = autoscale_policy['min_size']

    num_servers = len(pool_obj['servers'])
    for k, v in kwargs.iteritems():
        logger.info('k,v %s,%s' % (k, v))
        autoscale_policy[k] = v
    if num_servers < 2:
        logger_utils.fail('Number of servers (%d) is less than the required '
                          'minimum of 2' % num_servers)
    autoscale_policy['max_size'] = num_servers - 1
    autoscale_policy['min_size'] = min(autoscale_policy['min_size'],
                                       autoscale_policy['max_size'])

    asp_json = json.dumps(autoscale_policy)
    logger.info('json: %s' % asp_json)
    rc, result = rest.put('serverautoscalepolicy',
                          uuid=as_policy_uuid,
                          data=asp_json)
    logger.info('updating as_policy %s' % autoscale_policy)
    as_info = get_autoscale_info(pool_name)
    assert as_info

    logger_utils.asleep(delay=AS_WAIT_TIME)
    for _ in xrange(12):
        logger_utils.asleep(delay=10)
        _, pool_obj = rest.get('pool', name=pool_name)
        new_num_servers = len(pool_obj['servers'])
        if new_num_servers < num_servers:
            # scale-in has removed at least one server
            break
    _, autoscale_policy = rest.get('serverautoscalepolicy',
                                   uuid=as_policy_uuid)
    autoscale_policy['max_size'] = orig_max_size
    autoscale_policy['min_size'] = orig_min_size
    asp_json = json.dumps(autoscale_policy)
    rc, result = rest.put('serverautoscalepolicy',
                          uuid=as_policy_uuid,
                          data=asp_json)
    logger.info('json: %s rc: %s results: %s' % (asp_json, rc, result))
    return autoscale_policy['max_size']
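The ref-to-uuid slicing used above (and in several of the other examples) can be factored into a small standalone helper; a sketch, with the helper name being illustrative:

def uuid_from_ref(ref, obj_type):
    """Extract the uuid from an API ref such as
    'https://host/api/serverautoscalepolicy/<uuid>#<name>'."""
    return ref.split('%s/' % obj_type)[1].split('#')[0]

# e.g. uuid_from_ref(pool_obj['autoscale_policy_ref'], 'serverautoscalepolicy')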
Example #11
def stop_controller_process(vm, process_name, update_pids=True):
    import lib.cluster_lib as cluster_lib
    update_pids = (update_pids is True or update_pids == 'True')
    proc = process_name
    logger.info('Stopping process %s on controller with IP %s' % (proc, vm.ip))
    vm.stop_upstart_job(proc)
    if update_pids:
        logger_utils.asleep(delay=5)
        cluster_lib.wait_until_cluster_ready()
        cluster_lib.update_processes_for_all_controllers()
Example #12
    def create_instance(self, wait=True, **kwargs):
        """ Create an instance from AMI """
        instance_type = self.vm_json.get('type')
        networks = self.vm_json.get('networks')
        offset = kwargs.pop('offset', 10)
        version_tag = kwargs.pop('version_tag', None)
        iam_role = None
        if version_tag is None and instance_type in ['se', 'controller']:
            logger_utils.fail(
                'Please specify version tag of AMI for creating SE/Controller')

        if instance_type == 'controller':
            ami_name = 'Avi-Controller-%s' % version_tag
            iam_role = kwargs.get('iam_role', None)
        elif instance_type == 'se':
            ami_name = 'Avi-SE-%s' % version_tag
        elif instance_type == 'client':
            ami_name = 'Jenkins Client'
        elif instance_type == 'server':
            ami_name = 'jenkins-server'
        else:
            logger_utils.fail('Unknown instance type: %s' % instance_type)

        image_id = self._get_ami_id(ami_name)
        sec_grps = self._get_sec_grp_ids(SEC_GROUP[instance_type])
        interfaces = self._set_mgmt_interfaces(networks, offset, sec_grps)

        try:
            # http://boto.readthedocs.org/en/latest/ref/ec2.html#boto.ec2.connection.EC2Connection.run_instances
            self.reservation = self.ec2.run_instances(
                #placement=REGION + 'a',
                image_id=image_id,
                instance_type=INSTANCE_SIZE[instance_type],
                network_interfaces=interfaces,
                instance_profile_name=iam_role)  #,
            #**kwargs)
        except Exception as e:
            logger_utils.fail(e)
        logger.info('Created reservation for instance: %s' % self.reservation)
        if wait:
            instance = self.reservation.instances[0]
            logger_utils.asleep(delay=10)
            logger.info('Wait until instance %s goes to running state' %
                        instance.id)
            ip_addr = self._wait_until_instance_in_state(instance)
            try:
                vm_name = self.vm_json.get('name')
                instance.add_tag('Name', vm_name)
                instance.add_tag('Owner', vm_name)
                wait_until_vm_ready([ip_addr])
                logger.debug('Set data interfaces for instance: %s' %
                             instance.id)
                #offset = self.__set_data_interfaces(
                #instance, vm, networks, offset, sec_grps)
            except Exception as e:
                logger_utils.fail(e)
            return instance, offset
Example #13
 def terminate_instance(self, instance_name):
     """ Terminate instance on AWS """
     self.sdk_connect()
     instance_id = self._get_instance_id(instance_name)
     logger.info('Terminate instance: %s', instance_name)
     self.ec2.terminate_instances(instance_ids=[instance_id])
     # TODO: Check instance state as Terminate and should release IP
     logger_utils.asleep(
         'Sleeping for instance to terminate and release IP',
         delay=60,
         period=20)
Example #14
def disconnect_zk(vm, duration):
    """ Blocks the port used by zookeeper. Use duration = -1 to indefinitely
    block the port """
    vm.block_port_range(5000, 5097)
    duration = float(duration)
    if duration > 0:
        logger_utils.asleep(delay=duration)
        vm.clear_iptables()
        logger_utils.asleep(delay=120)
        cluster.wait_until_n_cluster_nodes_ready(
            len(infra_utils.get_vm_of_type('controller')))
        update_processes_for_all_controllers_and_ses()
Example #15
def delete_dns_system_configuration():
    """
    
    :return: 
    """

    status_code, response = rest.get('systemconfiguration')
    response['dns_virtualservice_uuids'] = []
    if 'dns_virtualservice_refs' in response:
        del response['dns_virtualservice_refs']
    rest.put('systemconfiguration', data=response)

    logger_utils.asleep(delay=5)
Example #16
def wait_for_next_hs_computation(now=None, step=300, step_times=1):
    """

    :param now:
    :param step:
    :param step_times:
    :return:
    """

    time_to_wait = hs_next_computation(now, step, int(step_times))
    logger.info('Sleeping for hs computation for %s secs' % time_to_wait)
    logger_utils.asleep(delay=time_to_wait)
    return
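hs_next_computation is not shown in this example; as an assumption only, it presumably computes the seconds remaining until the next hs computation boundary, roughly like this self-contained sketch:

import time

def seconds_to_next_boundary(now=None, step=300, step_times=1):
    """Seconds from `now` until `step_times` boundaries of `step` seconds
    have elapsed (e.g. the next 5-minute hs computation tick)."""
    now = time.time() if now is None else now
    return (step - (now % step)) + step * (step_times - 1)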
Example #17
def warm_restart_cluster(wait=True):
    ctrl_vm = get_cluster_master_vm()

    path = ('http://localhost:%d/service_event?status=warm-restart' %
            PROC_SUPERVISOR_PORT)
    cmd_str = 'curl %s' % path
    rsp = ctrl_vm.execute_command(cmd_str)
    if wait is True:
        logger_utils.asleep(delay=20)
        wait_until_cluster_ready()
        wait_until_res_mgr_coldstart_complete()
        #update_processes_for_all_controllers()
    return str(rsp[0])
Example #18
 def wait_until_vm_is_up(self, vm_name=None, timeout=3600):
     if not vm_name:
         vm_name = self.vm_json.get('name')
     vcenter_handle = self.server
     num_loops = int(timeout) / 30
     i = 0
     while i < num_loops:
         vm = vcenter_handle.get_vm_by_name(vm_name)
         props = vm.get_properties()
         if 'net' in props.keys():
             logger.info('vm network info: %s' % (props.get('net')))
             return
         asleep(msg='Waiting for vm to come up', delay=30)
         i += 1
Example #19
    def test_traffic_1(self):
        # traffic_manager.create_clients(1, 'c', 'net1', 'httperf')
        msvc_map = mesos_lib.create_erdos_renyi_graph(
            self.app_prefix,
            self.num_apps,
            None,
            self.num_edges,
            ip_client='c1',
            northsouth=self.northsouth)

        mesos_lib.generate_microservice_traffic(msvc_map, load=1)
        start_time = mesos_lib.generate_microservice_traffic(msvc_map)
        logger_utils.asleep(delay=15)
        mesos_lib.validate_microservice_traffic(msvc_map, start_time)
Example #20
def autoscale_raise_min_size(pool_name, **kwargs):
    """
    pass the autoscale policy settings into the kwargs
    :param pool_name:
    :param kwargs:
    :return:
    """

    _, pool_obj = rest.get('pool', name=pool_name)
    asp_ref = pool_obj['autoscale_policy_ref']
    as_policy_uuid = asp_ref.split('serverautoscalepolicy/')[1].split('#')[0]

    _, autoscale_policy = rest.get('serverautoscalepolicy',
                                   uuid=as_policy_uuid)
    logger.info('received asp %s type %s ' %
                (autoscale_policy, type(autoscale_policy)))
    as_policy_old = copy.deepcopy(autoscale_policy)
    orig_min_size = autoscale_policy['min_size']
    orig_max_size = autoscale_policy['max_size']
    num_servers = len(pool_obj['servers'])
    for k, v in kwargs.iteritems():
        logger.info('k,v %s,%s' % (k, v))
        autoscale_policy[k] = v
    autoscale_policy['min_size'] = num_servers + 1
    autoscale_policy['max_size'] = max(autoscale_policy['max_size'],
                                       autoscale_policy['min_size'])
    asp_json = json.dumps(autoscale_policy)
    logger.info(' json: %s' % asp_json)
    rc, result = rest.put('serverautoscalepolicy',
                          uuid=as_policy_uuid,
                          data=asp_json)
    logger.info('updating as_policy %s %s %s' % (autoscale_policy, rc, result))
    logger_utils.asleep(delay=AS_WAIT_TIME)
    get_autoscale_info(pool_name)
    _, pool_obj = rest.get('pool', name=pool_name)
    num_servers = len(pool_obj['servers'])
    if num_servers == 0:
        logger_utils.fail('Pool %s has no up servers' % pool_name)
    _, autoscale_policy = rest.get('serverautoscalepolicy',
                                   uuid=as_policy_uuid)
    autoscale_policy['min_size'] = orig_min_size
    autoscale_policy['max_size'] = orig_max_size
    asp_json = json.dumps(autoscale_policy)
    logger.info('json: %s' % asp_json)
    rc, result = rest.put('serverautoscalepolicy',
                          uuid=as_policy_uuid,
                          data=asp_json)
    logger.info('rc: %s result: %s' % (rc, result))
    return autoscale_policy['min_size']
Example #21
def wait_for_server_count_change(vs_name, prev_count, expected_op='>'):
    """

    :param vs_name:
    :param prev_count:
    :param expected_op:
    :return:
    """

    for _ in xrange(20):
        curr_count = vs_lib.get_vs_pool_server_count(vs_name)
        if eval('%d %s %d' % (curr_count, expected_op, prev_count)):
            return
        logger_utils.asleep(delay=15)
    logger_utils.fail('vs %s server count did not change prev %s curr %s' %
                      (vs_name, prev_count, curr_count))
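Building the comparison with eval() works but is fragile; a safer equivalent, shown only as an alternative sketch, maps the operator string through the operator module:

import operator

_OPS = {'>': operator.gt, '<': operator.lt,
        '>=': operator.ge, '<=': operator.le, '==': operator.eq}

def compare_counts(curr_count, expected_op, prev_count):
    return _OPS[expected_op](curr_count, prev_count)

# e.g. compare_counts(curr_count, '>', prev_count) instead of eval(...)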
Example #22
def reboot_clean(update_admin_info=True, **kwargs):
    post('cluster', path='reboot', data=json.dumps({'mode': 'REBOOT_CLEAN'}))
    asleep(msg="Sleep before cluster wait check", delay=120)
    wait_until_cluster_ready()
    set_sysadmin_public_key()
    set_systemconfiguration()
    set_ha_mode_best_effort()
    if update_admin_info:
        config = AviConfig.get_instance()
        mode = config.get_mode()
        logger.debug("Current Default Mode %s" % mode)
        username = mode['user']
        password = '******'
        mode["password"] = '******'
        update_admin_user(username=username, password=password)
        switch_mode(**mode)
Example #23
 def delete_vm(self, vm_name=None, raise_error=True):
     """ Delete VM instance """
     if not vm_name:
         vm_name = self.vm_json.get('name')
     try:
         result = self.compute_client.virtual_machines.delete(
             self.resource_group, vm_name)
         asleep(msg='VM delete in progress; the API call has its own internal wait.',
                delay=10)
         result.wait()
     except Exception as e:
         logger.info("Got Exception while deleting VM: %s , Exp: %s" %
                     (vm_name, e.message))
         if raise_error:
             fail('Error while deleting Controller Virtual Machine: %s' %
                  str(e))
Example #24
 def test_token_authenticated_traffic(self):
     # traffic_manager.create_clients(1, 'c', 'net1', 'httperf')
     msvc_map = mesos_lib.create_erdos_renyi_graph(
         'auth1',
         self.num_apps,
         None,
         self.num_edges,
         ip_client='c1',
         northsouth=self.northsouth)
     mesos_lib.generate_microservice_traffic(msvc_map,
                                             auth_type='token',
                                             load=1)
     start_time = mesos_lib.generate_microservice_traffic(msvc_map,
                                                          auth_type='token')
     logger_utils.asleep(delay=15)
     mesos_lib.validate_microservice_traffic(msvc_map, start_time)
Example #25
 def wait_until_vm_is_up(self):
     instance = self.reservation.instances[0]
     logger_utils.asleep(delay=10)
     logger.info('Wait until instance %s goes to running state' %
                 instance.id)
     ip_addr = self._wait_until_instance_in_state(instance)
     try:
         vm_name = self.vm_json.get('name')
         instance.add_tag('Name', vm_name)
         instance.add_tag('Owner', vm_name)
         wait_until_vm_ready([ip_addr])
         logger.debug('Set data interfaces for instance: %s' % instance.id)
         #offset = self.__set_data_interfaces(
         #instance, vm, networks, offset, sec_grps)
     except Exception as e:
         logger_utils.fail(e)
     return instance
Example #26
    def test_traffic_with_security_policy(self):
        # traffic_manager.create_clients(1, 'c', 'net1', 'httperf')
        adj_list = []
        blocked = True
        mesos_lib.add_edge(adj_list, 't2-1', 't2-2', blocked=blocked)
        mesos_lib.add_edge(adj_list, 't2-1', 't2-3')
        mesos_lib.add_edge(adj_list, 't2-1', 't2-4')
        mesos_lib.add_edge(adj_list, 't2-2', 't2-1', blocked=blocked)
        mesos_lib.add_edge(adj_list, 't2-2', 't2-4')
        mesos_lib.add_edge(adj_list, 't2-3', 't2-1', blocked=blocked)
        mesos_lib.add_edge(adj_list, 't2-4', 't2-2')
        mesos_lib.add_edge(adj_list, 't2-5', 't2-2')

        msvc_map = mesos_lib.create_microservice_map(adj_list)
        mesos_lib.generate_microservice_traffic(msvc_map, load=1)
        start_time = mesos_lib.generate_microservice_traffic(msvc_map)
        logger_utils.asleep(delay=15)
        mesos_lib.validate_microservice_traffic(msvc_map, start_time)
Example #27
    def test_check_realtime_traffic(self):
        # traffic_manager.create_clients(1, 'c', 'net1', 'httperf')
        mesos_lib.set_mesos_rt_collection(rt_flag=True)
        # sleep for the default collection period until the new period kicks in
        logger_utils.asleep(delay=60)
        # Generate traffic
        adj_list = []
        mesos_lib.add_edge(adj_list, 'c1', 'ma1-1', src_type='vm', load=20000)

        msvc_map = mesos_lib.create_microservice_map(adj_list)
        start_time = mesos_lib.generate_microservice_traffic(msvc_map,
                                                             load=20000)
        logger_utils.asleep(delay=5)
        ma1_1_pool = mesos_lib.get_pool_name_with_tenant('ma1-1')
        metrics_lib.metrics_check_poolvm_container(ma1_1_pool,
                                                   step=5,
                                                   limit=12,
                                                   mbaseline=0)
        mesos_lib.set_mesos_rt_collection(rt_flag=False)
Example #28
    def test_multiple_marathon_traffic(self):
        adj_list = []
        mesos_lib.add_edge(adj_list, 'mara1-1', 'mara2-1')
        mesos_lib.add_edge(adj_list, 'mara1-3', 'mara2-2')
        mesos_lib.add_edge(adj_list, 'mara1-2', 'mara2-3')
        mesos_lib.add_edge(adj_list, 'mara2-3', 'mara1-1')
        mesos_lib.add_edge(adj_list, 'mara2-2', 'mara1-2')
        mesos_lib.add_edge(adj_list, 'mara2-1', 'mara1-3')
        mesos_lib.add_edge(adj_list, 'c1', 'mara1-1', src_type='vm')
        mesos_lib.add_edge(adj_list, 'c1', 'mara2-1', src_type='vm')

        msvc_map = mesos_lib.create_microservice_map(adj_list)
        mesos_lib.generate_microservice_traffic(msvc_map,
                                                auth_type='hybrid',
                                                load=1)
        start_time = mesos_lib.generate_microservice_traffic(
            msvc_map, auth_type='hybrid')
        logger_utils.asleep(delay=15)
        mesos_lib.validate_microservice_traffic(msvc_map, start_time)
Example #29
    def create_nic(self, vm_name=None):
        """ Create Network interface """
        if not vm_name:
            vm_name = self.vm_json.get('name')

        controller_ip = self.get_ip_from_azure()
        nic_name = "%s-NIC" % vm_name
        try:
            nic = self.network_client.network_interfaces.create_or_update(
                self.resource_group,
                nic_name,
                parameters=self.get_nic_params(controller_ip))
            asleep(
                msg='NIC creation in progress; the API call has its own internal wait.',
                delay=10)
            nic.wait()
            return controller_ip
        except Exception as e:
            fail('Error while creating a NIC %s' % str(e))
Example #30
def verify_traffic_can_flow(vs_name, vport, num_conns, retry=5):
    """

    :param vs_name:
    :param vport:
    :param num_conns:
    :param retry:
    :return:
    """

    num_connections = int(num_conns)
    retry = int(retry)
    try:
        tcpstat_pre = tcp_lib.tcpstat(vs_name)
    except Exception:
        logger_utils.fail('could not get tcpstat before traffic starts')
    request('get',
            vs_name,
            vport,
            '/',
            concurrent_conn_per_client=num_connections,
            skip_exception=1)
    time.sleep(3)
    # keep retrying, sleeping 10 seconds per attempt, until connections finish
    while retry > 0:
        try:
            tcpstat_post = tcp_lib.tcpstat(vs_name)
        except Exception:
            logger_utils.fail('could not get tcpstat after traffic starts')
        tcps_num_conns = json_utils.json_diff(tcpstat_post, tcpstat_pre,
                                              'connections_closed')
        logger.info('conn diff: %s --- expected: %s' %
                    (tcps_num_conns, num_connections * 2))
        if tcps_num_conns < num_connections * 2:
            logger.info('Not all connections went through. Retrying..')
            retry -= 1
            logger_utils.asleep(delay=10)
            continue
        else:
            break
    if retry <= 0:
        logger_utils.fail('Not all connections went through!')
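A hypothetical invocation from a traffic test; the VS name and port are illustrative:

verify_traffic_can_flow('traffic-vs-1', 80, num_conns=10, retry=5)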