示例#1
0
文件: onos.py 项目: sorantis/functest
    def create_image(self):
        """Register the ONOS SFC image in Glance.

        Raises Exception when Glance does not return an image id;
        otherwise logs the id of the newly created image.
        """
        client = openstack_utils.get_glance_client()
        image_id = openstack_utils.create_glance_image(
            client, self.onos_sfc_image_name, self.onos_sfc_image_path)
        if image_id is None:
            raise Exception('Failed to create image')
        logger.debug("Image '%s' with ID=%s is created successfully." %
                     (self.onos_sfc_image_name, image_id))
示例#2
0
def setup_glance(glance_client):
    """Upload the configured test image to Glance and return its id."""
    return os_utils.create_glance_image(glance_client,
                                        IMAGE_NAME,
                                        IMAGE_PATH,
                                        disk=IMAGE_FORMAT,
                                        container="bare",
                                        public=True)
示例#3
0
def create_tempest_resources():
    """Provision the OpenStack resources the Tempest suite relies on.

    Creates, in order: a dedicated tenant and user, a shared private
    network (net + subnet + router), and a Glance image (reused when one
    with the expected name already exists).  Network and image failures
    abort with exit(-1); tenant/user creation failures are only logged.
    NOTE(review): continuing after a tenant/user failure looks deliberate
    (they may already exist) — confirm.
    """
    ks_creds = os_utils.get_credentials("keystone")
    logger.debug("Creating tenant and user for Tempest suite")
    keystone = ksclient.Client(**ks_creds)
    tenant_id = os_utils.create_tenant(keystone,
                                       TENANT_NAME,
                                       TENANT_DESCRIPTION)
    if tenant_id == '':
        logger.error("Error : Failed to create %s tenant" % TENANT_NAME)

    user_id = os_utils.create_user(keystone, USER_NAME, USER_PASSWORD,
                                   None, tenant_id)
    if user_id == '':
        logger.error("Error : Failed to create %s user" % USER_NAME)

    logger.debug("Creating private network for Tempest suite")
    creds_neutron = os_utils.get_credentials("neutron")
    neutron_client = neutronclient.Client(**creds_neutron)
    network_dic = os_utils.create_network_full(logger,
                                               neutron_client,
                                               PRIVATE_NET_NAME,
                                               PRIVATE_SUBNET_NAME,
                                               ROUTER_NAME,
                                               PRIVATE_SUBNET_CIDR)
    if network_dic:
        # Mark the network as shared so other tenants can use it.
        if not os_utils.update_neutron_net(neutron_client,
                                           network_dic['net_id'],
                                           shared=True):
            logger.error("Failed to update private network...")
            exit(-1)
        else:
            logger.debug("Network '%s' is available..." % PRIVATE_NET_NAME)
    else:
        logger.error("Private network creation failed")
        exit(-1)

    logger.debug("Creating image for Tempest suite")
    # Build a Glance v1 client from the keystone catalog's public endpoint.
    glance_endpoint = keystone.service_catalog.url_for(
        service_type='image', endpoint_type='publicURL')
    glance_client = glanceclient.Client(1, glance_endpoint,
                                        token=keystone.auth_token)
    # Check if the given image exists
    image_id = os_utils.get_image_id(glance_client, GLANCE_IMAGE_NAME)
    if image_id != '':
        logger.info("Using existing image '%s'..." % GLANCE_IMAGE_NAME)
    else:
        logger.info("Creating image '%s' from '%s'..." % (GLANCE_IMAGE_NAME,
                                                          GLANCE_IMAGE_PATH))
        image_id = os_utils.create_glance_image(glance_client,
                                                GLANCE_IMAGE_NAME,
                                                GLANCE_IMAGE_PATH)
        if not image_id:
            logger.error("Failed to create a Glance image...")
            exit(-1)
        logger.debug("Image '%s' with ID=%s created successfully."
                     % (GLANCE_IMAGE_NAME, image_id))
示例#4
0
文件: onos.py 项目: rski/functest
def CreateImage():
    """Create the ONOS SFC image in Glance.

    Returns -1 when the image could not be created; on success logs the
    image id and implicitly returns None.
    """
    image_id = openstack_utils.create_glance_image(
        openstack_utils.get_glance_client(),
        ONOS_SFC_IMAGE_NAME,
        ONOS_SFC_IMAGE_PATH)
    if not image_id:
        logger.error("Failed to create a Glance image...")
        return -1
    logger.debug("Image '%s' with ID=%s created successfully." %
                 (ONOS_SFC_IMAGE_NAME, image_id))
示例#5
0
def CreateImage():
    """Create the configured Glance image.

    Returns -1 when the image could not be created; on success logs the
    image id and implicitly returns None.
    """
    image_id = openstack_utils.create_glance_image(
        openstack_utils.get_glance_client(),
        GLANCE_IMAGE_NAME,
        GLANCE_IMAGE_PATH)
    if not image_id:
        logger.error("Failed to create a Glance image...")
        return -1
    logger.debug("Image '%s' with ID=%s created successfully."
                 % (GLANCE_IMAGE_NAME, image_id))
示例#6
0
def download_and_add_image_on_glance(glance, image_name,
                                     image_url, data_dir):
    """Download *image_url* into *data_dir* and register it in Glance.

    Returns the created Glance image on success, False when either the
    download or the Glance upload fails.
    """
    dest_path = data_dir
    if not os.path.exists(dest_path):
        os.makedirs(dest_path)
    file_name = image_url.rsplit('/')[-1]
    if not ft_utils.download_url(image_url, dest_path):
        return False
    # os.path.join avoids a broken path when data_dir has no trailing
    # separator (the previous naive concatenation silently assumed one).
    image = os_utils.create_glance_image(
        glance, image_name, os.path.join(dest_path, file_name))
    if not image:
        return False
    return image
示例#7
0
def download_and_add_image_on_glance(glance, image_name, image_url):
    """Download *image_url* into the VIMS tmp directory, then upload it
    to Glance.

    Returns the Glance image on success, False on download or upload
    failure (each failure is logged).
    """
    dest_path = VIMS_DATA_DIR + "tmp/"
    if not os.path.exists(dest_path):
        os.makedirs(dest_path)
    file_name = image_url.rsplit('/')[-1]

    downloaded = functest_utils.download_url(image_url, dest_path)
    if not downloaded:
        logger.error("Failed to download image %s" % file_name)
        return False

    uploaded = os_utils.create_glance_image(glance, image_name,
                                            dest_path + file_name)
    if not uploaded:
        logger.error("Failed to upload image on glance")
        return False
    return uploaded
示例#8
0
文件: vHello.py 项目: blsaws/vHello
def download_and_add_image_on_glance(glance, image_name, image_url):
    """Download *image_url* into the vHello tmp directory, then upload
    it to Glance.

    Returns the Glance image on success, False on download or upload
    failure (each failure is logged).
    """
    dest_path = vHello_DATA_DIR + "tmp/"
    if not os.path.exists(dest_path):
        os.makedirs(dest_path)
    file_name = image_url.rsplit('/')[-1]

    downloaded = functest_utils.download_url(image_url, dest_path)
    if not downloaded:
        logger.error("Failed to download image %s" % file_name)
        return False

    uploaded = os_utils.create_glance_image(glance, image_name,
                                            dest_path + file_name)
    if not uploaded:
        logger.error("Failed to upload image on glance")
        return False
    return uploaded
示例#9
0
def main():
    """Run the extra-route / allowed-address-pairs BGPVPN testcase.

    Boots two VMs that both carry the same extra-route IP (announced via
    neutron allowed-address-pairs), attaches their router to a BGPVPN
    created with explicit route distinguishers, installs the extra
    routes on the router, then boots a third VM on a different compute
    node and checks it can ping the extra-route IP.  All resources are
    cleaned up in the finally block; returns the compiled Results
    summary.
    """
    results = Results(COMMON_CONFIG.line_length)

    results.add_to_summary(0, "=")
    results.add_to_summary(2, "STATUS", "SUBTEST")
    results.add_to_summary(0, "=")

    nova_client = os_utils.get_nova_client()
    neutron_client = os_utils.get_neutron_client()
    glance_client = os_utils.get_glance_client()

    # One id-list per resource type so the finally block can tear down
    # whatever was actually created before any failure.
    (floatingip_ids, instance_ids, router_ids, network_ids, image_ids,
     subnet_ids, interfaces, bgpvpn_ids, flavor_ids) = ([] for i in range(9))

    try:
        image_id = os_utils.create_glance_image(
            glance_client,
            COMMON_CONFIG.ubuntu_image_name,
            COMMON_CONFIG.ubuntu_image_path,
            disk="qcow2",
            container="bare",
            public="public")
        image_ids.append(image_id)

        _, flavor_id = test_utils.create_custom_flavor()
        flavor_ids.append(flavor_id)

        network_1_id, subnet_1_id, router_1_id = test_utils.create_network(
            neutron_client, TESTCASE_CONFIG.net_1_name,
            TESTCASE_CONFIG.subnet_1_name, TESTCASE_CONFIG.subnet_1_cidr,
            TESTCASE_CONFIG.router_1_name)

        interfaces.append(tuple((router_1_id, subnet_1_id)))
        network_ids.extend([network_1_id])
        subnet_ids.extend([subnet_1_id])
        router_ids.extend([router_1_id])

        sg_id = os_utils.create_security_group_full(
            neutron_client, TESTCASE_CONFIG.secgroup_name,
            TESTCASE_CONFIG.secgroup_descr)

        compute_nodes = test_utils.assert_and_get_compute_nodes(nova_client)

        # Pin vm_1/vm_2 to the first compute node and (later) vm_3 to
        # the second, so the ping crosses hypervisors.
        av_zone_1 = "nova:" + compute_nodes[0]
        av_zone_2 = "nova:" + compute_nodes[1]

        # Userdata makes the guests bring up an extra interface holding
        # the extra-route IP.
        u1 = test_utils.generate_userdata_interface_create(
            TESTCASE_CONFIG.interface_name, TESTCASE_CONFIG.interface_number,
            TESTCASE_CONFIG.extra_route_ip,
            TESTCASE_CONFIG.extra_route_subnet_mask)
        # Boot instances
        vm_1 = test_utils.create_instance(
            nova_client,
            TESTCASE_CONFIG.instance_1_name,
            image_id,
            network_1_id,
            sg_id,
            flavor=COMMON_CONFIG.custom_flavor_name,
            secgroup_name=TESTCASE_CONFIG.secgroup_name,
            compute_node=av_zone_1,
            userdata=u1)
        vm_1_ip = test_utils.get_instance_ip(vm_1)

        # Allow vm_1's port to send/receive traffic for the extra-route
        # CIDR (otherwise neutron's anti-spoofing would drop it).
        vm1_port = test_utils.get_port(neutron_client, vm_1.id)
        test_utils.update_port_allowed_address_pairs(
            neutron_client, vm1_port['id'], [
                test_utils.AllowedAddressPair(TESTCASE_CONFIG.extra_route_cidr,
                                              vm1_port['mac_address'])
            ])

        vm_2 = test_utils.create_instance(
            nova_client,
            TESTCASE_CONFIG.instance_2_name,
            image_id,
            network_1_id,
            sg_id,
            flavor=COMMON_CONFIG.custom_flavor_name,
            secgroup_name=TESTCASE_CONFIG.secgroup_name,
            compute_node=av_zone_1,
            userdata=u1)
        vm_2_ip = test_utils.get_instance_ip(vm_2)

        vm2_port = test_utils.get_port(neutron_client, vm_2.id)
        test_utils.update_port_allowed_address_pairs(
            neutron_client, vm2_port['id'], [
                test_utils.AllowedAddressPair(TESTCASE_CONFIG.extra_route_cidr,
                                              vm2_port['mac_address'])
            ])

        test_utils.async_Wait_for_instances([vm_1, vm_2])

        msg = ("Create VPN with multiple RDs")
        results.record_action(msg)
        vpn_name = "sdnvpn-" + str(randint(100000, 999999))
        kwargs = {
            "import_targets": TESTCASE_CONFIG.targets1,
            "export_targets": TESTCASE_CONFIG.targets2,
            "route_distinguishers": TESTCASE_CONFIG.route_distinguishers,
            "name": vpn_name
        }
        bgpvpn = test_utils.create_bgpvpn(neutron_client, **kwargs)
        bgpvpn_id = bgpvpn['bgpvpn']['id']
        logger.debug("VPN created details: %s" % bgpvpn)
        bgpvpn_ids.append(bgpvpn_id)

        msg = ("Associate router '%s' to the VPN." %
               TESTCASE_CONFIG.router_1_name)
        results.record_action(msg)
        results.add_to_summary(0, "-")

        test_utils.create_router_association(neutron_client, bgpvpn_id,
                                             router_1_id)

        # The same extra-route CIDR is installed twice with different
        # nexthops (vm_1 and vm_2).
        test_utils.update_router_extra_route(neutron_client, router_1_id, [
            test_utils.ExtraRoute(TESTCASE_CONFIG.extra_route_cidr, vm_1_ip),
            test_utils.ExtraRoute(TESTCASE_CONFIG.extra_route_cidr, vm_2_ip)
        ])

        image_2_id = os_utils.create_glance_image(
            glance_client,
            TESTCASE_CONFIG.image_name,
            COMMON_CONFIG.image_path,
            disk=COMMON_CONFIG.image_format,
            container="bare",
            public='public')
        image_ids.append(image_2_id)

        logger.info("Waiting for the VMs to connect to each other using the"
                    " updated network configuration")
        test_utils.wait_before_subtest()

        # vm_3 pings the extra-route IP from the second compute node.
        u3 = test_utils.generate_ping_userdata(
            [TESTCASE_CONFIG.extra_route_ip])
        vm_3 = test_utils.create_instance(
            nova_client,
            TESTCASE_CONFIG.instance_3_name,
            image_2_id,
            network_1_id,
            sg_id,
            flavor=COMMON_CONFIG.custom_flavor_name,
            secgroup_name=TESTCASE_CONFIG.secgroup_name,
            compute_node=av_zone_2,
            userdata=u3)

        instance_ids.extend([vm_1.id, vm_2.id, vm_3.id])

        instance_dhcp_up = test_utils.wait_for_instances_get_dhcp(vm_3)

        # NOTE(review): a DHCP failure is only logged; the ping check
        # below still runs — confirm this best-effort behavior.
        if (not instance_dhcp_up):
            logger.error("vm_3 instance is down")

        results.get_ping_status_target_ip(vm_3,
                                          TESTCASE_CONFIG.extra_route_name,
                                          TESTCASE_CONFIG.extra_route_ip,
                                          expected="PASS",
                                          timeout=300)

        results.add_to_summary(0, "=")
        logger.info("\n%s" % results.summary)

    except Exception as e:
        logger.error("exception occurred while executing testcase_13: %s", e)
        raise
    finally:
        # Tear down everything that was created, in dependency order.
        test_utils.update_router_no_extra_route(neutron_client, router_ids)
        test_utils.cleanup_nova(nova_client, instance_ids)
        test_utils.cleanup_glance(glance_client, image_ids)
        test_utils.cleanup_neutron(neutron_client, floatingip_ids, bgpvpn_ids,
                                   interfaces, subnet_ids, router_ids,
                                   network_ids)

    return results.compile_summary()
def main():
    """Boot a single test instance and attach a floating IP to it.

    Creates a Glance image, a full neutron network (net + subnet +
    router) and a security group, boots one instance on that network,
    adds it to the security group, then creates and associates a
    floating IP.  Exits with -1 on any failure and 0 on success.
    """

    nova_client = os_utils.get_nova_client()
    neutron_client = os_utils.get_neutron_client()
    glance_client = os_utils.get_glance_client()

    # NOTE(review): image_id is not checked for failure before being
    # passed to the instance boot — confirm create_glance_image cannot
    # return an unusable value here.
    image_id = os_utils.create_glance_image(glance_client,
                                            IMAGE_NAME,
                                            IMAGE_PATH,
                                            disk=IMAGE_FORMAT,
                                            container="bare",
                                            public=True,
                                            logger=logger)

    network_dic = os_utils.create_network_full(logger,
                                               neutron_client,
                                               NET_NAME,
                                               SUBNET_NAME,
                                               ROUTER_NAME,
                                               SUBNET_CIDR)
    if not network_dic:
        logger.error(
            "There has been a problem when creating the neutron network")
        sys.exit(-1)

    network_id = network_dic["net_id"]

    sg_id = os_utils.create_security_group_full(logger, neutron_client,
                                                SECGROUP_NAME, SECGROUP_DESCR)

    # Boot the test instance.
    logger.info("Creating instance '%s'..." % INSTANCE_NAME)
    logger.debug(
        "Configuration:\n name=%s \n flavor=%s \n image=%s \n "
        "network=%s \n" % (INSTANCE_NAME, FLAVOR, image_id, network_id))
    instance = os_utils.create_instance_and_wait_for_active(FLAVOR,
                                                            image_id,
                                                            network_id,
                                                            INSTANCE_NAME)

    if instance is None:
        logger.error("Error while booting instance.")
        sys.exit(-1)
    # Retrieve the instance's private IP on the test network.
    instance_ip = instance.networks.get(NET_NAME)[0]
    logger.debug("Instance '%s' got private ip '%s'." %
                 (INSTANCE_NAME, instance_ip))

    logger.info("Adding '%s' to security group '%s'..."
                % (INSTANCE_NAME, SECGROUP_NAME))
    os_utils.add_secgroup_to_instance(nova_client, instance.id, sg_id)

    logger.info("Creating floating IP for VM '%s'..." % INSTANCE_NAME)
    floatip_dic = os_utils.create_floating_ip(neutron_client)
    floatip = floatip_dic['fip_addr']
    # floatip_id = floatip_dic['fip_id']

    if floatip is None:
        logger.error("Cannot create floating IP.")
        sys.exit(-1)
    logger.info("Floating IP created: '%s'" % floatip)

    logger.info("Associating floating ip: '%s' to VM '%s' "
                % (floatip, INSTANCE_NAME))
    if not os_utils.add_floating_ip(nova_client, instance.id, floatip):
        logger.error("Cannot associate floating IP to VM.")
        sys.exit(-1)

    sys.exit(0)
示例#11
0
def main():
    """Run the symmetric SFC testcase against an ODL/Tacker deployment.

    Deploys a client, a server and one VNF, builds a symmetric service
    chain ('red') with HTTP classifiers in both directions, blocks one
    source port on the SF, and verifies that HTTP from the allowed port
    works while HTTP from the blocked port is dropped.  Returns the
    compiled Results summary; exits with 1 on setup failures.
    """
    deploymentHandler = DeploymentFactory.get_handler(
        COMMON_CONFIG.installer_type,
        COMMON_CONFIG.installer_ip,
        COMMON_CONFIG.installer_user,
        installer_pwd=COMMON_CONFIG.installer_password)

    cluster = COMMON_CONFIG.installer_cluster
    all_nodes = (deploymentHandler.get_nodes({'cluster': cluster})
                 if cluster is not None
                 else deploymentHandler.get_nodes())

    controller_nodes = [node for node in all_nodes if node.is_controller()]
    compute_nodes = [node for node in all_nodes if node.is_compute()]

    odl_ip, odl_port = test_utils.get_odl_ip_port(all_nodes)

    results = Results(COMMON_CONFIG.line_length)
    results.add_to_summary(0, "=")
    results.add_to_summary(2, "STATUS", "SUBTEST")
    results.add_to_summary(0, "=")

    test_utils.setup_compute_node(TESTCASE_CONFIG.subnet_cidr, compute_nodes)
    test_utils.configure_iptables(controller_nodes)
    test_utils.download_image(COMMON_CONFIG.url, COMMON_CONFIG.image_path)

    neutron_client = os_utils.get_neutron_client()
    nova_client = os_utils.get_nova_client()
    tacker_client = os_tacker.get_tacker_client()

    _, custom_flavor_id = os_utils.get_or_create_flavor(
        COMMON_CONFIG.flavor,
        COMMON_CONFIG.ram_size_in_mb,
        COMMON_CONFIG.disk_size_in_gb,
        COMMON_CONFIG.vcpu_count,
        public=True)
    if custom_flavor_id is None:
        logger.error("Failed to create custom flavor")
        sys.exit(1)

    controller_clients = test_utils.get_ssh_clients(controller_nodes)
    compute_clients = test_utils.get_ssh_clients(compute_nodes)

    ovs_logger = ovs_log.OVSLogger(
        os.path.join(COMMON_CONFIG.sfc_test_dir, 'ovs-logs'),
        COMMON_CONFIG.functest_results_dir)

    image_id = os_utils.create_glance_image(
        os_utils.get_glance_client(),
        COMMON_CONFIG.image_name,
        COMMON_CONFIG.image_path,
        COMMON_CONFIG.image_format,
        public='public')

    network_id = test_utils.setup_neutron(
        neutron_client,
        TESTCASE_CONFIG.net_name,
        TESTCASE_CONFIG.subnet_name,
        TESTCASE_CONFIG.router_name,
        TESTCASE_CONFIG.subnet_cidr)

    sg_id = test_utils.create_security_groups(
        neutron_client,
        TESTCASE_CONFIG.secgroup_name,
        TESTCASE_CONFIG.secgroup_descr)

    vnf_name = 'testVNF1'
    # Using seed=0 uses the baseline topology: everything in the same host
    testTopology = topo_shuffler.topology([vnf_name], seed=0)
    logger.info('This test is run with the topology {0}'
                .format(testTopology['id']))
    logger.info('Topology description: {0}'
                .format(testTopology['description']))

    client_instance = test_utils.create_instance(
        nova_client,
        CLIENT,
        COMMON_CONFIG.flavor,
        image_id,
        network_id,
        sg_id,
        av_zone=testTopology[CLIENT])

    server_instance = test_utils.create_instance(
        nova_client,
        SERVER,
        COMMON_CONFIG.flavor,
        image_id,
        network_id,
        sg_id,
        av_zone=testTopology[SERVER])

    server_ip = server_instance.networks.get(TESTCASE_CONFIG.net_name)[0]

    tosca_file = os.path.join(
        COMMON_CONFIG.sfc_test_dir,
        COMMON_CONFIG.vnfd_dir,
        TESTCASE_CONFIG.test_vnfd)

    default_param_file = os.path.join(
        COMMON_CONFIG.sfc_test_dir,
        COMMON_CONFIG.vnfd_dir,
        COMMON_CONFIG.vnfd_default_params_file)

    # Register the VNFD and boot the VNF in the availability zone the
    # topology shuffler picked for it.
    os_tacker.create_vnfd(tacker_client, tosca_file=tosca_file)
    test_utils.create_vnf_in_av_zone(
        tacker_client,
        vnf_name,
        'test-vnfd1',
        default_param_file,
        testTopology[vnf_name])

    vnf_id = os_tacker.wait_for_vnf(tacker_client, vnf_name=vnf_name)
    if vnf_id is None:
        logger.error('ERROR while booting VNF')
        sys.exit(1)

    vnf_instance_id = test_utils.get_nova_id(tacker_client, 'vdu1', vnf_id)
    os_utils.add_secgroup_to_instance(nova_client, vnf_instance_id, sg_id)

    # Symmetric chain: traffic is classified in both directions.
    os_tacker.create_sfc(
        tacker_client,
        sfc_name='red',
        chain_vnf_names=[vnf_name],
        symmetrical=True)

    os_tacker.create_sfc_classifier(
        tacker_client, 'red_http', sfc_name='red',
        match={
            'source_port': 0,
            'dest_port': 80,
            'protocol': 6
        })

    # FIXME: JIRA SFC-86
    # Tacker does not allow to specify the direction of the chain to be used,
    # only references the SFP (which for symmetric chains results in two RSPs)
    os_tacker.create_sfc_classifier(
        tacker_client, 'red_http_reverse', sfc_name='red',
        match={
            'source_port': 80,
            'dest_port': 0,
            'protocol': 6
        })

    logger.info(test_utils.run_cmd('tacker sfc-list'))
    logger.info(test_utils.run_cmd('tacker sfc-classifier-list'))

    # Start measuring the time it takes to implement the classification rules
    t1 = threading.Thread(target=test_utils.wait_for_classification_rules,
                          args=(ovs_logger, compute_nodes, odl_ip, odl_port,
                                testTopology,))

    try:
        t1.start()
    except Exception as e:
        logger.error("Unable to start the thread that counts time %s" % e)

    logger.info("Assigning floating IPs to instances")
    server_floating_ip = test_utils.assign_floating_ip(
        nova_client, neutron_client, server_instance.id)
    client_floating_ip = test_utils.assign_floating_ip(
        nova_client, neutron_client, client_instance.id)
    sf_floating_ip = test_utils.assign_floating_ip(
        nova_client, neutron_client, vnf_instance_id)

    for ip in (server_floating_ip, client_floating_ip, sf_floating_ip):
        logger.info("Checking connectivity towards floating IP [%s]" % ip)
        if not test_utils.ping(ip, retries=50, retry_timeout=1):
            logger.error("Cannot ping floating IP [%s]" % ip)
            sys.exit(1)
        logger.info("Successful ping to floating IP [%s]" % ip)

    if not test_utils.check_ssh([sf_floating_ip]):
        logger.error("Cannot establish SSH connection to the SFs")
        sys.exit(1)

    logger.info("Starting HTTP server on %s" % server_floating_ip)
    if not test_utils.start_http_server(server_floating_ip):
        logger.error('\033[91mFailed to start the HTTP server\033[0m')
        sys.exit(1)

    blocked_port = TESTCASE_CONFIG.blocked_source_port
    logger.info("Firewall started, blocking traffic port %d" % blocked_port)
    test_utils.start_vxlan_tool(sf_floating_ip, block=blocked_port)

    logger.info("Wait for ODL to update the classification rules in OVS")
    t1.join()

    allowed_port = TESTCASE_CONFIG.allowed_source_port
    logger.info("Test if HTTP from port %s works" % allowed_port)
    if not test_utils.is_http_blocked(
            client_floating_ip, server_ip, allowed_port):
        results.add_to_summary(2, "PASS", "HTTP works")
    else:
        error = ('\033[91mTEST 1 [FAILED] ==> HTTP BLOCKED\033[0m')
        logger.error(error)
        test_utils.capture_ovs_logs(
            ovs_logger, controller_clients, compute_clients, error)
        results.add_to_summary(2, "FAIL", "HTTP works")

    logger.info("Test if HTTP from port %s is blocked" % blocked_port)
    if test_utils.is_http_blocked(
            client_floating_ip, server_ip, blocked_port):
        results.add_to_summary(2, "PASS", "HTTP Blocked")
    else:
        error = ('\033[91mTEST 2 [FAILED] ==> HTTP WORKS\033[0m')
        logger.error(error)
        test_utils.capture_ovs_logs(
            ovs_logger, controller_clients, compute_clients, error)
        results.add_to_summary(2, "FAIL", "HTTP Blocked")

    return results.compile_summary()
示例#12
0
def main():
    """Check OVS group add/remove behavior across a controller disconnect.

    Boots two instances on one compute node, records the OVS group count
    before and after, disconnects OVS from its controller, tears the
    topology down, reconnects the controller, and verifies that the
    groups added for the topology were removed again.  Returns the
    compiled Results summary.
    """
    results = Results(COMMON_CONFIG.line_length)

    results.add_to_summary(0, "=")
    results.add_to_summary(2, "STATUS", "SUBTEST")
    results.add_to_summary(0, "=")

    nova_client = os_utils.get_nova_client()
    neutron_client = os_utils.get_neutron_client()
    glance_client = os_utils.get_glance_client()
    openstack_nodes = test_utils.get_nodes()

    # One id-list per resource type for the cleanup in the finally block.
    (floatingip_ids, instance_ids, router_ids, network_ids, image_ids,
     subnet_ids, interfaces, bgpvpn_ids) = ([] for i in range(8))

    try:
        image_id = os_utils.create_glance_image(
            glance_client, TESTCASE_CONFIG.image_name,
            COMMON_CONFIG.image_path, disk=COMMON_CONFIG.image_format,
            container="bare", public='public')
        image_ids.append(image_id)

        network_1_id = test_utils.create_net(neutron_client,
                                             TESTCASE_CONFIG.net_1_name)
        subnet_1_id = test_utils.create_subnet(neutron_client,
                                               TESTCASE_CONFIG.subnet_1_name,
                                               TESTCASE_CONFIG.subnet_1_cidr,
                                               network_1_id)

        network_ids.append(network_1_id)
        subnet_ids.append(subnet_1_id)

        sg_id = os_utils.create_security_group_full(
            neutron_client, TESTCASE_CONFIG.secgroup_name,
            TESTCASE_CONFIG.secgroup_descr)

        # Check required number of compute nodes
        compute_hostname = (
            nova_client.hypervisors.list()[0].hypervisor_hostname)
        compute_nodes = [node for node in openstack_nodes
                         if node.is_compute()]

        av_zone_1 = "nova:" + compute_hostname
        # List of OVS bridges to get groups
        ovs_br = "br-int"
        # Get a list of groups, before start topology
        initial_ovs_groups = test_utils.get_ovs_groups(compute_nodes,
                                                       [ovs_br])

        # Boot both instances on the same compute node.
        vm_2 = test_utils.create_instance(
            nova_client,
            TESTCASE_CONFIG.instance_2_name,
            image_id,
            network_1_id,
            sg_id,
            secgroup_name=TESTCASE_CONFIG.secgroup_name,
            compute_node=av_zone_1)

        vm_1 = test_utils.create_instance(
            nova_client,
            TESTCASE_CONFIG.instance_1_name,
            image_id,
            network_1_id,
            sg_id,
            secgroup_name=TESTCASE_CONFIG.secgroup_name,
            compute_node=av_zone_1)
        instance_ids.extend([vm_1.id, vm_2.id])

        # Wait for VMs to get ips.
        instances_up = test_utils.wait_for_instances_up(vm_1, vm_2)

        if not instances_up:
            logger.error("One or more instances is down")
            # TODO: Handle this appropriately

        logging.info("Wait before subtest")
        test_utils.wait_before_subtest()
        # Get added OVS groups
        added_ovs_groups = (len(initial_ovs_groups) -
                            len(test_utils.get_ovs_groups(
                                compute_nodes, [ovs_br])))
        # Check if group added successfully
        results.record_action("Check if a new group was added to OVS")
        msg = "New OVS group added"
        results.add_to_summary(0, "-")
        if added_ovs_groups != 0:
            results.add_success(msg)
        else:
            results.add_failure(msg)
        results.add_to_summary(0, "=")
        # Backup OVS controller connection info.
        # To support HA changes should be made here.
        get_ext_ip_cmd = "sudo ovs-vsctl get-controller {}".format(ovs_br)
        ovs_controller_conn = (compute_nodes[0].run_cmd(get_ext_ip_cmd).
                               strip().split('\n')[0])
        # Disconnect OVS from controller
        for compute_node in compute_nodes:
            compute_node.run_cmd("sudo ovs-vsctl del-controller {}".
                                 format(ovs_br))
    except Exception as e:
        logger.error("exception occurred while executing testcase_1: %s", e)
        raise
    finally:
        # Cleanup topology
        test_utils.cleanup_nova(nova_client, instance_ids)
        test_utils.cleanup_glance(glance_client, image_ids)
        test_utils.cleanup_neutron(neutron_client, floatingip_ids, bgpvpn_ids,
                                   interfaces, subnet_ids, router_ids,
                                   network_ids)
    # Connect again OVS to Controller
    # NOTE: the names below (compute_nodes, ovs_br, ovs_controller_conn,
    # initial_ovs_groups) are bound inside the try block; this code is
    # only reached on success because the except clause re-raises.
    for compute_node in compute_nodes:
        compute_node.run_cmd("sudo ovs-vsctl set-controller {} {}".
                             format(ovs_br, ovs_controller_conn))
    logging.info("Wait before subtest")
    test_utils.wait_before_subtest()
    # Get OVS groups added after the reconnection
    added_ovs_groups = (len(initial_ovs_groups) -
                        len(test_utils.get_ovs_groups(
                            compute_nodes, [ovs_br])))

    # Check if group removed successfully
    results.record_action("Check if group was removed from OVS "
                          "after deleting the topology.")
    msg = ""
    # After removing the topology, groups must be equal to the initial
    if added_ovs_groups != 0:
        msg += " Additional group was not deleted from OVS"
    results.add_to_summary(0, "-")
    if len(msg) == 0:
        msg = "Group was deleted from ovs"
        results.add_success(msg)
    else:
        results.add_failure(msg)

    return results.compile_summary()
示例#13
0
def main():
    """Set up OpenStack resources, run the Promise test suite and report.

    Creates (in order) a dedicated tenant, an admin role binding, a test
    user, a Glance image, a Nova flavor and a private network; exports the
    environment variables consumed by the Promise npm test suite; runs the
    suite with a JSON reporter and logs a summary; optionally pushes the
    results to the test database when ``args.report`` is set.

    Exits the process with ``-1`` on any setup failure.
    """
    start_time = time.time()
    ks_creds = openstack_utils.get_credentials("keystone")
    nv_creds = openstack_utils.get_credentials("nova")
    nt_creds = openstack_utils.get_credentials("neutron")

    keystone = ksclient.Client(**ks_creds)

    user_id = openstack_utils.get_user_id(keystone, ks_creds['username'])
    if user_id == '':
        logger.error("Error : Failed to get id of %s user" %
                     ks_creds['username'])
        exit(-1)

    logger.info("Creating tenant '%s'..." % TENANT_NAME)
    tenant_id = openstack_utils.create_tenant(
        keystone, TENANT_NAME, TENANT_DESCRIPTION)
    if tenant_id == '':
        logger.error("Error : Failed to create %s tenant" % TENANT_NAME)
        exit(-1)
    logger.debug("Tenant '%s' created successfully." % TENANT_NAME)

    # The admin role is capitalized differently across deployments; use
    # the first spelling that resolves to an id.
    role_id = ''
    for role_name in ["admin", "Admin"]:
        if role_id == '':
            role_id = openstack_utils.get_role_id(keystone, role_name)

    if role_id == '':
        logger.error("Error : Failed to get id for %s role" % role_name)
        exit(-1)

    logger.info("Adding role '%s' to tenant '%s'..." % (role_id, TENANT_NAME))
    if not openstack_utils.add_role_user(keystone, user_id,
                                         role_id, tenant_id):
        logger.error("Error : Failed to add %s on tenant %s" %
                     (ks_creds['username'], TENANT_NAME))
        exit(-1)
    logger.debug("Role added successfully.")

    logger.info("Creating user '%s'..." % USER_NAME)
    user_id = openstack_utils.create_user(
        keystone, USER_NAME, USER_PWD, None, tenant_id)

    if user_id == '':
        logger.error("Error : Failed to create %s user" % USER_NAME)
        exit(-1)
    logger.debug("User '%s' created successfully." % USER_NAME)

    logger.info("Updating OpenStack credentials...")
    # NOTE(review): the password is deliberately set to the tenant name,
    # matching the username -- confirm this mirrors how the tenant admin
    # credentials are provisioned.
    ks_creds.update({
        "username": TENANT_NAME,
        "password": TENANT_NAME,
        "tenant_name": TENANT_NAME,
    })

    nt_creds.update({
        "tenant_name": TENANT_NAME,
    })

    nv_creds.update({
        "project_id": TENANT_NAME,
    })

    glance_endpoint = keystone.service_catalog.url_for(
        service_type='image', endpoint_type='publicURL')
    glance = glclient.Client(1, glance_endpoint, token=keystone.auth_token)
    nova = nvclient.Client("2", **nv_creds)

    logger.info("Creating image '%s' from '%s'..." % (IMAGE_NAME,
                                                      GLANCE_IMAGE_PATH))
    image_id = openstack_utils.create_glance_image(glance,
                                                   IMAGE_NAME,
                                                   GLANCE_IMAGE_PATH)
    if not image_id:
        logger.error("Failed to create the Glance image...")
        exit(-1)
    logger.debug("Image '%s' with ID '%s' created successfully." % (IMAGE_NAME,
                                                                    image_id))
    # Reuse an existing flavor when one with the expected name is present.
    flavor_id = openstack_utils.get_flavor_id(nova, FLAVOR_NAME)
    if flavor_id == '':
        logger.info("Creating flavor '%s'..." % FLAVOR_NAME)
        flavor_id = openstack_utils.create_flavor(nova,
                                                  FLAVOR_NAME,
                                                  FLAVOR_RAM,
                                                  FLAVOR_DISK,
                                                  FLAVOR_VCPUS)
        if not flavor_id:
            logger.error("Failed to create the Flavor...")
            exit(-1)
        logger.debug("Flavor '%s' with ID '%s' created successfully." %
                     (FLAVOR_NAME, flavor_id))
    else:
        logger.debug("Using existing flavor '%s' with ID '%s'..."
                     % (FLAVOR_NAME, flavor_id))

    neutron = ntclient.Client(**nt_creds)

    network_dic = openstack_utils.create_network_full(logger,
                                                      neutron,
                                                      NET_NAME,
                                                      SUBNET_NAME,
                                                      ROUTER_NAME,
                                                      SUBNET_CIDR)
    if network_dic is False:
        logger.error("Failed to create the private network...")
        exit(-1)

    logger.info("Exporting environment variables...")
    os.environ["NODE_ENV"] = "functest"
    os.environ["OS_TENANT_NAME"] = TENANT_NAME
    os.environ["OS_USERNAME"] = USER_NAME
    os.environ["OS_PASSWORD"] = USER_PWD
    os.environ["OS_TEST_IMAGE"] = image_id
    os.environ["OS_TEST_FLAVOR"] = flavor_id
    os.environ["OS_TEST_NETWORK"] = network_dic["net_id"]

    os.chdir(PROMISE_REPO)
    results_file_name = 'promise-results.json'
    cmd = 'npm run -s test -- --reporter json'

    logger.info("Running command: %s" % cmd)
    # Context manager guarantees the report file handle is closed even if
    # the subprocess call raises (the original leaked it on error).
    with open(results_file_name, 'w+') as results_file:
        ret = subprocess.call(cmd, shell=True, stdout=results_file,
                              stderr=subprocess.STDOUT)

    if ret == 0:
        logger.info("The test succeeded.")
    else:
        logger.info("The command '%s' failed." % cmd)

    # Parse the JSON report produced by the mocha JSON reporter.
    with open(results_file_name, 'r') as results_file:
        data = results_file.read()
        logger.debug("\n%s" % data)
        json_data = json.loads(data)

        suites = json_data["stats"]["suites"]
        tests = json_data["stats"]["tests"]
        passes = json_data["stats"]["passes"]
        pending = json_data["stats"]["pending"]
        failures = json_data["stats"]["failures"]
        start_time_json = json_data["stats"]["start"]
        end_time = json_data["stats"]["end"]
        # The reporter records duration in milliseconds; convert to seconds.
        duration = float(json_data["stats"]["duration"]) / float(1000)

    logger.info("\n"
                "****************************************\n"
                "          Promise test report\n\n"
                "****************************************\n"
                " Suites:  \t%s\n"
                " Tests:   \t%s\n"
                " Passes:  \t%s\n"
                " Pending: \t%s\n"
                " Failures:\t%s\n"
                " Start:   \t%s\n"
                " End:     \t%s\n"
                " Duration:\t%s\n"
                "****************************************\n\n"
                % (suites, tests, passes, pending, failures,
                   start_time_json, end_time, duration))

    if args.report:
        stop_time = time.time()
        json_results = {"timestart": start_time, "duration": duration,
                        "tests": int(tests), "failures": int(failures)}
        logger.debug("Promise Results json: " + str(json_results))

        # criteria for Promise in Release B was 100% of tests OK
        status = "FAIL"
        if int(tests) > 32 and int(failures) < 1:
            status = "PASS"

        functest_utils.push_results_to_db("promise",
                                          "promise",
                                          logger,
                                          start_time,
                                          stop_time,
                                          status,
                                          json_results)
示例#14
0
def main():
    """Set up OpenStack resources, run the Promise test suite and report.

    Creates (in order) a dedicated tenant, an admin role binding, a test
    user, a Glance image, a Nova flavor and a private network; exports the
    environment variables consumed by the Promise npm test suite; runs the
    suite with a JSON reporter and logs a summary; optionally pushes the
    results to the test database when ``args.report`` is set.

    Exits with ``-1`` on setup failure, ``0`` when the pass criteria are
    met (report mode only).
    """
    exit_code = -1
    start_time = time.time()
    ks_creds = openstack_utils.get_credentials("keystone")
    nv_creds = openstack_utils.get_credentials("nova")
    nt_creds = openstack_utils.get_credentials("neutron")

    keystone = ksclient.Client(**ks_creds)

    user_id = openstack_utils.get_user_id(keystone, ks_creds['username'])
    if user_id == '':
        logger.error("Error : Failed to get id of %s user" %
                     ks_creds['username'])
        exit(-1)

    logger.info("Creating tenant '%s'..." % PROMISE_TENANT_NAME)
    tenant_id = openstack_utils.create_tenant(keystone, PROMISE_TENANT_NAME,
                                              TENANT_DESCRIPTION)
    if not tenant_id:
        logger.error("Error : Failed to create %s tenant" %
                     PROMISE_TENANT_NAME)
        exit(-1)
    logger.debug("Tenant '%s' created successfully." % PROMISE_TENANT_NAME)

    # The admin role is capitalized differently across deployments; use
    # the first spelling that resolves to an id.
    role_id = ''
    for role_name in ["admin", "Admin"]:
        if role_id == '':
            role_id = openstack_utils.get_role_id(keystone, role_name)

    if role_id == '':
        logger.error("Error : Failed to get id for %s role" % role_name)
        exit(-1)

    logger.info("Adding role '%s' to tenant '%s'..." %
                (role_id, PROMISE_TENANT_NAME))
    if not openstack_utils.add_role_user(keystone, user_id, role_id,
                                         tenant_id):
        logger.error("Error : Failed to add %s on tenant %s" %
                     (ks_creds['username'], PROMISE_TENANT_NAME))
        exit(-1)
    logger.debug("Role added successfully.")

    logger.info("Creating user '%s'..." % PROMISE_USER_NAME)
    user_id = openstack_utils.create_user(keystone, PROMISE_USER_NAME,
                                          PROMISE_USER_PWD, None, tenant_id)

    if not user_id:
        logger.error("Error : Failed to create %s user" % PROMISE_USER_NAME)
        exit(-1)
    logger.debug("User '%s' created successfully." % PROMISE_USER_NAME)

    logger.info("Updating OpenStack credentials...")
    # NOTE(review): the password is deliberately set to the tenant name,
    # matching the username -- confirm this mirrors how the tenant admin
    # credentials are provisioned.
    ks_creds.update({
        "username": PROMISE_TENANT_NAME,
        "password": PROMISE_TENANT_NAME,
        "tenant_name": PROMISE_TENANT_NAME,
    })

    nt_creds.update({
        "tenant_name": PROMISE_TENANT_NAME,
    })

    nv_creds.update({
        "project_id": PROMISE_TENANT_NAME,
    })

    glance = openstack_utils.get_glance_client()
    nova = nvclient.Client("2", **nv_creds)

    logger.info("Creating image '%s' from '%s'..." %
                (PROMISE_IMAGE_NAME, GLANCE_IMAGE_PATH))
    image_id = openstack_utils.create_glance_image(glance, PROMISE_IMAGE_NAME,
                                                   GLANCE_IMAGE_PATH)
    if not image_id:
        logger.error("Failed to create the Glance image...")
        exit(-1)
    logger.debug("Image '%s' with ID '%s' created successfully." %
                 (PROMISE_IMAGE_NAME, image_id))
    # Reuse an existing flavor when one with the expected name is present.
    flavor_id = openstack_utils.get_flavor_id(nova, PROMISE_FLAVOR_NAME)
    if flavor_id == '':
        logger.info("Creating flavor '%s'..." % PROMISE_FLAVOR_NAME)
        flavor_id = openstack_utils.create_flavor(nova, PROMISE_FLAVOR_NAME,
                                                  PROMISE_FLAVOR_RAM,
                                                  PROMISE_FLAVOR_DISK,
                                                  PROMISE_FLAVOR_VCPUS)
        if not flavor_id:
            logger.error("Failed to create the Flavor...")
            exit(-1)
        logger.debug("Flavor '%s' with ID '%s' created successfully." %
                     (PROMISE_FLAVOR_NAME, flavor_id))
    else:
        logger.debug("Using existing flavor '%s' with ID '%s'..." %
                     (PROMISE_FLAVOR_NAME, flavor_id))

    neutron = ntclient.Client(**nt_creds)

    network_dic = openstack_utils.create_network_full(neutron,
                                                      PROMISE_NET_NAME,
                                                      PROMISE_SUBNET_NAME,
                                                      PROMISE_ROUTER_NAME,
                                                      PROMISE_SUBNET_CIDR)
    if not network_dic:
        logger.error("Failed to create the private network...")
        exit(-1)

    logger.info("Exporting environment variables...")
    os.environ["NODE_ENV"] = "functest"
    os.environ["OS_PASSWORD"] = PROMISE_USER_PWD
    os.environ["OS_TEST_IMAGE"] = image_id
    os.environ["OS_TEST_FLAVOR"] = flavor_id
    os.environ["OS_TEST_NETWORK"] = network_dic["net_id"]
    os.environ["OS_TENANT_NAME"] = PROMISE_TENANT_NAME
    os.environ["OS_USERNAME"] = PROMISE_USER_NAME

    os.chdir(PROMISE_REPO_DIR + '/source/')
    results_file_name = os.path.join(RESULTS_DIR, 'promise-results.json')
    cmd = 'npm run -s test -- --reporter json'

    logger.info("Running command: %s" % cmd)
    # Context manager guarantees the report file handle is closed even if
    # the subprocess call raises (the original leaked it on error).
    with open(results_file_name, 'w+') as results_file:
        ret = subprocess.call(cmd,
                              shell=True,
                              stdout=results_file,
                              stderr=subprocess.STDOUT)

    if ret == 0:
        logger.info("The test succeeded.")
    else:
        logger.info("The command '%s' failed." % cmd)

    # Parse the JSON report produced by the mocha JSON reporter.
    with open(results_file_name, 'r') as results_file:
        data = results_file.read()
        logger.debug("\n%s" % data)
        json_data = json.loads(data)

        suites = json_data["stats"]["suites"]
        tests = json_data["stats"]["tests"]
        passes = json_data["stats"]["passes"]
        pending = json_data["stats"]["pending"]
        failures = json_data["stats"]["failures"]
        start_time_json = json_data["stats"]["start"]
        end_time = json_data["stats"]["end"]
        # The reporter records duration in milliseconds; convert to seconds.
        duration = float(json_data["stats"]["duration"]) / float(1000)

    logger.info("\n"
                "****************************************\n"
                "          Promise test report\n\n"
                "****************************************\n"
                " Suites:  \t%s\n"
                " Tests:   \t%s\n"
                " Passes:  \t%s\n"
                " Pending: \t%s\n"
                " Failures:\t%s\n"
                " Start:   \t%s\n"
                " End:     \t%s\n"
                " Duration:\t%s\n"
                "****************************************\n\n" %
                (suites, tests, passes, pending, failures, start_time_json,
                 end_time, duration))

    if args.report:
        stop_time = time.time()
        json_results = {
            "timestart": start_time,
            "duration": duration,
            "tests": int(tests),
            "failures": int(failures)
        }
        logger.debug("Promise Results json: " + str(json_results))

        # criteria for Promise in Release B was 100% of tests OK
        status = "FAIL"
        if int(tests) > 32 and int(failures) < 1:
            status = "PASS"
            exit_code = 0

        ft_utils.push_results_to_db("promise", "promise", start_time,
                                    stop_time, status, json_results)

    # NOTE(review): when args.report is unset, exit_code stays -1 even if
    # the suite passed -- confirm this is the intended CI contract.
    exit(exit_code)
示例#15
0
def main():
    """Subtest: build two networks, create a BGPVPN with eRT == iRT,
    associate a router and a network to it, then check VM-to-VM
    connectivity and floating-IP reachability.

    Returns the compiled Results summary.
    """
    report = Results(COMMON_CONFIG.line_length)

    report.add_to_summary(0, "=")
    report.add_to_summary(2, "STATUS", "SUBTEST")
    report.add_to_summary(0, "=")

    nova = os_utils.get_nova_client()
    neutron = os_utils.get_neutron_client()
    glance = os_utils.get_glance_client()

    img_id = os_utils.create_glance_image(
        glance,
        TESTCASE_CONFIG.image_name,
        COMMON_CONFIG.image_path,
        disk=COMMON_CONFIG.image_format,
        container="bare",
        public='public')

    net_1_id, _, rtr_1_id = test_utils.create_network(
        neutron, TESTCASE_CONFIG.net_1_name,
        TESTCASE_CONFIG.subnet_1_name, TESTCASE_CONFIG.subnet_1_cidr,
        TESTCASE_CONFIG.router_1_name)
    net_2_id = test_utils.create_net(neutron, TESTCASE_CONFIG.net_2_name)
    test_utils.create_subnet(neutron, TESTCASE_CONFIG.subnet_2_name,
                             TESTCASE_CONFIG.subnet_2_cidr, net_2_id)

    secgroup_id = os_utils.create_security_group_full(
        neutron, TESTCASE_CONFIG.secgroup_name,
        TESTCASE_CONFIG.secgroup_descr)
    test_utils.open_icmp_ssh(neutron, secgroup_id)

    # Boot the ping target first so its IP can be baked into the other
    # instance's userdata.
    server_2 = test_utils.create_instance(
        nova,
        TESTCASE_CONFIG.instance_2_name,
        img_id,
        net_2_id,
        secgroup_id,
        secgroup_name=TESTCASE_CONFIG.secgroup_name)
    server_2_ip = test_utils.get_instance_ip(server_2)

    ping_script = test_utils.generate_ping_userdata([server_2_ip])
    server_1 = test_utils.create_instance(
        nova,
        TESTCASE_CONFIG.instance_1_name,
        img_id,
        net_1_id,
        secgroup_id,
        secgroup_name=TESTCASE_CONFIG.secgroup_name,
        userdata=ping_script)

    report.record_action("Create VPN with eRT==iRT")
    vpn_args = {
        "import_targets": TESTCASE_CONFIG.targets,
        "export_targets": TESTCASE_CONFIG.targets,
        "route_distinguishers": TESTCASE_CONFIG.route_distinguishers,
        "name": "sdnvpn-7"
    }
    vpn = os_utils.create_bgpvpn(neutron, **vpn_args)
    vpn_id = vpn['bgpvpn']['id']
    logger.debug("VPN created details: %s" % vpn)

    message = ("Associate router '%s' and net '%s' to the VPN." %
               (TESTCASE_CONFIG.router_1_name, TESTCASE_CONFIG.net_2_name))
    report.record_action(message)
    report.add_to_summary(0, "-")

    os_utils.create_router_association(neutron, vpn_id, rtr_1_id)
    os_utils.create_network_association(neutron, vpn_id, net_2_id)

    test_utils.wait_for_bgp_router_assoc(neutron, vpn_id, rtr_1_id)
    test_utils.wait_for_bgp_net_assoc(neutron, vpn_id, net_2_id)

    if not test_utils.wait_for_instances_up(server_1, server_2):
        logger.error("One or more instances is down")

    logger.info("Waiting for the VMs to connect to each other using the"
                " updated network configuration")
    test_utils.wait_before_subtest()

    report.get_ping_status(server_1, server_2, expected="PASS", timeout=200)
    report.add_to_summary(0, "=")

    message = "Assign a Floating IP to %s" % server_1.name
    report.record_action(message)

    floating_ip = os_utils.create_floating_ip(neutron)
    if os_utils.add_floating_ip(nova, server_1.id, floating_ip['fip_addr']):
        report.add_success(message)
    else:
        report.add_failure(message)

    report.record_action("Ping %s via Floating IP" % server_1.name)
    report.add_to_summary(0, "-")
    report.ping_ip_test(floating_ip['fip_addr'])

    return report.compile_summary()
示例#16
0
def main():
    """Verify that OpenDaylight can peer over BGP with a Quagga speaker.

    Checks that zrpcd and bgpd run on the ODL controller, starts the
    controller-side BGP speaker, boots a Quagga instance in OpenStack
    (bootstrapped via cloud-init and attached to the external bridge),
    then verifies BGP peering between the two. All created OpenStack
    resources are torn down in the ``finally`` block. Returns the
    compiled Results summary.
    """
    results = Results(COMMON_CONFIG.line_length)
    results.add_to_summary(0, "=")
    results.add_to_summary(2, "STATUS", "SUBTEST")
    results.add_to_summary(0, "=")

    openstack_nodes = test_utils.get_nodes()

    # node.is_odl() doesn't work in Apex
    # https://jira.opnfv.org/browse/RELENG-192
    controllers = [
        node for node in openstack_nodes
        if "running" in node.run_cmd("sudo systemctl status opendaylight")
    ]
    computes = [node for node in openstack_nodes if node.is_compute()]

    msg = ("Verify that OpenDaylight can start/communicate with zrpcd/Quagga")
    results.record_action(msg)
    results.add_to_summary(0, "-")
    if not controllers:
        msg = ("Controller (ODL) list is empty. Skipping rest of tests.")
        logger.info(msg)
        results.add_failure(msg)
        return results.compile_summary()
    else:
        msg = ("Controller (ODL) list is ready")
        logger.info(msg)
        results.add_success(msg)

    controller = controllers[0]  # We don't handle HA well
    # Extract the controller's external (br-ex) address and mask from a
    # CIDR string such as "10.0.0.5/24".
    get_ext_ip_cmd = "sudo ip a | grep br-ex | grep inet | awk '{print $2}'"
    ext_net_cidr = controller.run_cmd(get_ext_ip_cmd).strip().split('\n')
    ext_net_mask = ext_net_cidr[0].split('/')[1]
    controller_ext_ip = ext_net_cidr[0].split('/')[0]

    logger.info("Starting bgp speaker of controller at IP %s " %
                controller_ext_ip)
    logger.info("Checking if zrpcd is " "running on the controller node")

    # The process counts as running when any reported state is not 'Z'
    # (zombie).
    output_zrpcd = controller.run_cmd("ps --no-headers -C " "zrpcd -o state")
    states = output_zrpcd.split()
    running = any([s != 'Z' for s in states])

    msg = ("zrpcd is running")

    if not running:
        logger.info("zrpcd is not running on the controller node")
        results.add_failure(msg)
    else:
        logger.info("zrpcd is running on the controller node")
        results.add_success(msg)

    results.add_to_summary(0, "-")

    # Ensure that ZRPCD ip & port are well configured within ODL
    add_client_conn_to_bgp = "bgp-connect -p 7644 -h 127.0.0.1 add"
    test_utils.run_odl_cmd(controller, add_client_conn_to_bgp)

    # Start bgp daemon
    start_quagga = "odl:configure-bgp -op start-bgp-server " \
                   "--as-num 100 --router-id {0}".format(controller_ext_ip)
    test_utils.run_odl_cmd(controller, start_quagga)

    logger.info("Checking if bgpd is running" " on the controller node")

    # Check if there is a non-zombie bgpd process
    output_bgpd = controller.run_cmd("ps --no-headers -C " "bgpd -o state")
    states = output_bgpd.split()
    running = any([s != 'Z' for s in states])

    msg = ("bgpd is running")
    if not running:
        logger.info("bgpd is not running on the controller node")
        results.add_failure(msg)
    else:
        logger.info("bgpd is running on the controller node")
        results.add_success(msg)

    results.add_to_summary(0, "-")

    # We should be able to restart the speaker
    # but the test is disabled because of buggy upstream
    # https://github.com/6WIND/zrpcd/issues/15
    # stop_quagga = 'odl:configure-bgp -op stop-bgp-server'
    # test_utils.run_odl_cmd(controller, stop_quagga)

    # logger.info("Checking if bgpd is still running"
    #             " on the controller node")

    # output_bgpd = controller.run_cmd("ps --no-headers -C " \
    #                                  "bgpd -o state")
    # states = output_bgpd.split()
    # running = any([s != 'Z' for s in states])

    # msg = ("bgpd is stopped")
    # if not running:
    #     logger.info("bgpd is not running on the controller node")
    #     results.add_success(msg)
    # else:
    #     logger.info("bgpd is still running on the controller node")
    #     results.add_failure(msg)

    # Taken from the sfc tests
    if not os.path.isfile(COMMON_CONFIG.ubuntu_image_path):
        logger.info("Downloading image")
        ft_utils.download_url(
            "http://artifacts.opnfv.org/sdnvpn/"
            "ubuntu-16.04-server-cloudimg-amd64-disk1.img",
            "/home/opnfv/functest/data/")
    else:
        logger.info("Using old image")

    glance_client = os_utils.get_glance_client()
    nova_client = os_utils.get_nova_client()
    neutron_client = os_utils.get_neutron_client()

    # Collect every created resource id so the finally block can clean up
    # even when a step in the try body fails.
    (floatingip_ids, instance_ids, router_ids, network_ids, image_ids,
     subnet_ids, interfaces, bgpvpn_ids, flavor_ids) = ([] for i in range(9))

    try:
        sg_id = os_utils.create_security_group_full(
            neutron_client, TESTCASE_CONFIG.secgroup_name,
            TESTCASE_CONFIG.secgroup_descr)
        test_utils.open_icmp(neutron_client, sg_id)
        test_utils.open_http_port(neutron_client, sg_id)

        test_utils.open_bgp_port(neutron_client, sg_id)
        net_id, subnet_1_id, router_1_id = test_utils.create_network(
            neutron_client, TESTCASE_CONFIG.net_1_name,
            TESTCASE_CONFIG.subnet_1_name, TESTCASE_CONFIG.subnet_1_cidr,
            TESTCASE_CONFIG.router_1_name)

        quagga_net_id, subnet_quagga_id, \
            router_quagga_id = test_utils.create_network(
                neutron_client,
                TESTCASE_CONFIG.quagga_net_name,
                TESTCASE_CONFIG.quagga_subnet_name,
                TESTCASE_CONFIG.quagga_subnet_cidr,
                TESTCASE_CONFIG.quagga_router_name)

        interfaces.append(tuple((router_1_id, subnet_1_id)))
        interfaces.append(tuple((router_quagga_id, subnet_quagga_id)))
        network_ids.extend([net_id, quagga_net_id])
        router_ids.extend([router_1_id, router_quagga_id])
        subnet_ids.extend([subnet_1_id, subnet_quagga_id])

        # Pick the image disk format expected by the target installer.
        installer_type = str(os.environ['INSTALLER_TYPE'].lower())
        if installer_type == "fuel":
            disk = 'raw'
        elif installer_type == "apex":
            disk = 'qcow2'
        else:
            # NOTE(review): 'disk' is left unset on this branch, so the
            # create_glance_image call below would raise NameError for any
            # other installer -- confirm whether an explicit exit is wanted.
            logger.error("Incompatible installer type")

        ubuntu_image_id = os_utils.create_glance_image(
            glance_client,
            COMMON_CONFIG.ubuntu_image_name,
            COMMON_CONFIG.ubuntu_image_path,
            disk,
            container="bare",
            public="public")

        image_ids.append(ubuntu_image_id)

        # NOTE(rski) The order of this seems a bit weird but
        # there is a reason for this, namely
        # https://jira.opnfv.org/projects/SDNVPN/issues/SDNVPN-99
        # so we create the quagga instance using cloud-init
        # and immediately give it a floating IP.
        # The cloud-init script should contain a small sleep for
        # this to work.
        # We also create the FIP first because it is used in the
        # cloud-init script.
        fip = os_utils.create_floating_ip(neutron_client)
        # fake_fip is needed to bypass NAT
        # see below for the reason why.
        fake_fip = os_utils.create_floating_ip(neutron_client)

        floatingip_ids.extend([fip['fip_id'], fake_fip['fip_id']])
        # pin quagga to some compute
        compute_node = nova_client.hypervisors.list()[0]
        quagga_compute_node = "nova:" + compute_node.hypervisor_hostname
        # Map the hypervisor used above to a compute handle
        # returned by releng's manager
        # NOTE(review): 'compute' stays unbound if no node's "ip a" output
        # contains the hypervisor's host_ip; attach_instance_to_ext_br
        # below would then raise NameError -- verify this cannot happen.
        for comp in computes:
            if compute_node.host_ip in comp.run_cmd("sudo ip a"):
                compute = comp
                break
        quagga_bootstrap_script = quagga.gen_quagga_setup_script(
            controller_ext_ip, fake_fip['fip_addr'], ext_net_mask)

        _, flavor_id = test_utils.create_custom_flavor()
        flavor_ids.append(flavor_id)

        quagga_vm = test_utils.create_instance(
            nova_client,
            TESTCASE_CONFIG.quagga_instance_name,
            ubuntu_image_id,
            quagga_net_id,
            sg_id,
            fixed_ip=TESTCASE_CONFIG.quagga_instance_ip,
            flavor=COMMON_CONFIG.custom_flavor_name,
            userdata=quagga_bootstrap_script,
            compute_node=quagga_compute_node)

        instance_ids.append(quagga_vm)

        fip_added = os_utils.add_floating_ip(nova_client, quagga_vm.id,
                                             fip['fip_addr'])

        msg = ("Assign a Floating IP to %s " %
               TESTCASE_CONFIG.quagga_instance_name)
        if fip_added:
            results.add_success(msg)
        else:
            results.add_failure(msg)
        test_utils.attach_instance_to_ext_br(quagga_vm, compute)

        try:
            testcase = "Bootstrap quagga inside an OpenStack instance"
            cloud_init_success = test_utils.wait_for_cloud_init(quagga_vm)
            if cloud_init_success:
                results.add_success(testcase)
            else:
                results.add_failure(testcase)
            results.add_to_summary(0, "=")

            results.add_to_summary(0, '-')
            results.add_to_summary(1, "Peer Quagga with OpenDaylight")
            results.add_to_summary(0, '-')

            neighbor = quagga.odl_add_neighbor(fake_fip['fip_addr'],
                                               controller_ext_ip, controller)
            peer = quagga.check_for_peering(controller)

        finally:
            # Always detach from the external bridge, even if peering
            # checks above raised.
            test_utils.detach_instance_from_ext_br(quagga_vm, compute)

        if neighbor and peer:
            results.add_success("Peering with quagga")
        else:
            results.add_failure("Peering with quagga")

    except Exception as e:
        logger.error("exception occurred while executing testcase_3: %s", e)
        raise
    finally:
        test_utils.cleanup_nova(nova_client, instance_ids, flavor_ids)
        test_utils.cleanup_glance(glance_client, image_ids)
        test_utils.cleanup_neutron(neutron_client, floatingip_ids, bgpvpn_ids,
                                   interfaces, subnet_ids, router_ids,
                                   network_ids)

    return results.compile_summary()
示例#17
0
def main():
    """Run the Tacker/ODL service-function-chaining (SFC) classifier test.

    End-to-end flow: discover the deployment nodes, prepare image /
    network / security group, boot a client and a server instance plus
    two firewall VNFs placed by the topology shuffler, build 'red' and
    'blue' chains, classify SSH+HTTP into 'red' and verify SSH is
    blocked while HTTP works, then re-classify onto 'blue' and verify
    the inverse (HTTP blocked, SSH works).

    Returns:
        The compiled summary produced by ``results.compile_summary()``.

    Exits the process with status 1 on any unrecoverable setup failure.
    """
    # Build a handler for the target installer so nodes can be queried.
    deploymentHandler = DeploymentFactory.get_handler(
        COMMON_CONFIG.installer_type,
        COMMON_CONFIG.installer_ip,
        COMMON_CONFIG.installer_user,
        installer_pwd=COMMON_CONFIG.installer_password)

    # Restrict node discovery to the configured cluster when one is set.
    cluster = COMMON_CONFIG.installer_cluster
    openstack_nodes = (deploymentHandler.get_nodes({'cluster': cluster})
                       if cluster is not None
                       else deploymentHandler.get_nodes())

    controller_nodes = [node for node in openstack_nodes
                        if node.is_controller()]
    compute_nodes = [node for node in openstack_nodes
                     if node.is_compute()]

    odl_ip, odl_port = test_utils.get_odl_ip_port(openstack_nodes)

    for compute in compute_nodes:
        logger.info("This is a compute: %s" % compute.info)

    # Result table header.
    results = Results(COMMON_CONFIG.line_length)
    results.add_to_summary(0, "=")
    results.add_to_summary(2, "STATUS", "SUBTEST")
    results.add_to_summary(0, "=")

    # Only the Fuel installer is supported by this test case.
    installer_type = os.environ.get("INSTALLER_TYPE")
    if installer_type != "fuel":
        logger.error(
            '\033[91mCurrently supported only Fuel Installer type\033[0m')
        sys.exit(1)

    installer_ip = os.environ.get("INSTALLER_IP")
    if not installer_ip:
        logger.error(
            '\033[91minstaller ip is not set\033[0m')
        logger.error(
            '\033[91mexport INSTALLER_IP=<ip>\033[0m')
        sys.exit(1)

    # Prepare the hosts: compute-node setup for the test subnet and
    # iptables configuration on the controllers.
    test_utils.setup_compute_node(TESTCASE_CONFIG.subnet_cidr, compute_nodes)
    test_utils.configure_iptables(controller_nodes)

    test_utils.download_image(COMMON_CONFIG.url,
                              COMMON_CONFIG.image_path)
    _, custom_flv_id = os_utils.get_or_create_flavor(
        COMMON_CONFIG.flavor,
        COMMON_CONFIG.ram_size_in_mb,
        COMMON_CONFIG.disk_size_in_gb,
        COMMON_CONFIG.vcpu_count, public=True)
    if not custom_flv_id:
        logger.error("Failed to create custom flavor")
        sys.exit(1)

    glance_client = os_utils.get_glance_client()
    neutron_client = os_utils.get_neutron_client()
    nova_client = os_utils.get_nova_client()
    tacker_client = os_tacker.get_tacker_client()

    # SSH clients are used later to capture OVS logs on test failures.
    controller_clients = test_utils.get_ssh_clients(controller_nodes)
    compute_clients = test_utils.get_ssh_clients(compute_nodes)

    ovs_logger = ovs_log.OVSLogger(
        os.path.join(COMMON_CONFIG.sfc_test_dir, 'ovs-logs'),
        COMMON_CONFIG.functest_results_dir)

    # NOTE(review): the fourth argument is passed positionally here while
    # sibling callers use disk=<format>; confirm it maps to the disk-format
    # parameter of os_utils.create_glance_image.
    image_id = os_utils.create_glance_image(glance_client,
                                            COMMON_CONFIG.image_name,
                                            COMMON_CONFIG.image_path,
                                            COMMON_CONFIG.image_format,
                                            public='public')

    network_id = test_utils.setup_neutron(neutron_client,
                                          TESTCASE_CONFIG.net_name,
                                          TESTCASE_CONFIG.subnet_name,
                                          TESTCASE_CONFIG.router_name,
                                          TESTCASE_CONFIG.subnet_cidr)

    sg_id = test_utils.create_security_groups(neutron_client,
                                              TESTCASE_CONFIG.secgroup_name,
                                              TESTCASE_CONFIG.secgroup_descr)

    vnf_names = ['testVNF1', 'testVNF2']

    # Randomize where the client, server and VNFs land across zones.
    topo_seed = topo_shuffler.get_seed()  # change to None for nova av zone
    testTopology = topo_shuffler.topology(vnf_names, seed=topo_seed)

    logger.info('This test is run with the topology {0}'
                .format(testTopology['id']))
    logger.info('Topology description: {0}'
                .format(testTopology['description']))

    client_instance = test_utils.create_instance(
        nova_client, CLIENT, COMMON_CONFIG.flavor, image_id,
        network_id, sg_id, av_zone=testTopology['client'])

    server_instance = test_utils.create_instance(
        nova_client, SERVER, COMMON_CONFIG.flavor, image_id,
        network_id, sg_id, av_zone=testTopology['server'])

    server_ip = server_instance.networks.get(TESTCASE_CONFIG.net_name)[0]

    # Register the two VNF descriptors (red/blue firewalls) with Tacker.
    tosca_red = os.path.join(COMMON_CONFIG.sfc_test_dir,
                             COMMON_CONFIG.vnfd_dir,
                             TESTCASE_CONFIG.test_vnfd_red)
    os_tacker.create_vnfd(tacker_client, tosca_file=tosca_red)

    tosca_blue = os.path.join(COMMON_CONFIG.sfc_test_dir,
                              COMMON_CONFIG.vnfd_dir,
                              TESTCASE_CONFIG.test_vnfd_blue)
    os_tacker.create_vnfd(tacker_client, tosca_file=tosca_blue)

    default_param_file = os.path.join(
        COMMON_CONFIG.sfc_test_dir,
        COMMON_CONFIG.vnfd_dir,
        COMMON_CONFIG.vnfd_default_params_file)

    # Instantiate one VNF per descriptor in its shuffled availability zone.
    test_utils.create_vnf_in_av_zone(
        tacker_client, vnf_names[0], 'test-vnfd1',
        default_param_file, testTopology[vnf_names[0]])
    test_utils.create_vnf_in_av_zone(
        tacker_client, vnf_names[1], 'test-vnfd2',
        default_param_file, testTopology[vnf_names[1]])

    vnf1_id = os_tacker.wait_for_vnf(tacker_client, vnf_name=vnf_names[0])
    vnf2_id = os_tacker.wait_for_vnf(tacker_client, vnf_name=vnf_names[1])
    if vnf1_id is None or vnf2_id is None:
        logger.error('ERROR while booting vnfs')
        sys.exit(1)

    # Attach the test security group to the Nova instances backing the VNFs.
    vnf1_instance_id = test_utils.get_nova_id(tacker_client, 'vdu1', vnf1_id)
    os_utils.add_secgroup_to_instance(nova_client, vnf1_instance_id, sg_id)

    vnf2_instance_id = test_utils.get_nova_id(tacker_client, 'vdu1', vnf2_id)
    os_utils.add_secgroup_to_instance(nova_client, vnf2_instance_id, sg_id)

    os_tacker.create_sfc(tacker_client, 'red', chain_vnf_names=['testVNF1'])
    os_tacker.create_sfc(tacker_client, 'blue', chain_vnf_names=['testVNF2'])

    # Classify HTTP (TCP/80) and SSH (TCP/22) traffic into the 'red' chain.
    os_tacker.create_sfc_classifier(
        tacker_client, 'red_http', sfc_name='red',
        match={
            'source_port': 0,
            'dest_port': 80,
            'protocol': 6
        })

    os_tacker.create_sfc_classifier(
        tacker_client, 'red_ssh', sfc_name='red',
        match={
            'source_port': 0,
            'dest_port': 22,
            'protocol': 6
        })

    logger.info(test_utils.run_cmd('tacker sfc-list')[1])
    logger.info(test_utils.run_cmd('tacker sfc-classifier-list')[1])

    # Start measuring the time it takes to implement the classification rules
    t1 = threading.Thread(target=test_utils.wait_for_classification_rules,
                          args=(ovs_logger, compute_nodes, odl_ip, odl_port,
                                testTopology,))

    # NOTE(review): if start() fails, the later t1.join() would raise
    # RuntimeError on a never-started thread — confirm this is acceptable.
    try:
        t1.start()
    except Exception as e:
        logger.error("Unable to start the thread that counts time %s" % e)

    logger.info("Assigning floating IPs to instances")
    server_floating_ip = test_utils.assign_floating_ip(
        nova_client, neutron_client, server_instance.id)
    client_floating_ip = test_utils.assign_floating_ip(
        nova_client, neutron_client, client_instance.id)
    sf1_floating_ip = test_utils.assign_floating_ip(
        nova_client, neutron_client, vnf1_instance_id)
    sf2_floating_ip = test_utils.assign_floating_ip(
        nova_client, neutron_client, vnf2_instance_id)

    # All four instances must be reachable before traffic tests start.
    for ip in (server_floating_ip,
               client_floating_ip,
               sf1_floating_ip,
               sf2_floating_ip):
        logger.info("Checking connectivity towards floating IP [%s]" % ip)
        if not test_utils.ping(ip, retries=50, retry_timeout=1):
            logger.error("Cannot ping floating IP [%s]" % ip)
            sys.exit(1)
        logger.info("Successful ping to floating IP [%s]" % ip)

    if not test_utils.check_ssh([sf1_floating_ip, sf2_floating_ip]):
        logger.error("Cannot establish SSH connection to the SFs")
        sys.exit(1)

    logger.info("Starting HTTP server on %s" % server_floating_ip)
    if not test_utils.start_http_server(server_floating_ip):
        logger.error('\033[91mFailed to start HTTP server on %s\033[0m'
                     % server_floating_ip)
        sys.exit(1)

    # VNF1 blocks SSH (port 22); VNF2 blocks HTTP (port 80).
    logger.info("Starting SSH firewall on %s" % sf1_floating_ip)
    test_utils.start_vxlan_tool(sf1_floating_ip, block="22")
    logger.info("Starting HTTP firewall on %s" % sf2_floating_ip)
    test_utils.start_vxlan_tool(sf2_floating_ip, block="80")

    logger.info("Wait for ODL to update the classification rules in OVS")
    t1.join()

    # Test 1/2: with the 'red' chain active, SSH must be blocked by VNF1
    # and HTTP must still work.
    logger.info("Test SSH")
    if test_utils.is_ssh_blocked(client_floating_ip, server_ip):
        results.add_to_summary(2, "PASS", "SSH Blocked")
    else:
        error = ('\033[91mTEST 1 [FAILED] ==> SSH NOT BLOCKED\033[0m')
        logger.error(error)
        test_utils.capture_ovs_logs(
            ovs_logger, controller_clients, compute_clients, error)
        results.add_to_summary(2, "FAIL", "SSH Blocked")

    logger.info("Test HTTP")
    if not test_utils.is_http_blocked(client_floating_ip, server_ip):
        results.add_to_summary(2, "PASS", "HTTP works")
    else:
        error = ('\033[91mTEST 2 [FAILED] ==> HTTP BLOCKED\033[0m')
        logger.error(error)
        test_utils.capture_ovs_logs(
            ovs_logger, controller_clients, compute_clients, error)
        results.add_to_summary(2, "FAIL", "HTTP works")

    # Re-classify traffic onto the 'blue' chain (HTTP-blocking VNF).
    logger.info("Changing the classification")
    test_utils.delete_classifier_and_acl(
        tacker_client, 'red_http', odl_ip, odl_port)

    test_utils.delete_classifier_and_acl(
        tacker_client, 'red_ssh', odl_ip, odl_port)

    os_tacker.create_sfc_classifier(
        tacker_client, 'blue_http', sfc_name='blue',
        match={
            'source_port': 0,
            'dest_port': 80,
            'protocol': 6
        })

    os_tacker.create_sfc_classifier(
        tacker_client, 'blue_ssh', sfc_name='blue',
        match={
            'source_port': 0,
            'dest_port': 22,
            'protocol': 6
        })

    logger.info(test_utils.run_cmd('tacker sfc-classifier-list')[1])

    # Start measuring the time it takes to implement the classification rules
    t2 = threading.Thread(target=test_utils.wait_for_classification_rules,
                          args=(ovs_logger, compute_nodes, odl_ip, odl_port,
                                testTopology,))
    try:
        t2.start()
    except Exception as e:
        logger.error("Unable to start the thread that counts time %s" % e)

    logger.info("Wait for ODL to update the classification rules in OVS")
    t2.join()

    # Test 3/4: with the 'blue' chain active, HTTP must be blocked by VNF2
    # and SSH must now work.
    logger.info("Test HTTP")
    if test_utils.is_http_blocked(client_floating_ip, server_ip):
        results.add_to_summary(2, "PASS", "HTTP Blocked")
    else:
        error = ('\033[91mTEST 3 [FAILED] ==> HTTP WORKS\033[0m')
        logger.error(error)
        test_utils.capture_ovs_logs(
            ovs_logger, controller_clients, compute_clients, error)
        results.add_to_summary(2, "FAIL", "HTTP Blocked")

    logger.info("Test SSH")
    if not test_utils.is_ssh_blocked(client_floating_ip, server_ip):
        results.add_to_summary(2, "PASS", "SSH works")
    else:
        error = ('\033[91mTEST 4 [FAILED] ==> SSH BLOCKED\033[0m')
        logger.error(error)
        test_utils.capture_ovs_logs(
            ovs_logger, controller_clients, compute_clients, error)
        results.add_to_summary(2, "FAIL", "SSH works")

    return results.compile_summary()
示例#18
0
def main():
    """Ping-resilience test across a VM replacement.

    Boots vm_1, vm_2 and vm_3 (two availability zones), attaches one
    monitor process per VM that watches ping traffic, then deletes vm_2
    mid-test and boots vm_4 in its place, re-checking that pings still
    succeed. All monitor processes are stopped and OpenStack resources
    cleaned up in the ``finally`` block.

    Returns:
        The compiled summary produced by ``results.compile_summary()``.
    """
    results = Results(COMMON_CONFIG.line_length)

    # Result table header.
    results.add_to_summary(0, "=")
    results.add_to_summary(2, "STATUS", "SUBTEST")
    results.add_to_summary(0, "=")

    nova_client = os_utils.get_nova_client()
    neutron_client = os_utils.get_neutron_client()
    glance_client = os_utils.get_glance_client()

    # ID bookkeeping lists consumed by the cleanup helpers in `finally`.
    (floatingip_ids, instance_ids, router_ids, network_ids, image_ids,
     subnet_ids, interfaces, bgpvpn_ids) = ([] for i in range(8))
    # NOTE(review): image_id is recorded without a None check — confirm
    # create_glance_image raises (rather than returns None) on failure.
    image_id = os_utils.create_glance_image(glance_client,
                                            TESTCASE_CONFIG.image_name,
                                            COMMON_CONFIG.image_path,
                                            disk=COMMON_CONFIG.image_format,
                                            container="bare",
                                            public='public')
    image_ids.append(image_id)

    network_1_id = test_utils.create_net(neutron_client,
                                         TESTCASE_CONFIG.net_1_name)
    subnet_1_id = test_utils.create_subnet(neutron_client,
                                           TESTCASE_CONFIG.subnet_1_name,
                                           TESTCASE_CONFIG.subnet_1_cidr,
                                           network_1_id)

    network_ids.append(network_1_id)
    subnet_ids.append(subnet_1_id)

    sg_id = os_utils.create_security_group_full(neutron_client,
                                                TESTCASE_CONFIG.secgroup_name,
                                                TESTCASE_CONFIG.secgroup_descr)

    # Two compute nodes are required to spread the VMs over two zones.
    compute_nodes = test_utils.assert_and_get_compute_nodes(nova_client)
    av_zone_1 = "nova:" + compute_nodes[0]
    av_zone_2 = "nova:" + compute_nodes[1]

    # boot INSTANCES
    # vm_2 is booted first so its IP can be fed into vm_1's ping userdata.
    vm_2 = test_utils.create_instance(
        nova_client,
        TESTCASE_CONFIG.instance_2_name,
        image_id,
        network_1_id,
        sg_id,
        secgroup_name=TESTCASE_CONFIG.secgroup_name,
        compute_node=av_zone_1)
    vm2_ip = test_utils.get_instance_ip(vm_2)

    u1 = test_utils.generate_ping_userdata([vm2_ip], 1)
    vm_1 = test_utils.create_instance(
        nova_client,
        TESTCASE_CONFIG.instance_1_name,
        image_id,
        network_1_id,
        sg_id,
        secgroup_name=TESTCASE_CONFIG.secgroup_name,
        compute_node=av_zone_1,
        userdata=u1)
    vm1_ip = test_utils.get_instance_ip(vm_1)

    # vm_3 lives on the second zone and pings both vm_1 and vm_2.
    u3 = test_utils.generate_ping_userdata([vm1_ip, vm2_ip], 1)
    vm_3 = test_utils.create_instance(
        nova_client,
        TESTCASE_CONFIG.instance_3_name,
        image_id,
        network_1_id,
        sg_id,
        secgroup_name=TESTCASE_CONFIG.secgroup_name,
        compute_node=av_zone_2,
        userdata=u3)
    vm3_ip = test_utils.get_instance_ip(vm_3)
    # We do not put vm_2 id in instance_ids table because we will
    # delete the current instance during the testing process
    instance_ids.extend([vm_1.id, vm_3.id])

    # Wait for VMs to get ips.
    instances_up = test_utils.wait_for_instances_up(vm_2)
    instances_dhcp_up = test_utils.wait_for_instances_get_dhcp(vm_1, vm_3)

    if (not instances_up or not instances_dhcp_up):
        logger.error("One or more instances are down")
        # TODO: Handle this appropriately
    # Create monitor threads to monitor traffic between vm_1, vm_2 and vm_3
    # Manager dicts let the parent exchange stop flags / error messages
    # with each monitor Process.
    m = Manager()
    monitor_input1 = m.dict()
    monitor_output1 = m.dict()
    monitor_input1["stop_thread"] = False
    monitor_output1["error_msg"] = ""
    monitor_thread1 = Process(target=monitor, args=(monitor_input1,
                                                    monitor_output1, vm_1,))
    monitor_input2 = m.dict()
    monitor_output2 = m.dict()
    monitor_input2["stop_thread"] = False
    monitor_output2["error_msg"] = ""
    monitor_thread2 = Process(target=monitor, args=(monitor_input2,
                                                    monitor_output2, vm_2,))
    monitor_input3 = m.dict()
    monitor_output3 = m.dict()
    monitor_input3["stop_thread"] = False
    monitor_output3["error_msg"] = ""
    monitor_thread3 = Process(target=monitor, args=(monitor_input3,
                                                    monitor_output3, vm_3,))
    # Lists of all monitor threads and their inputs and outputs.
    threads = [monitor_thread1, monitor_thread2, monitor_thread3]
    thread_inputs = [monitor_input1, monitor_input2, monitor_input3]
    thread_outputs = [monitor_output1, monitor_output2, monitor_output3]
    try:
        # NOTE(review): this block mixes the root `logging` calls with the
        # module-level `logger` used elsewhere — presumably unintentional.
        logging.info("Starting all monitor threads")
        # Start all monitor threads
        for thread in threads:
            thread.start()
        logging.info("Wait before subtest")
        test_utils.wait_before_subtest()
        # Collect (and reset) any error messages reported by the monitors.
        monitor_err_msg = ""
        for thread_output in thread_outputs:
            if thread_output["error_msg"] != "":
                monitor_err_msg += " ,{}".format(thread_output["error_msg"])
                thread_output["error_msg"] = ""
        results.record_action("Check ping status of vm_1, vm_2, and vm_3")
        results.add_to_summary(0, "-")
        if len(monitor_err_msg) == 0:
            results.add_success("Ping succeeds")
        else:
            results.add_failure(monitor_err_msg)
        # Stop monitor thread 2 and delete instance vm_2
        thread_inputs[1]["stop_thread"] = True
        if not os_utils.delete_instance(nova_client, vm_2.id):
            logging.error("Fail to delete vm_2 instance during "
                          "testing process")
            raise Exception("Fail to delete instance vm_2.")
        # Create a new vm (vm_4) on compute 1 node
        u4 = test_utils.generate_ping_userdata([vm1_ip, vm3_ip], 1)
        vm_4 = test_utils.create_instance(
            nova_client,
            TESTCASE_CONFIG.instance_4_name,
            image_id,
            network_1_id,
            sg_id,
            secgroup_name=TESTCASE_CONFIG.secgroup_name,
            compute_node=av_zone_1,
            userdata=u4)
        instance_ids.append(vm_4.id)

        # Wait for VMs to get ips.
        instances_dhcp_up = test_utils.wait_for_instances_get_dhcp(vm_4)
        if not instances_dhcp_up:
            logger.error("Instance vm_4 failed to start.")
            # TODO: Handle this appropriately
        # Create and start a new monitor thread for vm_4
        monitor_input4 = m.dict()
        monitor_output4 = m.dict()
        monitor_input4["stop_thread"] = False
        monitor_output4["error_msg"] = ""
        monitor_thread4 = Process(target=monitor, args=(monitor_input4,
                                                        monitor_output4,
                                                        vm_4,))
        threads.append(monitor_thread4)
        thread_inputs.append(monitor_input4)
        thread_outputs.append(monitor_output4)
        logging.info("Starting monitor thread of vm_4")
        threads[3].start()
        test_utils.wait_before_subtest()
        # Re-check for monitor errors after the vm_2 -> vm_4 swap.
        monitor_err_msg = ""
        for thread_output in thread_outputs:
            if thread_output["error_msg"] != "":
                monitor_err_msg += " ,{}".format(thread_output["error_msg"])
                thread_output["error_msg"] = ""
        results.record_action("Check ping status of vm_1, vm_3 and vm_4. "
                              "Instance vm_2 is deleted")
        results.add_to_summary(0, "-")
        if len(monitor_err_msg) == 0:
            results.add_success("Ping succeeds")
        else:
            results.add_failure(monitor_err_msg)

    except Exception as e:
        logger.error("exception occurred while executing testcase_10: %s", e)
        raise
    finally:
        # Give a stop signal to all threads
        logging.info("Sending stop signal to monitor thread")
        for thread_input in thread_inputs:
            thread_input["stop_thread"] = True
        # Wait for all threads to stop and return to the main process
        for thread in threads:
            thread.join()

        # Tear down every OpenStack resource recorded during the test.
        test_utils.cleanup_nova(nova_client, instance_ids)
        test_utils.cleanup_glance(glance_client, image_ids)
        test_utils.cleanup_neutron(neutron_client, floatingip_ids, bgpvpn_ids,
                                   interfaces, subnet_ids, router_ids,
                                   network_ids)

    return results.compile_summary()
示例#19
0
def main():
    """Dual-BGPVPN isolation test.

    Creates two networks (two subnets each) and five instances, then
    associates net_1 with VPN1 and net_2 with VPN2 (each VPN configured
    with eRT=iRT but pointing at the *other* VPN's targets) and checks
    via SSH output that intra-VPN instances reach each other while
    cross-VPN traffic reports "not reachable".

    Returns:
        The compiled summary produced by ``results.compile_summary()``.

    NOTE(review): unlike sibling test cases, this variant records no
    resource IDs and has no cleanup path — confirm teardown is handled
    by the caller.
    """
    results = Results(COMMON_CONFIG.line_length)

    # Result table header.
    results.add_to_summary(0, "=")
    results.add_to_summary(2, "STATUS", "SUBTEST")
    results.add_to_summary(0, "=")

    nova_client = os_utils.get_nova_client()
    neutron_client = os_utils.get_neutron_client()
    glance_client = os_utils.get_glance_client()

    # Inject the shared private key into the guests so they can SSH to
    # each other.  NOTE(review): plain open/close — a `with` block would
    # be safer, left unchanged here.
    logger.debug("Using private key %s injected to the VMs." %
                 COMMON_CONFIG.keyfile_path)
    keyfile = open(COMMON_CONFIG.keyfile_path, 'r')
    key = keyfile.read()
    keyfile.close()
    files = {"/home/cirros/id_rsa": key}

    image_id = os_utils.create_glance_image(glance_client,
                                            TESTCASE_CONFIG.image_name,
                                            COMMON_CONFIG.image_path,
                                            disk=COMMON_CONFIG.image_format,
                                            container="bare",
                                            public='public')
    # Network 1 carries two subnets (1a/1b); network 2 likewise (2a/2b).
    network_1_id = test_utils.create_net(neutron_client,
                                         TESTCASE_CONFIG.net_1_name)
    test_utils.create_subnet(neutron_client, TESTCASE_CONFIG.subnet_1a_name,
                             TESTCASE_CONFIG.subnet_1a_cidr, network_1_id)
    test_utils.create_subnet(neutron_client, TESTCASE_CONFIG.subnet_1b_name,
                             TESTCASE_CONFIG.subnet_1b_cidr, network_1_id)

    network_2_id = test_utils.create_net(neutron_client,
                                         TESTCASE_CONFIG.net_2_name)
    test_utils.create_subnet(neutron_client, TESTCASE_CONFIG.subnet_2a_name,
                             TESTCASE_CONFIG.subnet_2a_cidr, network_2_id)
    test_utils.create_subnet(neutron_client, TESTCASE_CONFIG.subnet_2b_name,
                             TESTCASE_CONFIG.subnet_2b_cidr, network_2_id)

    sg_id = os_utils.create_security_group_full(neutron_client,
                                                TESTCASE_CONFIG.secgroup_name,
                                                TESTCASE_CONFIG.secgroup_descr)

    # Two compute nodes are required to spread the VMs over two zones.
    compute_nodes = test_utils.assert_and_get_compute_nodes(nova_client)

    av_zone_1 = "nova:" + compute_nodes[0]
    av_zone_2 = "nova:" + compute_nodes[1]

    # boot INTANCES
    userdata_common = test_utils.generate_userdata_common()
    vm_2 = test_utils.create_instance(
        nova_client,
        TESTCASE_CONFIG.instance_2_name,
        image_id,
        network_1_id,
        sg_id,
        fixed_ip=TESTCASE_CONFIG.instance_2_ip,
        secgroup_name=TESTCASE_CONFIG.secgroup_name,
        compute_node=av_zone_1,
        userdata=userdata_common)

    vm_3 = test_utils.create_instance(
        nova_client,
        TESTCASE_CONFIG.instance_3_name,
        image_id,
        network_1_id,
        sg_id,
        fixed_ip=TESTCASE_CONFIG.instance_3_ip,
        secgroup_name=TESTCASE_CONFIG.secgroup_name,
        compute_node=av_zone_2,
        userdata=userdata_common)

    vm_5 = test_utils.create_instance(
        nova_client,
        TESTCASE_CONFIG.instance_5_name,
        image_id,
        network_2_id,
        sg_id,
        fixed_ip=TESTCASE_CONFIG.instance_5_ip,
        secgroup_name=TESTCASE_CONFIG.secgroup_name,
        compute_node=av_zone_2,
        userdata=userdata_common)

    # We boot vm5 first because we need vm5_ip for vm4 userdata
    u4 = test_utils.generate_userdata_with_ssh([
        TESTCASE_CONFIG.instance_1_ip, TESTCASE_CONFIG.instance_3_ip,
        TESTCASE_CONFIG.instance_5_ip
    ])
    vm_4 = test_utils.create_instance(
        nova_client,
        TESTCASE_CONFIG.instance_4_name,
        image_id,
        network_2_id,
        sg_id,
        fixed_ip=TESTCASE_CONFIG.instance_4_ip,
        secgroup_name=TESTCASE_CONFIG.secgroup_name,
        compute_node=av_zone_1,
        userdata=u4,
        files=files)

    # We boot VM1 at the end because we need to get the IPs first to generate
    # the userdata
    u1 = test_utils.generate_userdata_with_ssh([
        TESTCASE_CONFIG.instance_2_ip, TESTCASE_CONFIG.instance_3_ip,
        TESTCASE_CONFIG.instance_4_ip, TESTCASE_CONFIG.instance_5_ip
    ])
    vm_1 = test_utils.create_instance(
        nova_client,
        TESTCASE_CONFIG.instance_1_name,
        image_id,
        network_1_id,
        sg_id,
        fixed_ip=TESTCASE_CONFIG.instance_1_ip,
        secgroup_name=TESTCASE_CONFIG.secgroup_name,
        compute_node=av_zone_1,
        userdata=u1,
        files=files)

    # VPN1 uses targets2 for import/export/route targets (eRT=iRT).
    msg = ("Create VPN1 with eRT=iRT")
    results.record_action(msg)
    vpn1_name = "sdnvpn-1-" + str(randint(100000, 999999))
    kwargs = {
        "import_targets": TESTCASE_CONFIG.targets2,
        "export_targets": TESTCASE_CONFIG.targets2,
        "route_targets": TESTCASE_CONFIG.targets2,
        "route_distinguishers": TESTCASE_CONFIG.route_distinguishers1,
        "name": vpn1_name
    }
    bgpvpn1 = os_utils.create_bgpvpn(neutron_client, **kwargs)
    bgpvpn1_id = bgpvpn1['bgpvpn']['id']
    logger.debug("VPN1 created details: %s" % bgpvpn1)

    msg = ("Associate network '%s' to the VPN." % TESTCASE_CONFIG.net_1_name)
    results.record_action(msg)
    results.add_to_summary(0, "-")

    os_utils.create_network_association(neutron_client, bgpvpn1_id,
                                        network_1_id)

    # Wait for VMs to get ips.
    instances_up = test_utils.wait_for_instances_up(vm_1, vm_2, vm_3, vm_4,
                                                    vm_5)

    if not instances_up:
        logger.error("One or more instances is down")
        sys.exit(-1)

    logger.info("Waiting for the VMs to connect to each other using the"
                " updated network configuration")
    test_utils.wait_before_subtest()

    # Intra-VPN reachability: vm_1 must reach vm_2 and vm_3 on net_1.
    # 10.10.10.12 should return sdnvpn-2 to sdnvpn-1
    results.check_ssh_output(vm_1,
                             vm_2,
                             expected=TESTCASE_CONFIG.instance_2_name,
                             timeout=200)
    # 10.10.11.13 should return sdnvpn-3 to sdnvpn-1
    results.check_ssh_output(vm_1,
                             vm_3,
                             expected=TESTCASE_CONFIG.instance_3_name,
                             timeout=30)

    # VPN2 uses targets1 for import/export/route targets (eRT=iRT).
    results.add_to_summary(0, "-")
    msg = ("Create VPN2 with eRT=iRT")
    results.record_action(msg)
    vpn2_name = "sdnvpn-2-" + str(randint(100000, 999999))
    kwargs = {
        "import_targets": TESTCASE_CONFIG.targets1,
        "export_targets": TESTCASE_CONFIG.targets1,
        "route_targets": TESTCASE_CONFIG.targets1,
        "route_distinguishers": TESTCASE_CONFIG.route_distinguishers2,
        "name": vpn2_name
    }
    bgpvpn2 = os_utils.create_bgpvpn(neutron_client, **kwargs)
    bgpvpn2_id = bgpvpn2['bgpvpn']['id']
    logger.debug("VPN created details: %s" % bgpvpn2)

    msg = ("Associate network '%s' to the VPN2." % TESTCASE_CONFIG.net_2_name)
    results.record_action(msg)
    results.add_to_summary(0, "-")

    os_utils.create_network_association(neutron_client, bgpvpn2_id,
                                        network_2_id)

    # Both network associations must be visible before re-testing.
    test_utils.wait_for_bgp_net_assoc(neutron_client, bgpvpn1_id, network_1_id)
    test_utils.wait_for_bgp_net_assoc(neutron_client, bgpvpn2_id, network_2_id)

    logger.info("Waiting for the VMs to connect to each other using the"
                " updated network configuration")
    test_utils.wait_before_subtest()

    # Intra-VPN2 reachability and cross-VPN isolation checks.
    # 10.10.11.13 should return sdnvpn-5 to sdnvpn-4
    results.check_ssh_output(vm_4,
                             vm_5,
                             expected=TESTCASE_CONFIG.instance_5_name,
                             timeout=30)

    # 10.10.10.11 should return "not reachable" to sdnvpn-4
    results.check_ssh_output(vm_4, vm_1, expected="not reachable", timeout=30)

    return results.compile_summary()
示例#20
0
def main():
    """Run the two-network BGPVPN (eRT=iRT) subtest and return its summary.

    Workflow:
      1. Boot VMs on two neutron networks (several instances are disabled
         until ODL supports multiple subnets per network -- see TODOs).
      2. Create VPN1 and VPN2 with equal import/export route targets and
         associate one network to each.
      3. Verify expected SSH reachability (and non-reachability) between
         the VMs via their console output.

    All created OpenStack resources are cleaned up in the ``finally``
    block regardless of the test outcome.
    """
    results = Results(COMMON_CONFIG.line_length)

    results.add_to_summary(0, "=")
    results.add_to_summary(2, "STATUS", "SUBTEST")
    results.add_to_summary(0, "=")

    nova_client = os_utils.get_nova_client()
    neutron_client = os_utils.get_neutron_client()
    glance_client = os_utils.get_glance_client()

    # Trackers for every resource we create; consumed by the cleanup
    # helpers in the finally block below.
    (floatingip_ids, instance_ids, router_ids, network_ids, image_ids,
     subnet_ids, interfaces, bgpvpn_ids) = ([] for i in range(8))

    try:
        logger.debug("Using private key %s injected to the VMs." %
                     COMMON_CONFIG.keyfile_path)
        keyfile = open(COMMON_CONFIG.keyfile_path, 'r')
        key = keyfile.read()
        keyfile.close()
        # Injected into the guests so they can SSH into each other.
        files = {"/home/cirros/id_rsa": key}

        image_id = os_utils.create_glance_image(
            glance_client,
            TESTCASE_CONFIG.image_name,
            COMMON_CONFIG.image_path,
            disk=COMMON_CONFIG.image_format,
            container="bare",
            public='public')
        image_ids.append(image_id)

        network_1_id = test_utils.create_net(neutron_client,
                                             TESTCASE_CONFIG.net_1_name)
        subnet_1a_id = test_utils.create_subnet(neutron_client,
                                                TESTCASE_CONFIG.subnet_1a_name,
                                                TESTCASE_CONFIG.subnet_1a_cidr,
                                                network_1_id)
        # TODO: uncomment the commented lines once ODL has
        # support for mulitple subnets under same neutron network
        # subnet_1b_id = test_utils.create_subnet(
        #     neutron_client,
        #     TESTCASE_CONFIG.subnet_1b_name,
        #     TESTCASE_CONFIG.subnet_1b_cidr,
        #     network_1_id)

        network_2_id = test_utils.create_net(neutron_client,
                                             TESTCASE_CONFIG.net_2_name)
        # subnet_2a_id = test_utils.create_subnet(
        #     neutron_client,
        #     TESTCASE_CONFIG.subnet_2a_name,
        #     TESTCASE_CONFIG.subnet_2a_cidr,
        #     network_2_id)
        subnet_2b_id = test_utils.create_subnet(neutron_client,
                                                TESTCASE_CONFIG.subnet_2b_name,
                                                TESTCASE_CONFIG.subnet_2b_cidr,
                                                network_2_id)
        network_ids.extend([network_1_id, network_2_id])
        subnet_ids.extend([
            subnet_1a_id,
            # subnet_1b_id,
            # subnet_2a_id,
            subnet_2b_id
        ])

        sg_id = os_utils.create_security_group_full(
            neutron_client, TESTCASE_CONFIG.secgroup_name,
            TESTCASE_CONFIG.secgroup_descr)

        compute_nodes = test_utils.assert_and_get_compute_nodes(nova_client)

        av_zone_1 = "nova:" + compute_nodes[0]
        # av_zone_2 = "nova:" + compute_nodes[1]

        # boot INSTANCES
        userdata_common = test_utils.generate_userdata_common()
        vm_2 = test_utils.create_instance(
            nova_client,
            TESTCASE_CONFIG.instance_2_name,
            image_id,
            network_1_id,
            sg_id,
            fixed_ip=TESTCASE_CONFIG.instance_2_ip,
            secgroup_name=TESTCASE_CONFIG.secgroup_name,
            compute_node=av_zone_1,
            userdata=userdata_common)

        #         vm_3 = test_utils.create_instance(
        #             nova_client,
        #             TESTCASE_CONFIG.instance_3_name,
        #             image_id,
        #             network_1_id,
        #             sg_id,
        #             fixed_ip=TESTCASE_CONFIG.instance_3_ip,
        #             secgroup_name=TESTCASE_CONFIG.secgroup_name,
        #             compute_node=av_zone_2,
        #             userdata=userdata_common)
        #
        #         vm_5 = test_utils.create_instance(
        #             nova_client,
        #             TESTCASE_CONFIG.instance_5_name,
        #             image_id,
        #             network_2_id,
        #             sg_id,
        #             fixed_ip=TESTCASE_CONFIG.instance_5_ip,
        #             secgroup_name=TESTCASE_CONFIG.secgroup_name,
        #             compute_node=av_zone_2,
        #             userdata=userdata_common)

        # vm_4's userdata SSHes to vm_1 (vm_3/vm_5 targets are disabled
        # until multi-subnet support lands -- see TODO above)
        u4 = test_utils.generate_userdata_with_ssh([
            TESTCASE_CONFIG.instance_1_ip
            # TESTCASE_CONFIG.instance_3_ip,
            # TESTCASE_CONFIG.instance_5_ip
        ])
        vm_4 = test_utils.create_instance(
            nova_client,
            TESTCASE_CONFIG.instance_4_name,
            image_id,
            network_2_id,
            sg_id,
            fixed_ip=TESTCASE_CONFIG.instance_4_ip,
            secgroup_name=TESTCASE_CONFIG.secgroup_name,
            compute_node=av_zone_1,
            userdata=u4,
            files=files)

        # We boot VM1 at the end because we need to get the IPs first
        # to generate the userdata
        u1 = test_utils.generate_userdata_with_ssh([
            TESTCASE_CONFIG.instance_2_ip,
            # TESTCASE_CONFIG.instance_3_ip,
            TESTCASE_CONFIG.instance_4_ip,
            # TESTCASE_CONFIG.instance_5_ip
        ])
        vm_1 = test_utils.create_instance(
            nova_client,
            TESTCASE_CONFIG.instance_1_name,
            image_id,
            network_1_id,
            sg_id,
            fixed_ip=TESTCASE_CONFIG.instance_1_ip,
            secgroup_name=TESTCASE_CONFIG.secgroup_name,
            compute_node=av_zone_1,
            userdata=u1,
            files=files)
        instance_ids.extend([
            vm_1.id,
            vm_2.id,
            # vm_3.id,
            vm_4.id,
            #  vm_5.id
        ])

        msg = ("Create VPN1 with eRT=iRT")
        results.record_action(msg)
        # Random suffix keeps the VPN name unique across repeated runs.
        vpn1_name = "sdnvpn-1-" + str(randint(100000, 999999))
        kwargs = {
            "import_targets": TESTCASE_CONFIG.targets2,
            "export_targets": TESTCASE_CONFIG.targets2,
            "route_targets": TESTCASE_CONFIG.targets2,
            "route_distinguishers": TESTCASE_CONFIG.route_distinguishers1,
            "name": vpn1_name
        }
        bgpvpn1 = test_utils.create_bgpvpn(neutron_client, **kwargs)
        bgpvpn1_id = bgpvpn1['bgpvpn']['id']
        logger.debug("VPN1 created details: %s" % bgpvpn1)
        bgpvpn_ids.append(bgpvpn1_id)

        msg = ("Associate network '%s' to the VPN." %
               TESTCASE_CONFIG.net_1_name)
        results.record_action(msg)
        results.add_to_summary(0, "-")

        test_utils.create_network_association(neutron_client, bgpvpn1_id,
                                              network_1_id)

        # Wait for VMs to get ips.
        instances_up = test_utils.wait_for_instances_up(vm_2)
        instances_dhcp_up = test_utils.wait_for_instances_get_dhcp(vm_1, vm_4)

        if (not instances_up or not instances_dhcp_up):
            logger.error("One or more instances are down")
            # TODO: Handle this appropriately

        logger.info("Waiting for the VMs to connect to each other using the"
                    " updated network configuration")
        test_utils.wait_before_subtest()

        # 10.10.10.12 should return sdnvpn-2 to sdnvpn-1
        results.check_ssh_output(vm_1,
                                 vm_2,
                                 expected=TESTCASE_CONFIG.instance_2_name,
                                 timeout=200)
        # 10.10.11.13 should return sdnvpn-3 to sdnvpn-1
        # results.check_ssh_output(vm_1, vm_3,
        #                          expected=TESTCASE_CONFIG.instance_3_name,
        #                          timeout=30)

        results.add_to_summary(0, "-")
        msg = ("Create VPN2 with eRT=iRT")
        results.record_action(msg)
        vpn2_name = "sdnvpn-2-" + str(randint(100000, 999999))
        kwargs = {
            "import_targets": TESTCASE_CONFIG.targets1,
            "export_targets": TESTCASE_CONFIG.targets1,
            "route_targets": TESTCASE_CONFIG.targets1,
            "route_distinguishers": TESTCASE_CONFIG.route_distinguishers2,
            "name": vpn2_name
        }
        bgpvpn2 = test_utils.create_bgpvpn(neutron_client, **kwargs)
        bgpvpn2_id = bgpvpn2['bgpvpn']['id']
        logger.debug("VPN created details: %s" % bgpvpn2)
        bgpvpn_ids.append(bgpvpn2_id)

        msg = ("Associate network '%s' to the VPN2." %
               TESTCASE_CONFIG.net_2_name)
        results.record_action(msg)
        results.add_to_summary(0, "-")

        test_utils.create_network_association(neutron_client, bgpvpn2_id,
                                              network_2_id)

        test_utils.wait_for_bgp_net_assoc(neutron_client, bgpvpn1_id,
                                          network_1_id)
        test_utils.wait_for_bgp_net_assoc(neutron_client, bgpvpn2_id,
                                          network_2_id)

        logger.info("Waiting for the VMs to connect to each other using the"
                    " updated network configuration")
        test_utils.wait_before_subtest()

        # 10.10.11.13 should return sdnvpn-5 to sdnvpn-4
        # results.check_ssh_output(vm_4, vm_5,
        #                          expected=TESTCASE_CONFIG.instance_5_name,
        #                          timeout=30)

        # 10.10.10.11 should return "not reachable" to sdnvpn-4
        results.check_ssh_output(vm_4,
                                 vm_1,
                                 expected="not reachable",
                                 timeout=30)

    except Exception as e:
        logger.error("exception occurred while executing testcase_2: %s", e)
        raise
    finally:
        # Always tear down what we created, even on failure.
        test_utils.cleanup_nova(nova_client, instance_ids)
        test_utils.cleanup_glance(glance_client, image_ids)
        test_utils.cleanup_neutron(neutron_client, floatingip_ids, bgpvpn_ids,
                                   interfaces, subnet_ids, router_ids,
                                   network_ids)

    return results.compile_summary()
示例#21
0
def main():
    """Provision prerequisites, run the selected Rally scenario(s) and
    report an aggregated summary.

    Uses the module-level ``args`` (scenario selection and the
    ``--report``/``--noclean``/``--sanity`` flags), ``tests`` and
    ``SUMMARY``.  Creates the Glance image, Cinder volume type and
    shared Neutron network the Rally tasks need, runs the task(s),
    prints the summary table, optionally pushes it to the results DB,
    and finally removes only the resources it created itself.
    """
    global SUMMARY
    global network_dict
    start_time = time.time()
    stop_time = start_time

    # Validate the requested scenario name before touching OpenStack.
    if args.test_name not in tests:
        logger.error('argument not valid')
        exit(-1)

    SUMMARY = []
    creds_nova = openstack_utils.get_credentials("nova")
    nova_client = novaclient.Client('2', **creds_nova)
    creds_neutron = openstack_utils.get_credentials("neutron")
    neutron_client = neutronclient.Client(**creds_neutron)
    creds_keystone = openstack_utils.get_credentials("keystone")
    keystone_client = keystoneclient.Client(**creds_keystone)
    glance_endpoint = keystone_client.service_catalog.url_for(
        service_type='image', endpoint_type='publicURL')
    glance_client = glanceclient.Client(1, glance_endpoint,
                                        token=keystone_client.auth_token)
    creds_cinder = openstack_utils.get_credentials("cinder")
    cinder_client = cinderclient.Client('2', creds_cinder['username'],
                                        creds_cinder['api_key'],
                                        creds_cinder['project_id'],
                                        creds_cinder['auth_url'],
                                        service_type="volume")

    client_dict['neutron'] = neutron_client

    # Create a volume type only when none exists; the emptiness of
    # volume_types doubles as the "we created it" flag for cleanup below.
    volume_types = openstack_utils.list_volume_types(cinder_client,
                                                     private=False)
    if not volume_types:
        volume_type = openstack_utils.create_volume_type(
            cinder_client, CINDER_VOLUME_TYPE_NAME)
        if not volume_type:
            logger.error("Failed to create volume type...")
            exit(-1)
        else:
            logger.debug("Volume type '%s' created succesfully..."
                         % CINDER_VOLUME_TYPE_NAME)
    else:
        logger.debug("Using existing volume type(s)...")

    # Reuse a pre-existing image when available; image_exists tells the
    # cleanup step whether the image is ours to delete.
    image_id = openstack_utils.get_image_id(glance_client, GLANCE_IMAGE_NAME)
    image_exists = False

    if image_id == '':
        logger.debug("Creating image '%s' from '%s'..." % (GLANCE_IMAGE_NAME,
                                                           GLANCE_IMAGE_PATH))
        image_id = openstack_utils.create_glance_image(glance_client,
                                                       GLANCE_IMAGE_NAME,
                                                       GLANCE_IMAGE_PATH)
        if not image_id:
            logger.error("Failed to create the Glance image...")
            exit(-1)
        else:
            logger.debug("Image '%s' with ID '%s' created succesfully ."
                         % (GLANCE_IMAGE_NAME, image_id))
    else:
        logger.debug("Using existing image '%s' with ID '%s'..."
                     % (GLANCE_IMAGE_NAME, image_id))
        image_exists = True

    logger.debug("Creating network '%s'..." % PRIVATE_NET_NAME)
    network_dict = openstack_utils.create_network_full(logger,
                                                       client_dict['neutron'],
                                                       PRIVATE_NET_NAME,
                                                       PRIVATE_SUBNET_NAME,
                                                       ROUTER_NAME,
                                                       PRIVATE_SUBNET_CIDR)
    if not network_dict:
        logger.error("Failed to create network...")
        exit(-1)
    else:
        # The network must be shared so Rally's per-scenario tenants
        # can use it.
        if not openstack_utils.update_neutron_net(client_dict['neutron'],
                                                  network_dict['net_id'],
                                                  shared=True):
            logger.error("Failed to update network...")
            exit(-1)
        else:
            logger.debug("Network '%s' available..." % PRIVATE_NET_NAME)

    if args.test_name == "all":
        for test_name in tests:
            # 'all' is the meta-entry and 'vm' is excluded from full runs.
            if test_name not in ('all', 'vm'):
                run_task(test_name)
    else:
        logger.debug("Test name: " + args.test_name)
        run_task(args.test_name)

    report = ("\n"
              "                                                              "
              "\n"
              "                     Rally Summary Report\n"
              "\n"
              "+===================+============+===============+===========+"
              "\n"
              "| Module            | Duration   | nb. Test Run  | Success   |"
              "\n"
              "+===================+============+===============+===========+"
              "\n")
    payload = []
    stop_time = time.time()

    # for each scenario we draw a row for the table
    total_duration = 0.0
    total_nb_tests = 0
    total_success = 0.0
    for s in SUMMARY:
        name = "{0:<17}".format(s['test_name'])
        duration = float(s['overall_duration'])
        total_duration += duration
        duration = time.strftime("%M:%S", time.gmtime(duration))
        duration = "{0:<10}".format(duration)
        nb_tests = "{0:<13}".format(s['nb_tests'])
        total_nb_tests += int(s['nb_tests'])
        success = "{0:<10}".format(str(s['success']) + '%')
        total_success += float(s['success'])
        report += ("" +
                   "| " + name + " | " + duration + " | " +
                   nb_tests + " | " + success + "|\n" +
                   "+-------------------+------------"
                   "+---------------+-----------+\n")
        payload.append({'module': name,
                        'details': {'duration': s['overall_duration'],
                                    'nb tests': s['nb_tests'],
                                    'success': s['success']}})

    total_duration_str = time.strftime("%H:%M:%S", time.gmtime(total_duration))
    total_duration_str2 = "{0:<10}".format(total_duration_str)
    total_nb_tests_str = "{0:<13}".format(total_nb_tests)
    # Keep the success rate numeric: the previous code rebound
    # total_success to a formatted string, which made the ">= 90"
    # criterion below compare str to int (always True on Python 2,
    # TypeError on Python 3).  Also guard against an empty SUMMARY.
    success_rate = (total_success / len(SUMMARY)) if SUMMARY else 0.0
    success_rate_str = "{:0.2f}".format(success_rate)
    total_success_str = "{0:<10}".format(success_rate_str + '%')
    report += "+===================+============+===============+===========+"
    report += "\n"
    report += ("| TOTAL:            | " + total_duration_str2 + " | " +
               total_nb_tests_str + " | " + total_success_str + "|\n")
    report += "+===================+============+===============+===========+"
    report += "\n"

    logger.info("\n" + report)
    payload.append({'summary': {'duration': total_duration,
                                'nb tests': total_nb_tests,
                                'nb success': success_rate_str}})

    # Generate json results for DB
    # json_results = {"timestart": time_start, "duration": total_duration,
    #                "tests": int(total_nb_tests),
    #                "success": int(total_success)}
    # logger.info("Results: "+str(json_results))

    # Evaluation of the success criteria
    status = "failed"
    # for Rally we decided that the overall success rate must be above 90%
    if success_rate >= 90:
        status = "passed"

    if args.sanity:
        case_name = "rally_sanity"
    else:
        case_name = "rally_full"

    if args.report:
        logger.debug("Pushing Rally summary into DB...")
        functest_utils.push_results_to_db("functest",
                                          case_name,
                                          None,
                                          start_time,
                                          stop_time,
                                          status,
                                          payload)
    if args.noclean:
        exit(0)

    if not image_exists:
        logger.debug("Deleting image '%s' with ID '%s'..."
                     % (GLANCE_IMAGE_NAME, image_id))
        # NOTE(review): the nova client is passed to delete_glance_image
        # -- confirm against openstack_utils whether it expects the nova
        # or the glance client.
        if not openstack_utils.delete_glance_image(nova_client, image_id):
            logger.error("Error deleting the glance image")

    if not volume_types:
        logger.debug("Deleting volume type '%s'..."
                     % CINDER_VOLUME_TYPE_NAME)
        if not openstack_utils.delete_volume_type(cinder_client, volume_type):
            logger.error("Error in deleting volume type...")
示例#22
0
def main():
    """Boot two instances and verify L3 connectivity via a cloud-init
    ping script (vPing userdata testcase).

    VM1 is booted first so its fixed IP can be embedded in VM2's
    userdata; VM2 then pings VM1 in a loop and writes 'vPing OK' to its
    console, which this function polls.  Exits the process with 0 on
    success, -1 on failure and -2 when nova userdata/metadata is not
    supported.
    """

    creds_nova = openstack_utils.get_credentials("nova")
    nova_client = novaclient.Client('2', **creds_nova)
    creds_neutron = openstack_utils.get_credentials("neutron")
    neutron_client = neutronclient.Client(**creds_neutron)
    creds_keystone = openstack_utils.get_credentials("keystone")
    keystone_client = keystoneclient.Client(**creds_keystone)
    glance_endpoint = keystone_client.service_catalog.url_for(
        service_type='image', endpoint_type='publicURL')
    glance_client = glanceclient.Client(1, glance_endpoint,
                                        token=keystone_client.auth_token)
    EXIT_CODE = -1

    image_id = None
    flavor = None

    # Check if the given image exists
    image_id = openstack_utils.get_image_id(glance_client, GLANCE_IMAGE_NAME)
    if image_id != '':
        logger.info("Using existing image '%s'..." % GLANCE_IMAGE_NAME)
        global image_exists
        image_exists = True
    else:
        logger.info("Creating image '%s' from '%s'..." % (GLANCE_IMAGE_NAME,
                                                          GLANCE_IMAGE_PATH))
        image_id = openstack_utils.create_glance_image(glance_client,
                                                       GLANCE_IMAGE_NAME,
                                                       GLANCE_IMAGE_PATH)
        if not image_id:
            logger.error("Failed to create a Glance image...")
            return(EXIT_CODE)
        logger.debug("Image '%s' with ID=%s created successfully."
                     % (GLANCE_IMAGE_NAME, image_id))

    network_dic = openstack_utils.create_network_full(logger,
                                                      neutron_client,
                                                      PRIVATE_NET_NAME,
                                                      PRIVATE_SUBNET_NAME,
                                                      ROUTER_NAME,
                                                      PRIVATE_SUBNET_CIDR)
    if not network_dic:
        logger.error(
            "There has been a problem when creating the neutron network")
        return(EXIT_CODE)
    network_id = network_dic["net_id"]

    create_security_group(neutron_client)

    # Check if the given flavor exists
    try:
        flavor = nova_client.flavors.find(name=FLAVOR)
        logger.info("Flavor found '%s'" % FLAVOR)
    except Exception:
        logger.error("Flavor '%s' not found." % FLAVOR)
        logger.info("Available flavors are: ")
        # Bugfix: the novaclient manager is 'flavors' (plural);
        # 'nova_client.flavor' raised AttributeError and masked the
        # intended flavor listing.
        pMsg(nova_client.flavors.list())
        exit(-1)

    # Deleting instances if they exist
    servers = nova_client.servers.list()
    for server in servers:
        if server.name == NAME_VM_1 or server.name == NAME_VM_2:
            logger.info("Instance %s found. Deleting..." % server.name)
            server.delete()

    # boot VM 1
    # basic boot
    # tune (e.g. flavor, images, network) to your specific
    # openstack configuration here
    # we consider start time at VM1 booting
    start_time = time.time()
    stop_time = start_time
    logger.info("vPing Start Time:'%s'" % (
        datetime.datetime.fromtimestamp(start_time).strftime(
            '%Y-%m-%d %H:%M:%S')))

    # create VM
    logger.info("Creating instance '%s'..." % NAME_VM_1)
    logger.debug(
        "Configuration:\n name=%s \n flavor=%s \n image=%s \n "
        "network=%s \n" % (NAME_VM_1, flavor, image_id, network_id))
    vm1 = nova_client.servers.create(
        name=NAME_VM_1,
        flavor=flavor,
        image=image_id,
        config_drive=True,
        nics=[{"net-id": network_id}]
    )

    # wait until VM status is active
    if not waitVmActive(nova_client, vm1):

        logger.error("Instance '%s' cannot be booted. Status is '%s'" % (
            NAME_VM_1, openstack_utils.get_instance_status(nova_client, vm1)))
        return (EXIT_CODE)
    else:
        logger.info("Instance '%s' is ACTIVE." % NAME_VM_1)

    # Retrieve IP of first VM
    test_ip = vm1.networks.get(PRIVATE_NET_NAME)[0]
    logger.debug("Instance '%s' got %s" % (NAME_VM_1, test_ip))

    # boot VM 2
    # we will boot then execute a ping script with cloud-init
    # the long chain corresponds to the ping procedure converted with base 64
    # tune (e.g. flavor, images, network) to your specific openstack
    #  configuration here
    u = ("#!/bin/sh\n\nwhile true; do\n ping -c 1 %s 2>&1 >/dev/null\n "
         "RES=$?\n if [ \"Z$RES\" = \"Z0\" ] ; then\n  echo 'vPing OK'\n "
         "break\n else\n  echo 'vPing KO'\n fi\n sleep 1\ndone\n" % test_ip)

    # create VM
    logger.info("Creating instance '%s'..." % NAME_VM_2)
    logger.debug(
        "Configuration:\n name=%s \n flavor=%s \n image=%s \n network=%s "
        "\n userdata= \n%s" % (
            NAME_VM_2, flavor, image_id, network_id, u))
    vm2 = nova_client.servers.create(
        name=NAME_VM_2,
        flavor=flavor,
        image=image_id,
        nics=[{"net-id": network_id}],
        config_drive=True,
        userdata=u
    )

    if not waitVmActive(nova_client, vm2):
        logger.error("Instance '%s' cannot be booted. Status is '%s'" % (
            NAME_VM_2, openstack_utils.get_instance_status(nova_client, vm2)))
        return (EXIT_CODE)
    else:
        logger.info("Instance '%s' is ACTIVE." % NAME_VM_2)

    logger.info("Waiting for ping...")
    sec = 0
    metadata_tries = 0
    console_log = vm2.get_console_output()
    duration = 0
    stop_time = time.time()

    # Poll VM2's console once per second until the ping script reports,
    # the metadata service repeatedly fails, or the timeout is reached.
    while True:
        time.sleep(1)
        console_log = vm2.get_console_output()
        # print "--"+console_log
        # report if the test is failed
        if "vPing OK" in console_log:
            logger.info("vPing detected!")

            # we consider start time at VM1 booting
            stop_time = time.time()
            duration = round(stop_time - start_time, 1)
            logger.info("vPing duration:'%s'" % duration)
            EXIT_CODE = 0
            break
        elif ("failed to read iid from metadata" in console_log or
              metadata_tries > 5):
            EXIT_CODE = -2
            break
        elif sec == PING_TIMEOUT:
            logger.info("Timeout reached.")
            break
        elif sec % 10 == 0:
            if "request failed" in console_log:
                logger.debug("It seems userdata is not supported in "
                             "nova boot. Waiting a bit...")
                metadata_tries += 1
            else:
                logger.debug("Pinging %s. Waiting for response..." % test_ip)
        sec += 1

    test_status = "NOK"
    if EXIT_CODE == 0:
        logger.info("vPing OK")
        test_status = "OK"
    elif EXIT_CODE == -2:
        duration = 0
        logger.info("Userdata is not supported in nova boot. Aborting test...")
    else:
        duration = 0
        logger.error("vPing FAILED")

    if args.report:
        try:
            logger.debug("Pushing vPing userdata results into DB...")
            functest_utils.push_results_to_db("functest",
                                              "vping_userdata",
                                              logger,
                                              start_time,
                                              stop_time,
                                              test_status,
                                              details={'timestart': start_time,
                                                       'duration': duration,
                                                       'status': test_status})
        except Exception:
            # Narrowed from a bare except: do not swallow SystemExit /
            # KeyboardInterrupt while reporting.
            logger.error("Error pushing results into Database '%s'"
                         % sys.exc_info()[0])

    exit(EXIT_CODE)
示例#23
0
def main():
    """Set up and run the SFC (Service Function Chaining) feature test.

    Creates a glance image, a neutron network and a security group, boots a
    client VM and a server VM, deploys two service functions (SFs) through
    tacker, then checks that the traffic classification alternately blocks
    and allows SSH and HTTP between client and server.

    Exits the process with 0 on success, -1 on a setup failure; returns
    early (without exiting) when one of the four traffic checks fails.
    """
    nova_client = os_utils.get_nova_client()
    neutron_client = os_utils.get_neutron_client()
    glance_client = os_utils.get_glance_client()

    # Download the demo image only once; reuse a previously fetched copy.
    if not os.path.isfile(IMAGE_PATH):
        logger.info("Downloading image")
        ft_utils.download_url(
            "http://artifacts.opnfv.org/sfc/demo/sf_summit2016.qcow2",
            "/home/opnfv/functest/data/")
    else:
        logger.info("Using old image")

    # Allow any port so that tacker commands reach the server.
    # CHECK IF THIS STILL MAKES SENSE WHEN TACKER IS INCLUDED IN OPNFV
    # INSTALLATION
    controller_command = ("sshpass -p r00tme ssh [email protected]"
                          " 'fuel node'|grep controller|awk '{print $10}'")
    logger.info("Executing tacker script: '%s'" % controller_command)
    process = subprocess.Popen(controller_command,
                               shell=True,
                               stdout=subprocess.PIPE)
    # strip() drops the trailing newline readline() keeps; leaving it in
    # would split the remote ssh command built below.
    ip = process.stdout.readline().strip()

    iptable_command1 = ("sshpass -p r00tme ssh [email protected] ssh"
                        " " + ip + " iptables -P INPUT ACCEPT ")
    # BUGFIX: a separating space before 'iptables' was missing here, so the
    # remote command targeted a bogus host "<ip>iptables".
    iptable_command2 = ("sshpass -p r00tme ssh [email protected] ssh"
                        " " + ip + " iptables -t nat -P INPUT ACCEPT ")

    subprocess.call(iptable_command1, shell=True)
    subprocess.call(iptable_command2, shell=True)

    # Create the glance image and the neutron network.
    image_id = os_utils.create_glance_image(glance_client,
                                            IMAGE_NAME,
                                            IMAGE_PATH,
                                            disk=IMAGE_FORMAT,
                                            container="bare",
                                            public=True,
                                            logger=logger)

    network_dic = os_utils.create_network_full(logger,
                                               neutron_client,
                                               NET_NAME,
                                               SUBNET_NAME,
                                               ROUTER_NAME,
                                               SUBNET_CIDR)
    if not network_dic:
        logger.error(
            "There has been a problem when creating the neutron network")
        sys.exit(-1)

    network_id = network_dic["net_id"]

    sg_id = os_utils.create_security_group_full(logger, neutron_client,
                                                SECGROUP_NAME, SECGROUP_DESCR)

    # Boot the first instance (client).
    logger.info("Creating instance '%s'..." % INSTANCE_NAME)
    logger.debug(
        "Configuration:\n name=%s \n flavor=%s \n image=%s \n "
        "network=%s \n" % (INSTANCE_NAME, FLAVOR, image_id, network_id))
    instance = os_utils.create_instance_and_wait_for_active(FLAVOR,
                                                            image_id,
                                                            network_id,
                                                            INSTANCE_NAME)

    if instance is None:
        logger.error("Error while booting instance.")
        sys.exit(-1)
    # Retrieve the private IP of the client instance.
    instance_ip = instance.networks.get(NET_NAME)[0]
    logger.debug("Instance '%s' got private ip '%s'." %
                 (INSTANCE_NAME, instance_ip))

    logger.info("Adding '%s' to security group '%s'..."
                % (INSTANCE_NAME, SECGROUP_NAME))
    os_utils.add_secgroup_to_instance(nova_client, instance.id, sg_id)

    logger.info("Creating floating IP for VM '%s'..." % INSTANCE_NAME)
    floatip_dic = os_utils.create_floating_ip(neutron_client)
    floatip_client = floatip_dic['fip_addr']
    # floatip_id = floatip_dic['fip_id']

    if floatip_client is None:
        logger.error("Cannot create floating IP.")
        sys.exit(-1)
    logger.info("Floating IP created: '%s'" % floatip_client)

    logger.info("Associating floating ip: '%s' to VM '%s' "
                % (floatip_client, INSTANCE_NAME))
    if not os_utils.add_floating_ip(nova_client, instance.id, floatip_client):
        logger.error("Cannot associate floating IP to VM.")
        sys.exit(-1)

    # STARTING SECOND VM (server) ###

    # BUGFIX: these log lines previously printed INSTANCE_NAME although the
    # VM is booted as INSTANCE_NAME_2.
    logger.info("Creating instance '%s'..." % INSTANCE_NAME_2)
    logger.debug(
        "Configuration:\n name=%s \n flavor=%s \n image=%s \n "
        "network=%s \n" % (INSTANCE_NAME_2, FLAVOR, image_id, network_id))
    instance_2 = os_utils.create_instance_and_wait_for_active(FLAVOR,
                                                              image_id,
                                                              network_id,
                                                              INSTANCE_NAME_2)

    if instance_2 is None:
        logger.error("Error while booting instance.")
        sys.exit(-1)
    # Retrieve the private IP of the server instance.
    instance_ip_2 = instance_2.networks.get(NET_NAME)[0]
    logger.debug("Instance '%s' got private ip '%s'." %
                 (INSTANCE_NAME_2, instance_ip_2))

    logger.info("Adding '%s' to security group '%s'..."
                % (INSTANCE_NAME_2, SECGROUP_NAME))
    os_utils.add_secgroup_to_instance(nova_client, instance_2.id, sg_id)

    logger.info("Creating floating IP for VM '%s'..." % INSTANCE_NAME_2)
    floatip_dic = os_utils.create_floating_ip(neutron_client)
    floatip_server = floatip_dic['fip_addr']
    # floatip_id = floatip_dic['fip_id']

    if floatip_server is None:
        logger.error("Cannot create floating IP.")
        sys.exit(-1)
    logger.info("Floating IP created: '%s'" % floatip_server)

    logger.info("Associating floating ip: '%s' to VM '%s' "
                % (floatip_server, INSTANCE_NAME_2))

    if not os_utils.add_floating_ip(nova_client,
                                    instance_2.id,
                                    floatip_server):
        logger.error("Cannot associate floating IP to VM.")
        sys.exit(-1)

    # CREATION OF THE 2 SF ####

    tacker_script = "/home/opnfv/repos/functest/testcases/features/sfc/" + \
        TACKER_SCRIPT
    logger.info("Executing tacker script: '%s'" % tacker_script)
    subprocess.call(tacker_script, shell=True)

    # SSH call to start the HTTP server on the server VM.
    ssh = paramiko.SSHClient()
    ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())

    try:
        ssh.connect(floatip_server, username="******",
                    password="******", timeout=2)
        command = "python -m SimpleHTTPServer 80 > /dev/null 2>&1 &"
        logger.info("Starting HTTP server")
        (stdin, stdout, stderr) = ssh.exec_command(command)
    except:
        # NOTE(review): a single failed attempt is only logged; there is no
        # retry loop, so the HTTP server may not be running — confirm.
        logger.debug("Waiting for %s..." % floatip_server)
        time.sleep(6)
        # timeout -= 1

    # Give every SF (any instance that is neither client nor server) a
    # floating IP so we can reach it over SSH.
    instances = nova_client.servers.list(search_opts={'all_tenants': 1})
    ips = []
    try:
        for instance in instances:
            if "server" not in instance.name:
                if "client" not in instance.name:
                    logger.debug(
                        "This is the instance name: %s " % instance.name)
                    floatip_dic = os_utils.create_floating_ip(neutron_client)
                    floatip = floatip_dic['fip_addr']
                    ips.append(floatip)
                    instance.add_floating_ip(floatip)
    except:
        logger.debug("Problems assigning floating IP to SFs")

    logger.debug("Floating IPs for SFs: %s..." % ips)
    # SSH to start the vxlan_tool on SF1 (blocks HTTP, port 80).
    logger.info("Configuring the SFs")
    try:
        ssh.connect(ips[0], username="******",
                    password="******", timeout=2)
        command = ("nohup python vxlan_tool.py -i eth0 "
                   "-d forward -v off -f -b 80 &")
        (stdin, stdout, stderr) = ssh.exec_command(command)
    except:
        logger.debug("Waiting for %s..." % ips[0])
        time.sleep(6)
        # timeout -= 1

    # SSH to start the vxlan_tool on SF2 (blocks SSH, port 22).
    try:
        ssh.connect(ips[1], username="******",
                    password="******", timeout=2)
        command = ("nohup python vxlan_tool.py -i eth0 "
                   "-d forward -v off -f -b 22 &")
        (stdin, stdout, stderr) = ssh.exec_command(command)
    except:
        logger.debug("Waiting for %s..." % ips[1])
        time.sleep(6)
        # timeout -= 1

    # TEST 1: from the client, SSH towards the server should be blocked.
    logger.info("TEST STARTED")
    try:
        ssh.connect(floatip_client, username="******",
                    password="******", timeout=2)
        command = "nc -w 5 -zv " + floatip_server + " 22 2>&1"
        (stdin, stdout, stderr) = ssh.exec_command(command)
    except:
        logger.debug("Waiting for %s..." % floatip_client)
        time.sleep(6)
        # timeout -= 1

    # WRITE THE CORRECT WAY TO DO LOGGING
    i = 0
    # BUGFIX: readlines() drains the channel, so capture the output once and
    # reuse it; calling readlines() a second time returned [] and indexing
    # [0] raised IndexError.
    output = stdout.readlines()
    logger.info("First output: %s" % output)
    if "timed out" in output[0]:
        logger.info('\033[92m' + "TEST 1 [PASSED] "
                    "==> SSH BLOCKED" + '\033[0m')
        i = i + 1
    else:
        logger.debug('\033[91m' + "TEST 1 [FAILED] "
                     "==> SSH NOT BLOCKED" + '\033[0m')
        return

    # TEST 2: from the client, HTTP towards the server should work.
    try:
        ssh.connect(floatip_client, username="******",
                    password="******", timeout=2)
        command = "nc -w 5 -zv " + floatip_server + " 80 2>&1"
        (stdin, stdout, stderr) = ssh.exec_command(command)
    except:
        logger.debug("Waiting for %s..." % floatip_client)
        time.sleep(6)
        # timeout -= 1

    if "succeeded" in stdout.readlines()[0]:
        logger.info('\033[92m' + "TEST 2 [PASSED] "
                    "==> HTTP WORKS" + '\033[0m')
        i = i + 1
    else:
        logger.debug('\033[91m' + "TEST 2 [FAILED] "
                     "==> HTTP BLOCKED" + '\033[0m')
        return

    # CHANGE OF CLASSIFICATION: swap which ports are blocked/allowed.
    logger.info("Changing the classification")
    tacker_classi = "/home/opnfv/repos/functest/testcases/features/sfc/" + \
        TACKER_CHANGECLASSI
    subprocess.call(tacker_classi, shell=True)

    # TEST 3: after reclassification, HTTP should now be blocked.
    try:
        ssh.connect(floatip_client, username="******",
                    password="******", timeout=2)
        command = "nc -w 5 -zv " + floatip_server + " 80 2>&1"
        (stdin, stdout, stderr) = ssh.exec_command(command)
    except:
        logger.debug("Waiting for %s..." % floatip_client)
        time.sleep(6)
        # timeout -= 1

    if "timed out" in stdout.readlines()[0]:
        logger.info('\033[92m' + "TEST 3 [WORKS] "
                    "==> HTTP BLOCKED" + '\033[0m')
        i = i + 1
    else:
        logger.debug('\033[91m' + "TEST 3 [FAILED] "
                     "==> HTTP NOT BLOCKED" + '\033[0m')
        return

    # TEST 4: after reclassification, SSH should now work.
    try:
        ssh.connect(floatip_client, username="******",
                    password="******", timeout=2)
        command = "nc -w 5 -zv " + floatip_server + " 22 2>&1"
        (stdin, stdout, stderr) = ssh.exec_command(command)
    except:
        logger.debug("Waiting for %s..." % floatip_client)
        time.sleep(6)
        # timeout -= 1

    if "succeeded" in stdout.readlines()[0]:
        logger.info('\033[92m' + "TEST 4 [WORKS] "
                    "==> SSH WORKS" + '\033[0m')
        i = i + 1
    else:
        logger.debug('\033[91m' + "TEST 4 [FAILED] "
                     "==> SSH BLOCKED" + '\033[0m')
        return

    if i == 4:
        for x in range(0, 5):
            logger.info('\033[92m' + "SFC TEST WORKED"
                        " :) \n" + '\033[0m')

    sys.exit(0)
示例#24
0
def main():
    """Run the selected Rally scenario(s) and report a summary.

    Prepares the OpenStack clients, a shared volume type, a Glance image and
    a shared private network, runs either one scenario or all of them, then
    prints an ASCII summary table, optionally pushes the results to the DB,
    and cleans up the resources it created (unless --noclean is set).
    """
    global SUMMARY
    global network_dict
    start_time = time.time()
    stop_time = start_time

    # Validate the requested test name before creating any resource.
    if not (args.test_name in tests):
        logger.error('argument not valid')
        exit(-1)

    SUMMARY = []
    creds_nova = openstack_utils.get_credentials("nova")
    nova_client = novaclient.Client('2', **creds_nova)
    creds_neutron = openstack_utils.get_credentials("neutron")
    neutron_client = neutronclient.Client(**creds_neutron)
    creds_keystone = openstack_utils.get_credentials("keystone")
    keystone_client = keystoneclient.Client(**creds_keystone)
    glance_endpoint = keystone_client.service_catalog.url_for(
        service_type='image', endpoint_type='publicURL')
    glance_client = glanceclient.Client(1,
                                        glance_endpoint,
                                        token=keystone_client.auth_token)
    creds_cinder = openstack_utils.get_credentials("cinder")
    cinder_client = cinderclient.Client('2',
                                        creds_cinder['username'],
                                        creds_cinder['api_key'],
                                        creds_cinder['project_id'],
                                        creds_cinder['auth_url'],
                                        service_type="volume")

    client_dict['neutron'] = neutron_client

    # Create a volume type only when none exists yet; remember whether we
    # created it so we can delete it at the end.
    volume_types = openstack_utils.list_volume_types(cinder_client,
                                                     private=False)
    if not volume_types:
        volume_type = openstack_utils.create_volume_type(
            cinder_client, CINDER_VOLUME_TYPE_NAME)
        if not volume_type:
            logger.error("Failed to create volume type...")
            exit(-1)
        else:
            logger.debug("Volume type '%s' created succesfully..." %
                         CINDER_VOLUME_TYPE_NAME)
    else:
        logger.debug("Using existing volume type(s)...")

    # Reuse the Glance image when present; otherwise create it and remember
    # that we own it (so we delete it during cleanup).
    image_id = openstack_utils.get_image_id(glance_client, GLANCE_IMAGE_NAME)
    image_exists = False

    if image_id == '':
        logger.debug("Creating image '%s' from '%s'..." %
                     (GLANCE_IMAGE_NAME, GLANCE_IMAGE_PATH))
        image_id = openstack_utils.create_glance_image(glance_client,
                                                       GLANCE_IMAGE_NAME,
                                                       GLANCE_IMAGE_PATH)
        if not image_id:
            logger.error("Failed to create the Glance image...")
            exit(-1)
        else:
            logger.debug("Image '%s' with ID '%s' created succesfully ." %
                         (GLANCE_IMAGE_NAME, image_id))
    else:
        logger.debug("Using existing image '%s' with ID '%s'..." %
                     (GLANCE_IMAGE_NAME, image_id))
        image_exists = True

    logger.debug("Creating network '%s'..." % PRIVATE_NET_NAME)
    network_dict = openstack_utils.create_network_full(
        logger, client_dict['neutron'], PRIVATE_NET_NAME, PRIVATE_SUBNET_NAME,
        ROUTER_NAME, PRIVATE_SUBNET_CIDR)
    if not network_dict:
        logger.error("Failed to create network...")
        exit(-1)
    else:
        # Rally scenarios run in other tenants, so the network must be shared.
        if not openstack_utils.update_neutron_net(
                client_dict['neutron'], network_dict['net_id'], shared=True):
            logger.error("Failed to update network...")
            exit(-1)
        else:
            logger.debug("Network '%s' available..." % PRIVATE_NET_NAME)

    if args.test_name == "all":
        for test_name in tests:
            if not (test_name == 'all' or test_name == 'vm'):
                run_task(test_name)
    else:
        logger.debug("Test name: " + args.test_name)
        run_task(args.test_name)

    report = ("\n"
              "                                                              "
              "\n"
              "                     Rally Summary Report\n"
              "\n"
              "+===================+============+===============+===========+"
              "\n"
              "| Module            | Duration   | nb. Test Run  | Success   |"
              "\n"
              "+===================+============+===============+===========+"
              "\n")
    payload = []
    stop_time = time.time()

    # For each scenario we draw a row for the table.
    total_duration = 0.0
    total_nb_tests = 0
    total_success = 0.0
    for s in SUMMARY:
        name = "{0:<17}".format(s['test_name'])
        duration = float(s['overall_duration'])
        total_duration += duration
        duration = time.strftime("%M:%S", time.gmtime(duration))
        duration = "{0:<10}".format(duration)
        nb_tests = "{0:<13}".format(s['nb_tests'])
        total_nb_tests += int(s['nb_tests'])
        success = "{0:<10}".format(str(s['success']) + '%')
        total_success += float(s['success'])
        report += ("" + "| " + name + " | " + duration + " | " + nb_tests +
                   " | " + success + "|\n" +
                   "+-------------------+------------"
                   "+---------------+-----------+\n")
        payload.append({
            'module': name,
            'details': {
                'duration': s['overall_duration'],
                'nb tests': s['nb_tests'],
                'success': s['success']
            }
        })

    total_duration_str = time.strftime("%H:%M:%S", time.gmtime(total_duration))
    total_duration_str2 = "{0:<10}".format(total_duration_str)
    total_nb_tests_str = "{0:<13}".format(total_nb_tests)
    # BUGFIX: keep the numeric average for the pass/fail evaluation below.
    # The original code rebound total_success to the *formatted string* and
    # then compared it against 90, which (under Python 2 str/int ordering)
    # was always True, making the success criteria meaningless.  Also guard
    # against an empty SUMMARY to avoid a ZeroDivisionError.
    success_avg = total_success / len(SUMMARY) if SUMMARY else 0.0
    total_success = "{:0.2f}".format(success_avg)
    total_success_str = "{0:<10}".format(str(total_success) + '%')
    report += "+===================+============+===============+===========+"
    report += "\n"
    report += ("| TOTAL:            | " + total_duration_str2 + " | " +
               total_nb_tests_str + " | " + total_success_str + "|\n")
    report += "+===================+============+===============+===========+"
    report += "\n"

    logger.info("\n" + report)
    payload.append({
        'summary': {
            'duration': total_duration,
            'nb tests': total_nb_tests,
            'nb success': total_success
        }
    })

    # Evaluation of the success criteria.
    status = "failed"
    # for Rally we decided that the overall success rate must be above 90%
    if success_avg >= 90:
        status = "passed"

    if args.sanity:
        case_name = "rally_sanity"
    else:
        case_name = "rally_full"

    if args.report:
        logger.debug("Pushing Rally summary into DB...")
        functest_utils.push_results_to_db("functest", case_name, None,
                                          start_time, stop_time, status,
                                          payload)
    if args.noclean:
        exit(0)

    # Cleanup: only remove the image/volume type if this run created them.
    if not image_exists:
        logger.debug("Deleting image '%s' with ID '%s'..." %
                     (GLANCE_IMAGE_NAME, image_id))
        if not openstack_utils.delete_glance_image(nova_client, image_id):
            logger.error("Error deleting the glance image")

    if not volume_types:
        logger.debug("Deleting volume type '%s'..." % CINDER_VOLUME_TYPE_NAME)
        if not openstack_utils.delete_volume_type(cinder_client, volume_type):
            logger.error("Error in deleting volume type...")
示例#25
0
def main():

    creds_nova = openstack_utils.get_credentials("nova")
    nova_client = novaclient.Client('2', **creds_nova)
    creds_neutron = openstack_utils.get_credentials("neutron")
    neutron_client = neutronclient.Client(**creds_neutron)
    creds_keystone = openstack_utils.get_credentials("keystone")
    keystone_client = keystoneclient.Client(**creds_keystone)
    glance_endpoint = keystone_client.service_catalog.url_for(
        service_type='image', endpoint_type='publicURL')
    glance_client = glanceclient.Client(1, glance_endpoint,
                                        token=keystone_client.auth_token)
    EXIT_CODE = -1

    image_id = None
    flavor = None

    # Check if the given image exists
    image_id = openstack_utils.get_image_id(glance_client, GLANCE_IMAGE_NAME)
    if image_id != '':
        logger.info("Using existing image '%s'..." % GLANCE_IMAGE_NAME)
        global image_exists
        image_exists = True
    else:
        logger.info("Creating image '%s' from '%s'..." % (GLANCE_IMAGE_NAME,
                                                          GLANCE_IMAGE_PATH))
        image_id = openstack_utils.create_glance_image(glance_client,
                                                       GLANCE_IMAGE_NAME,
                                                       GLANCE_IMAGE_PATH)
        if not image_id:
            logger.error("Failed to create a Glance image...")
            return(EXIT_CODE)
        logger.debug("Image '%s' with ID=%s created successfully."
                     % (GLANCE_IMAGE_NAME, image_id))

    network_dic = openstack_utils.create_network_full(logger,
                                                      neutron_client,
                                                      PRIVATE_NET_NAME,
                                                      PRIVATE_SUBNET_NAME,
                                                      ROUTER_NAME,
                                                      PRIVATE_SUBNET_CIDR)
    if not network_dic:
        logger.error(
            "There has been a problem when creating the neutron network")
        return(EXIT_CODE)

    network_id = network_dic["net_id"]

    sg_id = create_security_group(neutron_client)

    # Check if the given flavor exists
    try:
        flavor = nova_client.flavors.find(name=FLAVOR)
        logger.info("Using existing Flavor '%s'..." % FLAVOR)
    except:
        logger.error("Flavor '%s' not found." % FLAVOR)
        logger.info("Available flavors are: ")
        pMsg(nova_client.flavor.list())
        return(EXIT_CODE)

    # Deleting instances if they exist
    servers = nova_client.servers.list()
    for server in servers:
        if server.name == NAME_VM_1 or server.name == NAME_VM_2:
            logger.info("Instance %s found. Deleting..." % server.name)
            server.delete()

    # boot VM 1
    start_time = time.time()
    stop_time = start_time
    logger.info("vPing Start Time:'%s'" % (
        datetime.datetime.fromtimestamp(start_time).strftime(
            '%Y-%m-%d %H:%M:%S')))

    logger.info("Creating instance '%s'..." % NAME_VM_1)
    logger.debug(
        "Configuration:\n name=%s \n flavor=%s \n image=%s \n "
        "network=%s \n" % (NAME_VM_1, flavor, image_id, network_id))
    vm1 = nova_client.servers.create(
        name=NAME_VM_1,
        flavor=flavor,
        image=image_id,
        nics=[{"net-id": network_id}]
    )

    # wait until VM status is active
    if not waitVmActive(nova_client, vm1):
        logger.error("Instance '%s' cannot be booted. Status is '%s'" % (
            NAME_VM_1, openstack_utils.get_instance_status(nova_client, vm1)))
        return (EXIT_CODE)
    else:
        logger.info("Instance '%s' is ACTIVE." % NAME_VM_1)

    # Retrieve IP of first VM
    test_ip = vm1.networks.get(PRIVATE_NET_NAME)[0]
    logger.debug("Instance '%s' got private ip '%s'." % (NAME_VM_1, test_ip))

    logger.info("Adding '%s' to security group '%s'..."
                % (NAME_VM_1, SECGROUP_NAME))
    openstack_utils.add_secgroup_to_instance(nova_client, vm1.id, sg_id)

    # boot VM 2
    logger.info("Creating instance '%s'..." % NAME_VM_2)
    logger.debug(
        "Configuration:\n name=%s \n flavor=%s \n image=%s \n "
        "network=%s \n" % (NAME_VM_2, flavor, image_id, network_id))
    vm2 = nova_client.servers.create(
        name=NAME_VM_2,
        flavor=flavor,
        image=image_id,
        nics=[{"net-id": network_id}]
    )

    if not waitVmActive(nova_client, vm2):
        logger.error("Instance '%s' cannot be booted. Status is '%s'" % (
            NAME_VM_2, openstack_utils.get_instance_status(nova_client, vm2)))
        return (EXIT_CODE)
    else:
        logger.info("Instance '%s' is ACTIVE." % NAME_VM_2)

    logger.info("Adding '%s' to security group '%s'..." % (NAME_VM_2,
                                                           SECGROUP_NAME))
    openstack_utils.add_secgroup_to_instance(nova_client, vm2.id, sg_id)

    logger.info("Creating floating IP for VM '%s'..." % NAME_VM_2)
    floatip_dic = openstack_utils.create_floating_ip(neutron_client)
    floatip = floatip_dic['fip_addr']
    # floatip_id = floatip_dic['fip_id']

    if floatip is None:
        logger.error("Cannot create floating IP.")
        return (EXIT_CODE)
    logger.info("Floating IP created: '%s'" % floatip)

    logger.info("Associating floating ip: '%s' to VM '%s' "
                % (floatip, NAME_VM_2))
    if not openstack_utils.add_floating_ip(nova_client, vm2.id, floatip):
        logger.error("Cannot associate floating IP to VM.")
        return (EXIT_CODE)

    logger.info("Trying to establish SSH connection to %s..." % floatip)
    username = '******'
    password = '******'
    ssh = paramiko.SSHClient()
    ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())

    timeout = 50
    nolease = False
    got_ip = False
    discover_count = 0
    cidr_first_octet = PRIVATE_SUBNET_CIDR.split('.')[0]
    while timeout > 0:
        try:
            ssh.connect(floatip, username=username,
                        password=password, timeout=2)
            logger.debug("SSH connection established to %s." % floatip)
            break
        except:
            logger.debug("Waiting for %s..." % floatip)
            time.sleep(6)
            timeout -= 1

        console_log = vm2.get_console_output()

        # print each "Sending discover" captured on the console log
        if (len(re.findall("Sending discover", console_log)) >
                discover_count and not got_ip):
            discover_count += 1
            logger.debug("Console-log '%s': Sending discover..."
                         % NAME_VM_2)

        # check if eth0 got an ip,the line looks like this:
        # "inet addr:192.168."....
        # if the dhcp agent fails to assing ip, this line will not appear
        if "inet addr:" + cidr_first_octet in console_log and not got_ip:
            got_ip = True
            logger.debug("The instance '%s' succeeded to get the IP "
                         "from the dhcp agent.")

        # if dhcp doesnt work,it shows "No lease, failing".The test will fail
        if "No lease, failing" in console_log and not nolease and not got_ip:
                nolease = True
                logger.debug("Console-log '%s': No lease, failing..."
                             % NAME_VM_2)
                logger.info("The instance failed to get an IP from the "
                            "DHCP agent. The test will probably timeout...")

    if timeout == 0:  # 300 sec timeout (5 min)
        logger.error("Cannot establish connection to IP '%s'. Aborting"
                     % floatip)
        return (EXIT_CODE)

    scp = SCPClient(ssh.get_transport())

    ping_script = REPO_PATH + "testcases/OpenStack/vPing/ping.sh"
    try:
        scp.put(ping_script, "~/")
    except:
        logger.error("Cannot SCP the file '%s' to VM '%s'"
                     % (ping_script, floatip))
        return (EXIT_CODE)

    cmd = 'chmod 755 ~/ping.sh'
    (stdin, stdout, stderr) = ssh.exec_command(cmd)
    for line in stdout.readlines():
        print line

    logger.info("Waiting for ping...")
    sec = 0
    stop_time = time.time()
    duration = 0

    cmd = '~/ping.sh ' + test_ip
    flag = False
    status = "FAIL"

    while True:
        time.sleep(1)
        (stdin, stdout, stderr) = ssh.exec_command(cmd)
        output = stdout.readlines()

        for line in output:
            if "vPing OK" in line:
                logger.info("vPing detected!")
                status = "PASS"
                # we consider start time at VM1 booting
                stop_time = time.time()
                duration = round(stop_time - start_time, 1)
                logger.info("vPing duration:'%s' s." % duration)
                EXIT_CODE = 0
                flag = True
                break

            elif sec == PING_TIMEOUT:
                logger.info("Timeout reached.")
                flag = True
                break
        if flag:
            break
        logger.debug("Pinging %s. Waiting for response..." % test_ip)
        sec += 1

    if status == "PASS":
        logger.info("vPing OK")
    else:
        duration = 0
        logger.error("vPing FAILED")

    if args.report:
        try:
            logger.debug("Pushing vPing SSH results into DB...")
            functest_utils.push_results_to_db("functest",
                                              "vping_ssh",
                                              logger,
                                              start_time,
                                              stop_time,
                                              status,
                                              details={'timestart': start_time,
                                                       'duration': duration,
                                                       'status': status})
        except:
            logger.error("Error pushing results into Database '%s'"
                         % sys.exc_info()[0])

    exit(EXIT_CODE)
示例#26
0
def main():
    """Exercise BGPVPN network association across two networks.

    Creates two networks (one subnet each), boots five VMs spread over two
    compute nodes, then creates a BGPVPN with differing import/export route
    targets (eRT<>iRT), associates both networks to it and checks the
    expected ping connectivity/isolation after each step.  Finally the VPN
    is updated to eRT=iRT and full connectivity is verified.

    All created OpenStack resources are cleaned up in the ``finally`` block
    even on failure; the compiled results summary is returned.
    """
    results = Results(COMMON_CONFIG.line_length)

    # Header rows of the textual result summary.
    results.add_to_summary(0, "=")
    results.add_to_summary(2, "STATUS", "SUBTEST")
    results.add_to_summary(0, "=")

    nova_client = os_utils.get_nova_client()
    neutron_client = os_utils.get_neutron_client()
    glance_client = os_utils.get_glance_client()

    # Track the id of every created resource so the finally-block cleanup
    # can delete them no matter where a failure occurs.
    (floatingip_ids, instance_ids, router_ids, network_ids, image_ids,
     subnet_ids, interfaces, bgpvpn_ids) = ([] for i in range(8))

    try:
        image_id = os_utils.create_glance_image(
            glance_client,
            TESTCASE_CONFIG.image_name,
            COMMON_CONFIG.image_path,
            disk=COMMON_CONFIG.image_format,
            container="bare",
            public='public')
        image_ids.append(image_id)

        # Network 1 + subnet 1 and network 2 + subnet 2; no router is
        # created here — connectivity between them comes from the BGPVPN.
        network_1_id = test_utils.create_net(neutron_client,
                                             TESTCASE_CONFIG.net_1_name)
        subnet_1_id = test_utils.create_subnet(neutron_client,
                                               TESTCASE_CONFIG.subnet_1_name,
                                               TESTCASE_CONFIG.subnet_1_cidr,
                                               network_1_id)

        network_2_id = test_utils.create_net(neutron_client,
                                             TESTCASE_CONFIG.net_2_name)

        subnet_2_id = test_utils.create_subnet(neutron_client,
                                               TESTCASE_CONFIG.subnet_2_name,
                                               TESTCASE_CONFIG.subnet_2_cidr,
                                               network_2_id)
        network_ids.extend([network_1_id, network_2_id])
        subnet_ids.extend([subnet_1_id, subnet_2_id])

        sg_id = os_utils.create_security_group_full(
            neutron_client, TESTCASE_CONFIG.secgroup_name,
            TESTCASE_CONFIG.secgroup_descr)

        # Needs at least two compute nodes so VMs can be pinned to
        # different hosts via availability-zone hints.
        compute_nodes = test_utils.assert_and_get_compute_nodes(nova_client)

        av_zone_1 = "nova:" + compute_nodes[0]
        av_zone_2 = "nova:" + compute_nodes[1]

        # boot INSTANCES
        vm_2 = test_utils.create_instance(
            nova_client,
            TESTCASE_CONFIG.instance_2_name,
            image_id,
            network_1_id,
            sg_id,
            secgroup_name=TESTCASE_CONFIG.secgroup_name,
            compute_node=av_zone_1)
        vm_2_ip = test_utils.get_instance_ip(vm_2)

        vm_3 = test_utils.create_instance(
            nova_client,
            TESTCASE_CONFIG.instance_3_name,
            image_id,
            network_1_id,
            sg_id,
            secgroup_name=TESTCASE_CONFIG.secgroup_name,
            compute_node=av_zone_2)
        vm_3_ip = test_utils.get_instance_ip(vm_3)

        vm_5 = test_utils.create_instance(
            nova_client,
            TESTCASE_CONFIG.instance_5_name,
            image_id,
            network_2_id,
            sg_id,
            secgroup_name=TESTCASE_CONFIG.secgroup_name,
            compute_node=av_zone_2)
        vm_5_ip = test_utils.get_instance_ip(vm_5)

        # We boot vm5 first because we need vm5_ip for vm4 userdata
        u4 = test_utils.generate_ping_userdata([vm_5_ip])
        vm_4 = test_utils.create_instance(
            nova_client,
            TESTCASE_CONFIG.instance_4_name,
            image_id,
            network_2_id,
            sg_id,
            secgroup_name=TESTCASE_CONFIG.secgroup_name,
            compute_node=av_zone_1,
            userdata=u4)
        vm_4_ip = test_utils.get_instance_ip(vm_4)

        # We boot VM1 at the end because we need to get the IPs first
        # to generate the userdata
        u1 = test_utils.generate_ping_userdata(
            [vm_2_ip, vm_3_ip, vm_4_ip, vm_5_ip])
        vm_1 = test_utils.create_instance(
            nova_client,
            TESTCASE_CONFIG.instance_1_name,
            image_id,
            network_1_id,
            sg_id,
            secgroup_name=TESTCASE_CONFIG.secgroup_name,
            compute_node=av_zone_1,
            userdata=u1)
        instance_ids.extend([vm_1.id, vm_2.id, vm_3.id, vm_4.id, vm_5.id])

        # VPN with different import and export route targets: networks
        # associated to it should NOT see each other yet.
        msg = ("Create VPN with eRT<>iRT")
        results.record_action(msg)
        vpn_name = "sdnvpn-" + str(randint(100000, 999999))
        kwargs = {
            "import_targets": TESTCASE_CONFIG.targets1,
            "export_targets": TESTCASE_CONFIG.targets2,
            "route_distinguishers": TESTCASE_CONFIG.route_distinguishers,
            "name": vpn_name
        }
        bgpvpn = test_utils.create_bgpvpn(neutron_client, **kwargs)
        bgpvpn_id = bgpvpn['bgpvpn']['id']
        logger.debug("VPN created details: %s" % bgpvpn)
        bgpvpn_ids.append(bgpvpn_id)

        msg = ("Associate network '%s' to the VPN." %
               TESTCASE_CONFIG.net_1_name)
        results.record_action(msg)
        results.add_to_summary(0, "-")

        test_utils.create_network_association(neutron_client, bgpvpn_id,
                                              network_1_id)

        # Wait for VMs to be ready.
        instances_up = test_utils.wait_for_instances_up(vm_2, vm_3, vm_5)
        instances_dhcp_up = test_utils.wait_for_instances_get_dhcp(vm_1, vm_4)

        if (not instances_up or not instances_dhcp_up):
            logger.error("One or more instances are down")
            # TODO: Handle this appropriately

        # vm_1..vm_3 share network 1, so they should reach each other;
        # vm_4 is on network 2, which is not yet associated -> FAIL expected.
        results.get_ping_status(vm_1, vm_2, expected="PASS", timeout=200)
        results.get_ping_status(vm_1, vm_3, expected="PASS", timeout=30)
        results.get_ping_status(vm_1, vm_4, expected="FAIL", timeout=30)

        msg = ("Associate network '%s' to the VPN." %
               TESTCASE_CONFIG.net_2_name)
        results.add_to_summary(0, "-")
        results.record_action(msg)
        results.add_to_summary(0, "-")
        test_utils.create_network_association(neutron_client, bgpvpn_id,
                                              network_2_id)

        test_utils.wait_for_bgp_net_assocs(neutron_client, bgpvpn_id,
                                           network_1_id, network_2_id)

        logger.info("Waiting for the VMs to connect to each other using the"
                    " updated network configuration")
        test_utils.wait_before_subtest()

        # Same-network ping on network 2 should work regardless of the RTs.
        results.get_ping_status(vm_4, vm_5, expected="PASS", timeout=30)
        # TODO enable again when isolation in VPN with iRT != eRT works
        # results.get_ping_status(vm_1, vm_4, expected="FAIL", timeout=30)
        # results.get_ping_status(vm_1, vm_5, expected="FAIL", timeout=30)

        # With eRT=iRT both networks import each other's routes, so
        # cross-network pings should now succeed.
        msg = ("Update VPN with eRT=iRT ...")
        results.add_to_summary(0, "-")
        results.record_action(msg)
        results.add_to_summary(0, "-")
        kwargs = {
            "import_targets": TESTCASE_CONFIG.targets1,
            "export_targets": TESTCASE_CONFIG.targets1,
            "name": vpn_name
        }
        bgpvpn = test_utils.update_bgpvpn(neutron_client, bgpvpn_id, **kwargs)

        logger.info("Waiting for the VMs to connect to each other using the"
                    " updated network configuration")
        test_utils.wait_before_subtest()

        results.get_ping_status(vm_1, vm_4, expected="PASS", timeout=30)
        results.get_ping_status(vm_1, vm_5, expected="PASS", timeout=30)

    except Exception as e:
        logger.error("exception occurred while executing testcase_1: %s", e)
        raise
    finally:
        # Best-effort teardown of everything recorded above; runs even when
        # the exception is re-raised.
        test_utils.cleanup_nova(nova_client, instance_ids)
        test_utils.cleanup_glance(glance_client, image_ids)
        test_utils.cleanup_neutron(neutron_client, floatingip_ids, bgpvpn_ids,
                                   interfaces, subnet_ids, router_ids,
                                   network_ids)

    return results.compile_summary()
示例#27
0
def main():
    """Prepare the SFC test environment and start the traffic endpoints.

    Connects to the installer, configures the compute/controller nodes,
    creates the image, flavor, network and security group, boots a client
    and a server VM according to a shuffled topology, assigns floating IPs
    to both and starts an HTTP server on the server VM.  Exits non-zero on
    any setup failure.
    """
    deploymentHandler = DeploymentFactory.get_handler(
        INSTALLER["type"],
        INSTALLER["ip"],
        INSTALLER["user"],
        installer_pwd=INSTALLER["password"])

    # Restrict node discovery to the configured cluster when one is set.
    cluster = INSTALLER["cluster"]
    openstack_nodes = (deploymentHandler.get_nodes({'cluster': cluster}) if
                       cluster is not None else deploymentHandler.get_nodes())

    controller_nodes = [
        node for node in openstack_nodes if node.is_controller()
    ]
    compute_nodes = [node for node in openstack_nodes if node.is_compute()]

    odl_ip, odl_port = test_utils.get_odl_ip_port(openstack_nodes)

    for compute in compute_nodes:
        logger.info("This is a compute: %s" % compute.info)

    test_utils.setup_compute_node(SUBNET_CIDR, compute_nodes)
    test_utils.configure_iptables(controller_nodes)

    test_utils.download_image(IMAGE_URL, IMAGE_PATH)
    # Reuse the flavor when it already exists, otherwise create it.
    _, custom_flv_id = os_utils.get_or_create_flavor(FLAVOR,
                                                     RAM_SIZE_IN_MB,
                                                     DISK_SIZE_IN_GB,
                                                     VCPU_COUNT,
                                                     public=True)
    if not custom_flv_id:
        logger.error("Failed to create custom flavor")
        sys.exit(1)

    glance_client = os_utils.get_glance_client()
    neutron_client = os_utils.get_neutron_client()
    nova_client = os_utils.get_nova_client()

    image_id = os_utils.create_glance_image(glance_client,
                                            IMAGE_NAME,
                                            IMAGE_PATH,
                                            IMAGE_FORMAT,
                                            public='public')

    network_id = test_utils.setup_neutron(neutron_client, NET_NAME,
                                          SUBNET_NAME, ROUTER_NAME,
                                          SUBNET_CIDR)

    sg_id = test_utils.create_security_groups(neutron_client, SECGROUP_NAME,
                                              SECGROUP_DESCR)

    vnfs = ['testVNF1', 'testVNF2']

    # Fixed seed so the VNF placement is reproducible across runs.
    topo_seed = 0
    testTopology = topo_shuffler.topology(vnfs, seed=topo_seed)

    logger.info('This test is run with the topology {0}'.format(
        testTopology['id']))
    logger.info('Topology description: {0}'.format(
        testTopology['description']))

    # Client and server are placed in the availability zones chosen by
    # the shuffled topology.
    client_instance = test_utils.create_instance(
        nova_client,
        CLIENT,
        FLAVOR,
        image_id,
        network_id,
        sg_id,
        av_zone=testTopology['client'])

    server_instance = test_utils.create_instance(
        nova_client,
        SERVER,
        FLAVOR,
        image_id,
        network_id,
        sg_id,
        av_zone=testTopology['server'])

    # First fixed address of the server on the test network.
    srv_prv_ip = server_instance.networks.get(NET_NAME)[0]
    logger.info('SERVER PRIVATE IP: {0}'.format(srv_prv_ip))

    logger.info("Assigning floating IPs to instances")
    server_floating_ip = test_utils.assign_floating_ip(nova_client,
                                                       neutron_client,
                                                       server_instance.id)
    client_floating_ip = test_utils.assign_floating_ip(nova_client,
                                                       neutron_client,
                                                       client_instance.id)

    # Both VMs must be reachable before traffic tests can start.
    for ip in (server_floating_ip, client_floating_ip):
        logger.info("Checking connectivity towards floating IP [%s]" % ip)
        if not test_utils.ping(ip, retries=50, retry_timeout=1):
            logger.error("Cannot ping floating IP [%s]" % ip)
            sys.exit(1)
        logger.info("Successful ping to floating IP [%s]" % ip)

    logger.info("Starting HTTP server on %s" % server_floating_ip)
    if not test_utils.start_http_server(server_floating_ip):
        logger.error('\033[91mFailed to start HTTP server on %s\033[0m' %
                     server_floating_ip)
        sys.exit(1)
示例#28
0
文件: testcase_4.py 项目: rski/sdnvpn
def main():
    """Exercise BGPVPN router association (sdnvpn testcase 4).

    Creates network 1 with a router and network 2 without one, boots five
    VMs across two compute nodes, associates the router and later network 2
    to a BGPVPN with differing import/export route targets (eRT<>iRT), and
    checks the expected ping results.  The VPN is then updated to eRT=iRT
    and full cross-network connectivity is verified.

    Returns the compiled results summary.  NOTE(review): unlike the
    network-association testcase, no cleanup of created resources happens
    here — presumably handled elsewhere; confirm before reuse.
    """
    results = Results(COMMON_CONFIG.line_length)

    # Summary header.
    results.add_to_summary(0, "=")
    results.add_to_summary(2, "STATUS", "SUBTEST")
    results.add_to_summary(0, "=")

    nova_client = os_utils.get_nova_client()
    neutron_client = os_utils.get_neutron_client()
    glance_client = os_utils.get_glance_client()

    image_id = os_utils.create_glance_image(glance_client,
                                            TESTCASE_CONFIG.image_name,
                                            COMMON_CONFIG.image_path,
                                            disk=COMMON_CONFIG.image_format,
                                            container="bare",
                                            public=True)
    # Network 1 comes with a subnet and a router (needed for the router
    # association below); network 2 gets only a subnet.
    network_1_id, _, router_1_id = test_utils.create_network(
        neutron_client, TESTCASE_CONFIG.net_1_name,
        TESTCASE_CONFIG.subnet_1_name, TESTCASE_CONFIG.subnet_1_cidr,
        TESTCASE_CONFIG.router_1_name)

    network_2_id = test_utils.create_net(neutron_client,
                                         TESTCASE_CONFIG.net_2_name)
    test_utils.create_subnet(neutron_client, TESTCASE_CONFIG.subnet_2_name,
                             TESTCASE_CONFIG.subnet_2_cidr, network_2_id)

    sg_id = os_utils.create_security_group_full(neutron_client,
                                                TESTCASE_CONFIG.secgroup_name,
                                                TESTCASE_CONFIG.secgroup_descr)

    # Needs at least two compute nodes so VMs can be pinned to
    # different hosts via availability-zone hints.
    compute_nodes = test_utils.assert_and_get_compute_nodes(nova_client)

    av_zone_1 = "nova:" + compute_nodes[0]
    av_zone_2 = "nova:" + compute_nodes[1]

    # boot INSTANCES
    vm_2 = test_utils.create_instance(
        nova_client,
        TESTCASE_CONFIG.instance_2_name,
        image_id,
        network_1_id,
        sg_id,
        secgroup_name=TESTCASE_CONFIG.secgroup_name,
        compute_node=av_zone_1)
    # First fixed IP of the VM's first network (Python 2 dict iteration).
    vm_2_ip = vm_2.networks.itervalues().next()[0]

    vm_3 = test_utils.create_instance(
        nova_client,
        TESTCASE_CONFIG.instance_3_name,
        image_id,
        network_1_id,
        sg_id,
        secgroup_name=TESTCASE_CONFIG.secgroup_name,
        compute_node=av_zone_2)
    vm_3_ip = vm_3.networks.itervalues().next()[0]

    vm_5 = test_utils.create_instance(
        nova_client,
        TESTCASE_CONFIG.instance_5_name,
        image_id,
        network_2_id,
        sg_id,
        secgroup_name=TESTCASE_CONFIG.secgroup_name,
        compute_node=av_zone_2)
    vm_5_ip = vm_5.networks.itervalues().next()[0]

    # We boot vm5 first because we need vm5_ip for vm4 userdata
    u4 = test_utils.generate_ping_userdata([vm_5_ip])
    vm_4 = test_utils.create_instance(
        nova_client,
        TESTCASE_CONFIG.instance_4_name,
        image_id,
        network_2_id,
        sg_id,
        secgroup_name=TESTCASE_CONFIG.secgroup_name,
        compute_node=av_zone_1,
        userdata=u4)
    vm_4_ip = vm_4.networks.itervalues().next()[0]

    # We boot VM1 at the end because we need to get the IPs first to generate
    # the userdata
    u1 = test_utils.generate_ping_userdata(
        [vm_2_ip, vm_3_ip, vm_4_ip, vm_5_ip])
    vm_1 = test_utils.create_instance(
        nova_client,
        TESTCASE_CONFIG.instance_1_name,
        image_id,
        network_1_id,
        sg_id,
        secgroup_name=TESTCASE_CONFIG.secgroup_name,
        compute_node=av_zone_1,
        userdata=u1)

    # VPN with different import and export route targets: associated
    # networks should NOT see each other yet.
    msg = ("Create VPN with eRT<>iRT")
    results.record_action(msg)
    vpn_name = "sdnvpn-" + str(randint(100000, 999999))
    kwargs = {
        "import_targets": TESTCASE_CONFIG.targets1,
        "export_targets": TESTCASE_CONFIG.targets2,
        "route_distinguishers": TESTCASE_CONFIG.route_distinguishers,
        "name": vpn_name
    }
    bgpvpn = os_utils.create_bgpvpn(neutron_client, **kwargs)
    bgpvpn_id = bgpvpn['bgpvpn']['id']
    logger.debug("VPN created details: %s" % bgpvpn)

    msg = ("Associate router '%s' to the VPN." % TESTCASE_CONFIG.router_1_name)
    results.record_action(msg)
    results.add_to_summary(0, "-")

    os_utils.create_router_association(neutron_client, bgpvpn_id, router_1_id)

    # Wait for VMs to get ips.
    instances_up = test_utils.wait_for_instances_up(vm_1, vm_2, vm_3, vm_4,
                                                    vm_5)

    if not instances_up:
        logger.error("One or more instances is down")
        # TODO Handle appropriately

    # vm_1..vm_3 share network 1 -> reachable; vm_4 is on the not-yet-
    # associated network 2 -> FAIL expected.
    results.get_ping_status(vm_1, vm_2, expected="PASS", timeout=200)
    results.get_ping_status(vm_1, vm_3, expected="PASS", timeout=30)
    results.get_ping_status(vm_1, vm_4, expected="FAIL", timeout=30)

    msg = ("Associate network '%s' to the VPN." % TESTCASE_CONFIG.net_2_name)
    results.add_to_summary(0, "-")
    results.record_action(msg)
    results.add_to_summary(0, "-")
    os_utils.create_network_association(neutron_client, bgpvpn_id,
                                        network_2_id)

    test_utils.wait_for_bgp_router_assoc(neutron_client, bgpvpn_id,
                                         router_1_id)
    test_utils.wait_for_bgp_net_assoc(neutron_client, bgpvpn_id, network_2_id)

    logger.info("Waiting for the VMs to connect to each other using the"
                " updated network configuration")
    test_utils.wait_before_subtest()

    # Same-network ping works; cross-network still isolated (eRT<>iRT).
    results.get_ping_status(vm_4, vm_5, expected="PASS", timeout=30)
    results.get_ping_status(vm_1, vm_4, expected="FAIL", timeout=30)
    results.get_ping_status(vm_1, vm_5, expected="FAIL", timeout=30)

    # With eRT=iRT both sides import each other's routes, so cross-network
    # pings should now succeed.
    msg = ("Update VPN with eRT=iRT ...")
    results.add_to_summary(0, "-")
    results.record_action(msg)
    results.add_to_summary(0, "-")
    kwargs = {
        "import_targets": TESTCASE_CONFIG.targets1,
        "export_targets": TESTCASE_CONFIG.targets1,
        "name": vpn_name
    }
    bgpvpn = os_utils.update_bgpvpn(neutron_client, bgpvpn_id, **kwargs)

    logger.info("Waiting for the VMs to connect to each other using the"
                " updated network configuration")
    test_utils.wait_before_subtest()

    results.get_ping_status(vm_1, vm_4, expected="PASS", timeout=30)
    results.get_ping_status(vm_1, vm_5, expected="PASS", timeout=30)

    results.add_to_summary(0, "=")
    logger.info("\n%s" % results.summary)

    return results.compile_summary(TESTCASE_CONFIG.success_criteria)
示例#29
0
def main():
    """Boot an example instance on a private network and give it a
    floating IP.

    Image, network/subnet/router, security group, instance and floating IP
    are all created from the module-level EXAMPLE_* settings; the script
    exits with -1 as soon as any step fails, 0 on success.
    """
    glance = os_utils.get_glance_client()
    neutron = os_utils.get_neutron_client()
    nova = os_utils.get_nova_client()

    img_id = os_utils.create_glance_image(glance,
                                          EXAMPLE_IMAGE_NAME,
                                          IMAGE_PATH,
                                          disk=IMAGE_FORMAT,
                                          container="bare",
                                          public=True)

    net_info = os_utils.create_network_full(
        neutron,
        EXAMPLE_PRIVATE_NET_NAME,
        EXAMPLE_PRIVATE_SUBNET_NAME,
        EXAMPLE_ROUTER_NAME,
        EXAMPLE_PRIVATE_SUBNET_CIDR)
    if not net_info:
        logger.error(
            "There has been a problem when creating the neutron network")
        sys.exit(-1)

    net_id = net_info["net_id"]

    secgroup_id = os_utils.create_security_group_full(neutron,
                                                      EXAMPLE_SECGROUP_NAME,
                                                      EXAMPLE_SECGROUP_DESCR)

    # Boot the example instance and wait until it is ACTIVE.
    logger.info("Creating instance '%s'..." % EXAMPLE_INSTANCE_NAME)
    logger.debug(
        "Configuration:\n name=%s \n flavor=%s \n image=%s \n "
        "network=%s \n"
        % (EXAMPLE_INSTANCE_NAME, EXAMPLE_FLAVOR, img_id, net_id))
    vm = os_utils.create_instance_and_wait_for_active(
        EXAMPLE_FLAVOR,
        img_id,
        net_id,
        EXAMPLE_INSTANCE_NAME)

    if vm is None:
        logger.error("Error while booting instance.")
        sys.exit(-1)

    # First fixed address the instance got on the example private network.
    vm_ip = vm.networks.get(EXAMPLE_PRIVATE_NET_NAME)[0]
    logger.debug("Instance '%s' got private ip '%s'." %
                 (EXAMPLE_INSTANCE_NAME, vm_ip))

    logger.info("Adding '%s' to security group '%s'..."
                % (EXAMPLE_INSTANCE_NAME, EXAMPLE_SECGROUP_NAME))
    os_utils.add_secgroup_to_instance(nova, vm.id, secgroup_id)

    logger.info("Creating floating IP for VM '%s'..." % EXAMPLE_INSTANCE_NAME)
    fip_info = os_utils.create_floating_ip(neutron)
    fip = fip_info['fip_addr']

    if fip is None:
        logger.error("Cannot create floating IP.")
        sys.exit(-1)
    logger.info("Floating IP created: '%s'" % fip)

    logger.info("Associating floating ip: '%s' to VM '%s' "
                % (fip, EXAMPLE_INSTANCE_NAME))
    if not os_utils.add_floating_ip(nova, vm.id, fip):
        logger.error("Cannot associate floating IP to VM.")
        sys.exit(-1)

    sys.exit(0)