Code Example #1
def create_tempest_resources():
    ks_creds = os_utils.get_credentials("keystone")
    logger.debug("Creating tenant and user for Tempest suite")
    keystone = ksclient.Client(**ks_creds)
    tenant_id = os_utils.create_tenant(keystone,
                                       TENANT_NAME,
                                       TENANT_DESCRIPTION)
    if tenant_id == '':
        logger.error("Error : Failed to create %s tenant" % TENANT_NAME)

    user_id = os_utils.create_user(keystone, USER_NAME, USER_PASSWORD,
                                   None, tenant_id)
    if user_id == '':
        logger.error("Error : Failed to create %s user" % USER_NAME)

    logger.debug("Creating private network for Tempest suite")
    creds_neutron = os_utils.get_credentials("neutron")
    neutron_client = neutronclient.Client(**creds_neutron)
    network_dic = os_utils.create_network_full(logger,
                                               neutron_client,
                                               PRIVATE_NET_NAME,
                                               PRIVATE_SUBNET_NAME,
                                               ROUTER_NAME,
                                               PRIVATE_SUBNET_CIDR)
    if network_dic:
        if not os_utils.update_neutron_net(neutron_client,
                                           network_dic['net_id'],
                                           shared=True):
            logger.error("Failed to update private network...")
            exit(-1)
        else:
            logger.debug("Network '%s' is available..." % PRIVATE_NET_NAME)
    else:
        logger.error("Private network creation failed")
        exit(-1)

    logger.debug("Creating image for Tempest suite")
    glance_endpoint = keystone.service_catalog.url_for(
        service_type='image', endpoint_type='publicURL')
    glance_client = glanceclient.Client(1, glance_endpoint,
                                        token=keystone.auth_token)
    # Check if the given image exists
    image_id = os_utils.get_image_id(glance_client, GLANCE_IMAGE_NAME)
    if image_id != '':
        logger.info("Using existing image '%s'..." % GLANCE_IMAGE_NAME)
    else:
        logger.info("Creating image '%s' from '%s'..." % (GLANCE_IMAGE_NAME,
                                                          GLANCE_IMAGE_PATH))
        image_id = os_utils.create_glance_image(glance_client,
                                                GLANCE_IMAGE_NAME,
                                                GLANCE_IMAGE_PATH)
        if not image_id:
            logger.error("Failed to create a Glance image...")
            exit(-1)
        logger.debug("Image '%s' with ID=%s created successfully."
                     % (GLANCE_IMAGE_NAME, image_id))
Code Example #2
    def deploy_vnf(self):
        cw = Clearwater(self.vnf['inputs'], self.orchestrator['object'],
                        self.logger)
        self.vnf['object'] = cw

        self.logger.info("Collect flavor id for all clearwater vm")
        flavor_exist, flavor_id = os_utils.get_or_create_flavor(
            "m1.small",
            self.vnf['requirements']['ram_min'],
            '20',
            '1',
            public=True)
        self.logger.debug("Flavor id: %s" % flavor_id)
        if not flavor_id:
            self.logger.info("Available flavors are: ")
            self.logger.info(self.nova_client.flavors.list())
            self.step_failure("Failed to find required flavor"
                              " for this deployment")

        cw.set_flavor_id(flavor_id)

        # VMs image
        if 'os_image' in self.vnf['requirements'].keys():
            image_id = os_utils.get_image_id(
                self.glance_client, self.vnf['requirements']['os_image'])
            if image_id == '':
                self.step_failure("Failed to find required OS image"
                                  " for clearwater VMs")
        else:
            self.step_failure("Failed to find required OS image"
                              " for clearwater VMs")

        cw.set_image_id(image_id)

        ext_net = os_utils.get_external_net(self.neutron_client)
        if not ext_net:
            self.step_failure("Failed to get external network")

        cw.set_external_network_name(ext_net)

        error = cw.deploy_vnf(self.vnf['blueprint'])
        if error:
            self.logger.error(error)
            return {'status': 'FAIL', 'result': error}
        else:
            return {'status': 'PASS', 'result': ''}
Code Example #3
    def deploy_orchestrator(self):  # pylint: disable=too-many-locals
        """
        Create network, subnet, router

        Bootstrap juju
        """
        self.__logger.info("Deployed Orchestrator")
        private_net_name = CONST.__getattribute__(
            'vnf_{}_private_net_name'.format(self.case_name))
        private_subnet_name = CONST.__getattribute__(
            'vnf_{}_private_subnet_name'.format(self.case_name))
        private_subnet_cidr = CONST.__getattribute__(
            'vnf_{}_private_subnet_cidr'.format(self.case_name))
        abot_router = CONST.__getattribute__(
            'vnf_{}_external_router'.format(self.case_name))
        dns_nameserver = CONST.__getattribute__(
            'vnf_{}_dns_nameserver'.format(self.case_name))
        ext_net_name = CONST.__getattribute__(
            'vnf_{}_external_network_name'.format(self.case_name))

        self.__logger.info("Creating full network ...")
        subnet_settings = SubnetSettings(name=private_subnet_name,
                                         cidr=private_subnet_cidr,
                                         dns_nameservers=dns_nameserver)
        network_settings = NetworkSettings(name=private_net_name,
                                           subnet_settings=[subnet_settings])
        network_creator = OpenStackNetwork(self.snaps_creds, network_settings)
        network_creator.create()
        self.created_object.append(network_creator)

        ext_net_name = snaps_utils.get_ext_net_name(self.snaps_creds)
        self.__logger.info("Creating network Router ....")
        router_creator = OpenStackRouter(
            self.snaps_creds,
            RouterSettings(
                name=abot_router,
                external_gateway=ext_net_name,
                internal_subnets=[subnet_settings.name]))
        router_creator.create()
        self.created_object.append(router_creator)
        self.__logger.info("Creating Flavor ....")
        flavor_settings = FlavorSettings(
            name=self.orchestrator['requirements']['flavor']['name'],
            ram=self.orchestrator['requirements']['flavor']['ram_min'],
            disk=10,
            vcpus=1)
        flavor_creator = OpenStackFlavor(self.snaps_creds, flavor_settings)
        self.__logger.info("Juju Bootstrap: Skip creation of flavors")
        flavor_creator.create()
        self.created_object.append(flavor_creator)
        self.__logger.info("Installing Dependency Packages .......")
        source_dir = "/src/epc-requirements/juju_bin_build"
        if os.path.exists(source_dir):
            shutil.rmtree(source_dir)
        os.makedirs(source_dir)
        os.environ['GOPATH'] = str(source_dir)
        os.environ['GOBIN'] = str(source_dir) + "/bin"
        os.environ['PATH'] = ((os.path.expandvars('$GOPATH')) + ":" +
                              (os.path.expandvars('$GOBIN')) + ":" +
                              (os.path.expandvars('$PATH')))
        os.system('go get -d -v github.com/juju/juju/...')
        os.chdir(source_dir + "/src" + "/github.com" + "/juju" + "/juju")
        os.system('git checkout tags/juju-2.2.5')
        os.system('go get github.com/rogpeppe/godeps')
        os.system('godeps -u dependencies.tsv')
        os.system('go install -v github.com/juju/juju/...')
        self.__logger.info("Creating Cloud for Abot-epc .....")
        os.system('juju add-cloud abot-epc -f {}'.format(self.filename))
        os.system('juju add-credential abot-epc -f {}'.format(self.filename))
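        # Generate juju image metadata for every uploaded image so the
        # bootstrap below can find them in this region.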
        for image_name in self.images.keys():
            self.__logger.info("Generating Metadata for %s", image_name)
            image_id = os_utils.get_image_id(self.glance_client, image_name)
            os.system(
                'juju metadata generate-image -d ~ -i {} -s {} -r '
                '{} -u {}'.format(
                    image_id, image_name,
                    os.environ.get("OS_REGION_NAME", self.default_region_name),
                    self.public_auth_url))
        net_id = os_utils.get_network_id(self.neutron_client, private_net_name)
        self.__logger.info("Credential information  : %s", net_id)
        juju_bootstrap_command = ('juju bootstrap abot-epc abot-controller '
                                  '--config network={} --metadata-source ~  '
                                  '--config ssl-hostname-verification=false '
                                  '--constraints mem=2G --bootstrap-series '
                                  'xenial '
                                  '--config use-floating-ip=true --debug'.
                                  format(net_id))
        os.system(juju_bootstrap_command)
        return True
Code Example #4
    def deploy_orchestrator(self):
        self.logger.info("Additional pre-configuration steps")
        nova_client = os_utils.get_nova_client()
        neutron_client = os_utils.get_neutron_client()
        glance_client = os_utils.get_glance_client()

        # Import images if needed
        # needs some images
        self.logger.info("Upload some OS images if it doesn't exist")
        temp_dir = os.path.join(self.data_dir, "tmp/")
        for image_name, image_url in self.images.iteritems():
            self.logger.info("image: %s, url: %s", image_name, image_url)
            image_id = ''
            try:
                image_id = os_utils.get_image_id(glance_client, image_name)
                self.logger.info("image_id: %s", image_id)
            except BaseException:
                self.logger.error("Unexpected error: %s", sys.exc_info()[0])

            if image_id == '':
                self.logger.info("""%s image doesn't exist on glance
                                 repository. Try downloading this image
                                 and upload on glance !""" % image_name)
                image_id = os_utils.download_and_add_image_on_glance(
                    glance_client, image_name, image_url, temp_dir)
            if image_id == '':
                self.logger.error("Failed to find or upload required OS "
                                  "image for this deployment")
                return False

        network_dic = os_utils.create_network_full(neutron_client,
                                                   "openbaton_mgmt",
                                                   "openbaton_mgmt_subnet",
                                                   "openbaton_router",
                                                   "192.168.100.0/24")

        # orchestrator VM flavor
        self.logger.info(
            "Check if orchestra Flavor is available, if not create one")
        flavor_exist, flavor_id = os_utils.get_or_create_flavor("orchestra",
                                                                "4096",
                                                                '20',
                                                                '2',
                                                                public=True)
        self.logger.debug("Flavor id: %s" % flavor_id)

        if not network_dic:
            self.logger.error("There has been a problem when creating the "
                              "neutron network")

        network_id = network_dic["net_id"]

        self.logger.info("Creating floating IP for VM in advance...")
        floatip_dic = os_utils.create_floating_ip(neutron_client)
        floatip = floatip_dic['fip_addr']

        if floatip is None:
            self.logger.error("Cannot create floating IP.")
            return False

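        # Cloud-init userdata: set a nameserver, inject an SSH public key,
        # download the Open Baton bootstrap and its config file, then run the
        # bootstrap and restart the NFVO and the generic VNFM.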
        userdata = "#!/bin/bash\n"
        userdata += "echo \"Executing userdata...\"\n"
        userdata += "set -x\n"
        userdata += "set -e\n"
        userdata += "echo \"Set nameserver to '8.8.8.8'...\"\n"
        userdata += "echo \"nameserver   8.8.8.8\" >> /etc/resolv.conf\n"
        userdata += "echo \"Install curl...\"\n"
        userdata += "apt-get install curl\n"
        userdata += "echo \"Inject public key...\"\n"
        userdata += ("echo \"ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCuPXrV3"
                     "geeHc6QUdyUr/1Z+yQiqLcOskiEGBiXr4z76MK4abiFmDZ18OMQlc"
                     "fl0p3kS0WynVgyaOHwZkgy/DIoIplONVr2CKBKHtPK+Qcme2PVnCtv"
                     "EqItl/FcD+1h5XSQGoa+A1TSGgCod/DPo+pes0piLVXP8Ph6QS1k7S"
                     "ic7JDeRQ4oT1bXYpJ2eWBDMfxIWKZqcZRiGPgMIbJ1iEkxbpeaAd9O"
                     "4MiM9nGCPESmed+p54uYFjwEDlAJZShcAZziiZYAvMZhvAhe6USljc"
                     "7YAdalAnyD/jwCHuwIrUw/lxo7UdNCmaUxeobEYyyFA1YVXzpNFZya"
                     "XPGAAYIJwEq/ openbaton@opnfv\" >> /home/ubuntu/.ssh/aut"
                     "horized_keys\n")
        userdata += "echo \"Download bootstrap...\"\n"
        userdata += ("curl -s %s " "> ./bootstrap\n" % self.bootstrap_link)
        userdata += ("curl -s %s"
                     "> ./config_file\n" % self.bootstrap_config_link)
        userdata += ("echo \"Disable usage of mysql...\"\n")
        userdata += "sed -i s/mysql=.*/mysql=no/g /config_file\n"
        userdata += ("echo \"Setting 'rabbitmq_broker_ip' to '%s'\"\n" %
                     floatip)
        userdata += ("sed -i s/rabbitmq_broker_ip=localhost/rabbitmq_broker_ip"
                     "=%s/g /config_file\n" % floatip)
        userdata += "echo \"Set autostart of components to 'false'\"\n"
        userdata += "export OPENBATON_COMPONENT_AUTOSTART=false\n"
        userdata += "echo \"Execute bootstrap...\"\n"
        bootstrap = "sh ./bootstrap release -configFile=./config_file"
        userdata += bootstrap + "\n"
        userdata += "echo \"Setting 'nfvo.plugin.timeout' to '300000'\"\n"
        userdata += ("echo \"nfvo.plugin.timeout=600000\" >> "
                     "/etc/openbaton/openbaton-nfvo.properties\n")
        userdata += (
            "wget %s -O /etc/openbaton/openbaton-vnfm-generic-user-data.sh\n" %
            self.userdata_file)
        userdata += "sed -i '113i\ \ \ \ sleep 60' " \
                    "/etc/openbaton/openbaton-vnfm-generic-user-data.sh\n"
        userdata += "echo \"Starting NFVO\"\n"
        userdata += "service openbaton-nfvo restart\n"
        userdata += "echo \"Starting Generic VNFM\"\n"
        userdata += "service openbaton-vnfm-generic restart\n"
        userdata += "echo \"...end of userdata...\"\n"

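        # Create an allow-all security group for the orchestrator VM and open
        # ICMP, TCP and UDP traffic in both directions.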
        sg_id = os_utils.create_security_group_full(neutron_client,
                                                    "orchestra-sec-group",
                                                    "allowall")

        os_utils.create_secgroup_rule(neutron_client, sg_id, "ingress", "icmp",
                                      0, 255)
        os_utils.create_secgroup_rule(neutron_client, sg_id, "egress", "icmp",
                                      0, 255)
        os_utils.create_secgroup_rule(neutron_client, sg_id, "ingress", "tcp",
                                      1, 65535)
        os_utils.create_secgroup_rule(neutron_client, sg_id, "ingress", "udp",
                                      1, 65535)
        os_utils.create_secgroup_rule(neutron_client, sg_id, "egress", "tcp",
                                      1, 65535)
        os_utils.create_secgroup_rule(neutron_client, sg_id, "egress", "udp",
                                      1, 65535)

        self.logger.info("Security group set")

        self.logger.info("Create instance....")
        self.logger.info(
            "flavor: m1.medium\n"
            "image: %s\n"
            "network_id: %s\n"
            "userdata: %s\n", self.imagename, network_id, userdata)

        instance = os_utils.create_instance_and_wait_for_active(
            "orchestra",
            os_utils.get_image_id(glance_client, self.imagename),
            network_id,
            "orchestra-openbaton",
            config_drive=False,
            userdata=userdata)

        self.ob_instance_id = instance.id

        self.logger.info("Adding sec group to orchestra instance")
        os_utils.add_secgroup_to_instance(nova_client, self.ob_instance_id,
                                          sg_id)

        self.logger.info("Associating floating ip: '%s' to VM '%s' ", floatip,
                         "orchestra-openbaton")
        if not os_utils.add_floating_ip(nova_client, instance.id, floatip):
            self.logger.error("Cannot associate floating IP to VM.")
            return False

        self.logger.info("Waiting for Open Baton NFVO to be up and running...")
        x = 0
        while x < 200:
            if servertest(floatip, "8080"):
                break
            else:
                self.logger.debug("Open Baton NFVO is not started yet (%ss)" %
                                  (x * 5))
                time.sleep(5)
                x += 1

        if x == 200:
            self.logger.error("Open Baton is not started correctly")

        self.ob_ip = floatip
        self.ob_password = "******"
        self.ob_username = "******"
        self.ob_https = False
        self.ob_port = "8080"
        self.logger.info("Waiting for all components up and running...")
        time.sleep(60)
        self.details["orchestrator"] = {
            'status': "PASS",
            'result': "Deploy Open Baton NFVO: OK"
        }
        self.logger.info("Deploy Open Baton NFVO: OK")
        return True
Code Example #5
def main():
    global SUMMARY
    global network_dict
    start_time = time.time()
    stop_time = start_time

    # configure script
    if not (args.test_name in tests):
        logger.error('argument not valid')
        exit(-1)

    SUMMARY = []
    creds_nova = openstack_utils.get_credentials("nova")
    nova_client = novaclient.Client('2', **creds_nova)
    creds_neutron = openstack_utils.get_credentials("neutron")
    neutron_client = neutronclient.Client(**creds_neutron)
    creds_keystone = openstack_utils.get_credentials("keystone")
    keystone_client = keystoneclient.Client(**creds_keystone)
    glance_endpoint = keystone_client.service_catalog.url_for(
        service_type='image', endpoint_type='publicURL')
    glance_client = glanceclient.Client(1,
                                        glance_endpoint,
                                        token=keystone_client.auth_token)
    creds_cinder = openstack_utils.get_credentials("cinder")
    cinder_client = cinderclient.Client('2',
                                        creds_cinder['username'],
                                        creds_cinder['api_key'],
                                        creds_cinder['project_id'],
                                        creds_cinder['auth_url'],
                                        service_type="volume")

    client_dict['neutron'] = neutron_client

    volume_types = openstack_utils.list_volume_types(cinder_client,
                                                     private=False)
    if not volume_types:
        volume_type = openstack_utils.create_volume_type(
            cinder_client, CINDER_VOLUME_TYPE_NAME)
        if not volume_type:
            logger.error("Failed to create volume type...")
            exit(-1)
        else:
            logger.debug("Volume type '%s' created succesfully..." %
                         CINDER_VOLUME_TYPE_NAME)
    else:
        logger.debug("Using existing volume type(s)...")

    image_id = openstack_utils.get_image_id(glance_client, GLANCE_IMAGE_NAME)
    image_exists = False

    if image_id == '':
        logger.debug("Creating image '%s' from '%s'..." %
                     (GLANCE_IMAGE_NAME, GLANCE_IMAGE_PATH))
        image_id = openstack_utils.create_glance_image(glance_client,
                                                       GLANCE_IMAGE_NAME,
                                                       GLANCE_IMAGE_PATH)
        if not image_id:
            logger.error("Failed to create the Glance image...")
            exit(-1)
        else:
            logger.debug("Image '%s' with ID '%s' created succesfully ." %
                         (GLANCE_IMAGE_NAME, image_id))
    else:
        logger.debug("Using existing image '%s' with ID '%s'..." %
                     (GLANCE_IMAGE_NAME, image_id))
        image_exists = True

    logger.debug("Creating network '%s'..." % PRIVATE_NET_NAME)
    network_dict = openstack_utils.create_network_full(
        logger, client_dict['neutron'], PRIVATE_NET_NAME, PRIVATE_SUBNET_NAME,
        ROUTER_NAME, PRIVATE_SUBNET_CIDR)
    if not network_dict:
        logger.error("Failed to create network...")
        exit(-1)
    else:
        if not openstack_utils.update_neutron_net(
                client_dict['neutron'], network_dict['net_id'], shared=True):
            logger.error("Failed to update network...")
            exit(-1)
        else:
            logger.debug("Network '%s' available..." % PRIVATE_NET_NAME)

    if args.test_name == "all":
        for test_name in tests:
            if not (test_name == 'all' or test_name == 'vm'):
                run_task(test_name)
    else:
        logger.debug("Test name: " + args.test_name)
        run_task(args.test_name)

    report = ("\n"
              "                                                              "
              "\n"
              "                     Rally Summary Report\n"
              "\n"
              "+===================+============+===============+===========+"
              "\n"
              "| Module            | Duration   | nb. Test Run  | Success   |"
              "\n"
              "+===================+============+===============+===========+"
              "\n")
    payload = []
    stop_time = time.time()

    # for each scenario we draw a row for the table
    total_duration = 0.0
    total_nb_tests = 0
    total_success = 0.0
    for s in SUMMARY:
        name = "{0:<17}".format(s['test_name'])
        duration = float(s['overall_duration'])
        total_duration += duration
        duration = time.strftime("%M:%S", time.gmtime(duration))
        duration = "{0:<10}".format(duration)
        nb_tests = "{0:<13}".format(s['nb_tests'])
        total_nb_tests += int(s['nb_tests'])
        success = "{0:<10}".format(str(s['success']) + '%')
        total_success += float(s['success'])
        report += ("" + "| " + name + " | " + duration + " | " + nb_tests +
                   " | " + success + "|\n" +
                   "+-------------------+------------"
                   "+---------------+-----------+\n")
        payload.append({
            'module': name,
            'details': {
                'duration': s['overall_duration'],
                'nb tests': s['nb_tests'],
                'success': s['success']
            }
        })

    total_duration_str = time.strftime("%H:%M:%S", time.gmtime(total_duration))
    total_duration_str2 = "{0:<10}".format(total_duration_str)
    total_nb_tests_str = "{0:<13}".format(total_nb_tests)
    total_success = "{:0.2f}".format(total_success / len(SUMMARY))
    total_success_str = "{0:<10}".format(str(total_success) + '%')
    report += "+===================+============+===============+===========+"
    report += "\n"
    report += ("| TOTAL:            | " + total_duration_str2 + " | " +
               total_nb_tests_str + " | " + total_success_str + "|\n")
    report += "+===================+============+===============+===========+"
    report += "\n"

    logger.info("\n" + report)
    payload.append({
        'summary': {
            'duration': total_duration,
            'nb tests': total_nb_tests,
            'nb success': total_success
        }
    })

    # Generate json results for DB
    # json_results = {"timestart": time_start, "duration": total_duration,
    #                "tests": int(total_nb_tests),
    #                "success": int(total_success)}
    # logger.info("Results: "+str(json_results))

    # Evaluation of the success criteria
    status = "failed"
    # for Rally we decided that the overall success rate must be above 90%
    if float(total_success) >= 90:
        status = "passed"

    if args.sanity:
        case_name = "rally_sanity"
    else:
        case_name = "rally_full"

    if args.report:
        logger.debug("Pushing Rally summary into DB...")
        functest_utils.push_results_to_db("functest", case_name, None,
                                          start_time, stop_time, status,
                                          payload)
    if args.noclean:
        exit(0)

    if not image_exists:
        logger.debug("Deleting image '%s' with ID '%s'..." %
                     (GLANCE_IMAGE_NAME, image_id))
        if not openstack_utils.delete_glance_image(nova_client, image_id):
            logger.error("Error deleting the glance image")

    if not volume_types:
        logger.debug("Deleting volume type '%s'..." % CINDER_VOLUME_TYPE_NAME)
        if not openstack_utils.delete_volume_type(cinder_client, volume_type):
            logger.error("Error in deleting volume type...")
Code Example #6
def main():

    creds_nova = openstack_utils.get_credentials("nova")
    nova_client = novaclient.Client('2', **creds_nova)
    creds_neutron = openstack_utils.get_credentials("neutron")
    neutron_client = neutronclient.Client(**creds_neutron)
    creds_keystone = openstack_utils.get_credentials("keystone")
    keystone_client = keystoneclient.Client(**creds_keystone)
    glance_endpoint = keystone_client.service_catalog.url_for(
        service_type='image', endpoint_type='publicURL')
    glance_client = glanceclient.Client(1, glance_endpoint,
                                        token=keystone_client.auth_token)
    EXIT_CODE = -1

    image_id = None
    flavor = None

    # Check if the given image exists
    image_id = openstack_utils.get_image_id(glance_client, GLANCE_IMAGE_NAME)
    if image_id != '':
        logger.info("Using existing image '%s'..." % GLANCE_IMAGE_NAME)
        global image_exists
        image_exists = True
    else:
        logger.info("Creating image '%s' from '%s'..." % (GLANCE_IMAGE_NAME,
                                                          GLANCE_IMAGE_PATH))
        image_id = openstack_utils.create_glance_image(glance_client,
                                                       GLANCE_IMAGE_NAME,
                                                       GLANCE_IMAGE_PATH)
        if not image_id:
            logger.error("Failed to create a Glance image...")
            return(EXIT_CODE)
        logger.debug("Image '%s' with ID=%s created successfully."
                     % (GLANCE_IMAGE_NAME, image_id))

    network_dic = openstack_utils.create_network_full(logger,
                                                      neutron_client,
                                                      PRIVATE_NET_NAME,
                                                      PRIVATE_SUBNET_NAME,
                                                      ROUTER_NAME,
                                                      PRIVATE_SUBNET_CIDR)
    if not network_dic:
        logger.error(
            "There has been a problem when creating the neutron network")
        return(EXIT_CODE)
    network_id = network_dic["net_id"]

    create_security_group(neutron_client)

    # Check if the given flavor exists
    try:
        flavor = nova_client.flavors.find(name=FLAVOR)
        logger.info("Flavor found '%s'" % FLAVOR)
    except:
        logger.error("Flavor '%s' not found." % FLAVOR)
        logger.info("Available flavors are: ")
        pMsg(nova_client.flavors.list())
        exit(-1)

    # Deleting instances if they exist
    servers = nova_client.servers.list()
    for server in servers:
        if server.name == NAME_VM_1 or server.name == NAME_VM_2:
            logger.info("Instance %s found. Deleting..." % server.name)
            server.delete()

    # boot VM 1
    # basic boot
    # tune (e.g. flavor, images, network) to your specific
    # openstack configuration here
    # we consider start time at VM1 booting
    start_time = time.time()
    stop_time = start_time
    logger.info("vPing Start Time:'%s'" % (
        datetime.datetime.fromtimestamp(start_time).strftime(
            '%Y-%m-%d %H:%M:%S')))

    # create VM
    logger.info("Creating instance '%s'..." % NAME_VM_1)
    logger.debug(
        "Configuration:\n name=%s \n flavor=%s \n image=%s \n "
        "network=%s \n" % (NAME_VM_1, flavor, image_id, network_id))
    vm1 = nova_client.servers.create(
        name=NAME_VM_1,
        flavor=flavor,
        image=image_id,
        config_drive=True,
        nics=[{"net-id": network_id}]
    )

    # wait until VM status is active
    if not waitVmActive(nova_client, vm1):

        logger.error("Instance '%s' cannot be booted. Status is '%s'" % (
            NAME_VM_1, openstack_utils.get_instance_status(nova_client, vm1)))
        return (EXIT_CODE)
    else:
        logger.info("Instance '%s' is ACTIVE." % NAME_VM_1)

    # Retrieve IP of first VM
    test_ip = vm1.networks.get(PRIVATE_NET_NAME)[0]
    logger.debug("Instance '%s' got %s" % (NAME_VM_1, test_ip))

    # boot VM 2
    # we will boot then execute a ping script with cloud-init
    # the long chain corresponds to the ping procedure converted with base 64
    # tune (e.g. flavor, images, network) to your specific openstack
    #  configuration here
    u = ("#!/bin/sh\n\nwhile true; do\n ping -c 1 %s 2>&1 >/dev/null\n "
         "RES=$?\n if [ \"Z$RES\" = \"Z0\" ] ; then\n  echo 'vPing OK'\n "
         "break\n else\n  echo 'vPing KO'\n fi\n sleep 1\ndone\n" % test_ip)

    # create VM
    logger.info("Creating instance '%s'..." % NAME_VM_2)
    logger.debug(
        "Configuration:\n name=%s \n flavor=%s \n image=%s \n network=%s "
        "\n userdata= \n%s" % (
            NAME_VM_2, flavor, image_id, network_id, u))
    vm2 = nova_client.servers.create(
        name=NAME_VM_2,
        flavor=flavor,
        image=image_id,
        nics=[{"net-id": network_id}],
        config_drive=True,
        userdata=u
    )

    if not waitVmActive(nova_client, vm2):
        logger.error("Instance '%s' cannot be booted. Status is '%s'" % (
            NAME_VM_2, openstack_utils.get_instance_status(nova_client, vm2)))
        return (EXIT_CODE)
    else:
        logger.info("Instance '%s' is ACTIVE." % NAME_VM_2)

    logger.info("Waiting for ping...")
    sec = 0
    metadata_tries = 0
    console_log = vm2.get_console_output()
    duration = 0
    stop_time = time.time()

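    # Poll the console output of VM2 until the cloud-init ping script reports
    # "vPing OK", userdata appears unsupported, or the timeout is reached.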
    while True:
        time.sleep(1)
        console_log = vm2.get_console_output()
        # print "--"+console_log
        # report if the test is failed
        if "vPing OK" in console_log:
            logger.info("vPing detected!")

            # we consider start time at VM1 booting
            stop_time = time.time()
            duration = round(stop_time - start_time, 1)
            logger.info("vPing duration:'%s'" % duration)
            EXIT_CODE = 0
            break
        elif ("failed to read iid from metadata" in console_log or
              metadata_tries > 5):
            EXIT_CODE = -2
            break
        elif sec == PING_TIMEOUT:
            logger.info("Timeout reached.")
            break
        elif sec % 10 == 0:
            if "request failed" in console_log:
                logger.debug("It seems userdata is not supported in "
                             "nova boot. Waiting a bit...")
                metadata_tries += 1
            else:
                logger.debug("Pinging %s. Waiting for response..." % test_ip)
        sec += 1

    test_status = "NOK"
    if EXIT_CODE == 0:
        logger.info("vPing OK")
        test_status = "OK"
    elif EXIT_CODE == -2:
        duration = 0
        logger.info("Userdata is not supported in nova boot. Aborting test...")
    else:
        duration = 0
        logger.error("vPing FAILED")

    if args.report:
        try:
            logger.debug("Pushing vPing userdata results into DB...")
            functest_utils.push_results_to_db("functest",
                                              "vping_userdata",
                                              logger,
                                              start_time,
                                              stop_time,
                                              test_status,
                                              details={'timestart': start_time,
                                                       'duration': duration,
                                                       'status': test_status})
        except:
            logger.error("Error pushing results into Database '%s'"
                         % sys.exc_info()[0])

    exit(EXIT_CODE)
Code Example #7
    def deploy_orchestrator(self, **kwargs):

        self.logger.info("Additional pre-configuration steps")
        self.neutron_client = os_utils.get_neutron_client(self.admin_creds)
        self.glance_client = os_utils.get_glance_client(self.admin_creds)
        self.keystone_client = os_utils.get_keystone_client(self.admin_creds)
        self.nova_client = os_utils.get_nova_client(self.admin_creds)

        # needs some images
        self.logger.info("Upload some OS images if it doesn't exist")
        temp_dir = os.path.join(self.data_dir, "tmp/")
        for image_name, image_url in self.images.iteritems():
            self.logger.info("image: %s, url: %s" % (image_name, image_url))
            image_id = ''
            try:
                image_id = os_utils.get_image_id(self.glance_client,
                                                 image_name)
                self.logger.debug("image_id: %s" % image_id)
            except:
                self.logger.error("Unexpected error: %s" % sys.exc_info()[0])

            if image_id == '':
                self.logger.info(
                    "%s image doesn't exist in the Glance repository. "
                    "Downloading and uploading it to Glance..." % image_name)
                image_id = download_and_add_image_on_glance(
                    self.glance_client, image_name, image_url, temp_dir)
            if image_id == '':
                self.step_failure("Failed to find or upload required OS "
                                  "image for this deployment")
        # Need to extend quota
        self.logger.info("Update security group quota for this tenant")
        tenant_id = os_utils.get_tenant_id(self.keystone_client,
                                           self.tenant_name)
        self.logger.debug("Tenant id found %s" % tenant_id)
        if not os_utils.update_sg_quota(self.neutron_client, tenant_id, 50,
                                        100):
            self.step_failure("Failed to update security group quota" +
                              " for tenant " + self.tenant_name)
        self.logger.debug("group quota extended")

        # start the deployment of cloudify
        public_auth_url = os_utils.get_endpoint('identity')

        self.logger.debug("CFY inputs: %s" % self.orchestrator['inputs'])
        cfy = Orchestrator(self.data_dir, self.orchestrator['inputs'])
        self.orchestrator['object'] = cfy
        self.logger.debug("Orchestrator object created")

        self.logger.debug("Tenant name: %s" % self.tenant_name)

        cfy.set_credentials(username=self.tenant_name,
                            password=self.tenant_name,
                            tenant_name=self.tenant_name,
                            auth_url=public_auth_url)
        self.logger.info("Credentials set in CFY")

        # orchestrator VM flavor
        self.logger.info("Check Flavor is available, if not create one")
        self.logger.debug("Flavor details %s " %
                          self.orchestrator['requirements']['ram_min'])
        flavor_exist, flavor_id = os_utils.get_or_create_flavor(
            "m1.large",
            self.orchestrator['requirements']['ram_min'],
            '1',
            '1',
            public=True)
        self.logger.debug("Flavor id: %s" % flavor_id)

        if not flavor_id:
            self.logger.info("Available flavors are: ")
            self.logger.info(self.nova_client.flavors.list())
            self.step_failure("Failed to find required flavor"
                              "for this deployment")
        cfy.set_flavor_id(flavor_id)
        self.logger.debug("Flavor OK")

        # orchestrator VM image
        self.logger.debug("Orchestrator image")
        if 'os_image' in self.orchestrator['requirements'].keys():
            image_id = os_utils.get_image_id(
                self.glance_client,
                self.orchestrator['requirements']['os_image'])
            self.logger.debug("Orchestrator image id: %s" % image_id)
            if image_id == '':
                self.logger.error("CFY image not found")
                self.step_failure("Failed to find required OS image"
                                  " for cloudify manager")
        else:
            self.step_failure("Failed to find required OS image"
                              " for cloudify manager")

        cfy.set_image_id(image_id)
        self.logger.debug("Orchestrator image set")

        self.logger.debug("Get External network")
        ext_net = os_utils.get_external_net(self.neutron_client)
        self.logger.debug("External network: %s" % ext_net)
        if not ext_net:
            self.step_failure("Failed to get external network")

        cfy.set_external_network_name(ext_net)
        self.logger.debug("CFY External network set")

        self.logger.debug("get resolvconf")
        ns = ft_utils.get_resolvconf_ns()
        if ns:
            cfy.set_nameservers(ns)
            self.logger.debug("Resolvconf set")

        self.logger.info("Prepare virtualenv for cloudify-cli")
        cmd = "chmod +x " + self.case_dir + "create_venv.sh"
        ft_utils.execute_command(cmd)
        time.sleep(3)
        cmd = self.case_dir + "create_venv.sh " + self.data_dir
        ft_utils.execute_command(cmd)

        cfy.download_manager_blueprint(
            self.orchestrator['blueprint']['url'],
            self.orchestrator['blueprint']['branch'])

        cfy.deploy_manager()
        return {'status': 'PASS', 'result': ''}
Code Example #8
File: vHello.py  Project: blsaws/vHello
def main():

    # ############### GENERAL INITIALISATION ################

    if not os.path.exists(vHello_DATA_DIR):
        os.makedirs(vHello_DATA_DIR)

    ks_creds = os_utils.get_credentials("keystone")
    nv_creds = os_utils.get_credentials("nova")
    nt_creds = os_utils.get_credentials("neutron")

    logger.info("Prepare OpenStack plateform (create tenant and user)")
    keystone = ksclient.Client(**ks_creds)

    user_id = os_utils.get_user_id(keystone, ks_creds['username'])
    if user_id == '':
        step_failure("init", "Error : Failed to get id of " +
                     ks_creds['username'])

    tenant_id = os_utils.create_tenant(
        keystone, TENANT_NAME, TENANT_DESCRIPTION)
    if tenant_id == '':
        step_failure("init", "Error : Failed to create " +
                     TENANT_NAME + " tenant")

    roles_name = ["admin", "Admin"]
    role_id = ''
    for role_name in roles_name:
        if role_id == '':
            role_id = os_utils.get_role_id(keystone, role_name)

    if role_id == '':
        logger.error("Error : Failed to get id for %s role" % role_name)

    if not os_utils.add_role_user(keystone, user_id, role_id, tenant_id):
        logger.error("Error : Failed to add %s on tenant" %
                     ks_creds['username'])

    user_id = os_utils.create_user(
        keystone, TENANT_NAME, TENANT_NAME, None, tenant_id)
    if user_id == '':
        logger.error("Error : Failed to create %s user" % TENANT_NAME)

    logger.info("Update OpenStack creds informations")
    ks_creds.update({
        "username": TENANT_NAME,
        "password": TENANT_NAME,
        "tenant_name": TENANT_NAME,
    })

    nt_creds.update({
        "tenant_name": TENANT_NAME,
    })

    nv_creds.update({
        "project_id": TENANT_NAME,
    })

    logger.info("Upload some OS images if it doesn't exist")
    glance_endpoint = keystone.service_catalog.url_for(
        service_type='image', endpoint_type='publicURL')
    glance = glclient.Client(1, glance_endpoint, token=keystone.auth_token)

    for img in IMAGES.keys():
        image_name = IMAGES[img]['image_name']
        image_url = IMAGES[img]['image_url']

        image_id = os_utils.get_image_id(glance, image_name)

        if image_id == '':
            logger.info("""%s image doesn't exist on glance repository. Try
            downloading this image and upload on glance !""" % image_name)
            image_id = download_and_add_image_on_glance(
                glance, image_name, image_url)

        if image_id == '':
            step_failure(
                "init",
                "Error : Failed to find or upload required OS "
                "image for this deployment")

    nova = nvclient.Client("2", **nv_creds)

    logger.info("Update security group quota for this tenant")
    neutron = ntclient.Client(**nt_creds)
    if not os_utils.update_sg_quota(neutron, tenant_id, 50, 100):
        step_failure(
            "init",
            "Failed to update security group quota for tenant " + TENANT_NAME)

    logger.info("Update cinder quota for this tenant")
    from cinderclient import client as cinderclient

    creds_cinder = os_utils.get_credentials("cinder")
    cinder_client = cinderclient.Client('1', creds_cinder['username'],
                                        creds_cinder['api_key'],
                                        creds_cinder['project_id'],
                                        creds_cinder['auth_url'],
                                        service_type="volume")
    if not os_utils.update_cinder_quota(cinder_client, tenant_id, 20, 10, 150):
        step_failure(
            "init", "Failed to update cinder quota for tenant " + TENANT_NAME)

    # ############### CLOUDIFY INITIALISATION ################

    cfy = orchestrator(vHello_DATA_DIR, CFY_INPUTS, logger)

    cfy.set_credentials(username=ks_creds['username'], password=ks_creds[
                        'password'], tenant_name=ks_creds['tenant_name'],
                        auth_url=ks_creds['auth_url'])

    logger.info("Collect flavor id for cloudify manager server")
    nova = nvclient.Client("2", **nv_creds)

    flavor_name = "m1.medium"
    flavor_id = os_utils.get_flavor_id(nova, flavor_name)
    for requirement in CFY_MANAGER_REQUIERMENTS:
        if requirement == 'ram_min':
            flavor_id = os_utils.get_flavor_id_by_ram_range(
                nova, CFY_MANAGER_REQUIERMENTS['ram_min'], 8196)

    if flavor_id == '':
        logger.error(
            "Failed to find %s flavor. "
            "Try with ram range default requirement !" % flavor_name)
        flavor_id = os_utils.get_flavor_id_by_ram_range(nova, 4000, 8196)

    if flavor_id == '':
        step_failure("orchestrator",
                     "Failed to find required flavor for this deployment")

    cfy.set_flavor_id(flavor_id)

    image_name = "centos_7"
    image_id = os_utils.get_image_id(glance, image_name)
    for requirement in CFY_MANAGER_REQUIERMENTS:
        if requirement == 'os_image':
            image_id = os_utils.get_image_id(
                glance, CFY_MANAGER_REQUIERMENTS['os_image'])

    if image_id == '':
        step_failure(
            "orchestrator",
            "Error : Failed to find required OS image for cloudify manager")

    cfy.set_image_id(image_id)

    ext_net = os_utils.get_external_net(neutron)
    if not ext_net:
        step_failure("orchestrator", "Failed to get external network")

    cfy.set_external_network_name(ext_net)

    ns = functest_utils.get_resolvconf_ns()
    if ns:
        cfy.set_nameservers(ns)

    logger.info("Prepare virtualenv for cloudify-cli")
    cmd = "chmod +x " + vHello_DIR + "create_venv.sh"
    functest_utils.execute_command(cmd, logger)
    time.sleep(3)
    cmd = vHello_DIR + "create_venv.sh " + vHello_DATA_DIR
    functest_utils.execute_command(cmd, logger)

    cfy.download_manager_blueprint(
        CFY_MANAGER_BLUEPRINT['url'], CFY_MANAGER_BLUEPRINT['branch'])

    # ############### CLOUDIFY DEPLOYMENT ################
    start_time_ts = time.time()
    end_time_ts = start_time_ts
    logger.info("Cloudify deployment Start Time:'%s'" % (
        datetime.datetime.fromtimestamp(start_time_ts).strftime(
            '%Y-%m-%d %H:%M:%S')))

    error = cfy.deploy_manager()
    if error:
        step_failure("orchestrator", error)

    end_time_ts = time.time()
    duration = round(end_time_ts - start_time_ts, 1)
    logger.info("Cloudify deployment duration:'%s'" % duration)
    set_result("orchestrator", duration, "")

    # ############### helloworld INITIALISATION ################

    cw = helloworld(CW_INPUTS, cfy, logger)

    logger.info("Collect flavor id for all helloworld vm")
    nova = nvclient.Client("2", **nv_creds)

    flavor_name = "m1.small"
    flavor_id = os_utils.get_flavor_id(nova, flavor_name)
    for requirement in CW_REQUIERMENTS:
        if requirement == 'ram_min':
            flavor_id = os_utils.get_flavor_id_by_ram_range(
                nova, CW_REQUIERMENTS['ram_min'], 8196)

    if flavor_id == '':
        logger.error(
            "Failed to find %s flavor. Try with ram range "
            "default requirement !" % flavor_name)
        flavor_id = os_utils.get_flavor_id_by_ram_range(nova, 4000, 8196)

    if flavor_id == '':
        step_failure(
            "vHello", "Failed to find required flavor for this deployment")

    cw.set_flavor_id(flavor_id)

    image_name = "ubuntu_14.04"
    image_id = os_utils.get_image_id(glance, image_name)
    for requirement in CW_REQUIERMENTS:
        if requirement == 'os_image':
            image_id = os_utils.get_image_id(
                glance, CW_REQUIERMENTS['os_image'])

    if image_id == '':
        step_failure(
            "vHello",
            "Error : Failed to find required OS image for cloudify manager")

    cw.set_image_id(image_id)

    ext_net = os_utils.get_external_net(neutron)
    if not ext_net:
        step_failure("vHello", "Failed to get external network")

    cw.set_external_network_name(ext_net)

    # ############### helloworld DEPLOYMENT ################

    start_time_ts = time.time()
    end_time_ts = start_time_ts
    logger.info("vHello VNF deployment Start Time:'%s'" % (
        datetime.datetime.fromtimestamp(start_time_ts).strftime(
            '%Y-%m-%d %H:%M:%S')))

    error = cw.deploy_vnf(CW_BLUEPRINT)
    if error:
        step_failure("vHello", error)

    end_time_ts = time.time()
    duration = round(end_time_ts - start_time_ts, 1)
    logger.info("vHello VNF deployment duration:'%s'" % duration)
    set_result("vHello", duration, "")
Code Example #9
    def init_performance_testToplogy(self, tplgy, performance_test_config):
        tplgy.delete_config()

        vnf_list = performance_test_config["vnf_list"]
        target_vnf = self.util.get_vnf_info(vnf_list, "target_vnf")
        tester_vm = self.util.get_vnf_info(vnf_list, "tester_vm")

        target_vnf_image_name = ""
        if "image_name" in target_vnf:
            target_vnf_image_name = target_vnf["image_name"]
        target_vnf_flavor_name = ""
        if "flavor_name" in target_vnf:
            target_vnf_flavor_name = target_vnf["flavor_name"]
        self.logger.debug("target_vnf image name : " + target_vnf_image_name)
        self.logger.debug("target_vnf flavor name : " + target_vnf_flavor_name)

        tester_vm_image_name = ""
        if "image_name" in tester_vm:
            tester_vm_image_name = tester_vm["image_name"]
        tester_vm_flavor_name = ""
        if "flavor_name" in tester_vm:
            tester_vm_flavor_name = tester_vm["flavor_name"]
        self.logger.debug("tester vm image name : " + tester_vm_image_name)
        self.logger.debug("tester vm flavor name : " + tester_vm_flavor_name)

        nova = os_utils.get_nova_client()

        # Setting the flavor id for target vnf.
        target_vnf_flavor_id = os_utils.get_flavor_id(nova,
                                                      target_vnf_flavor_name)

        if target_vnf_flavor_id == '':
            for default in self.PERFORMANCE_TEST_TPLGY_DEFAULT:
                if default == 'ram_min':
                    target_vnf_flavor_id = os_utils.get_flavor_id_by_ram_range(
                        nova, self.PERFORMANCE_TEST_TPLGY_DEFAULT['ram_min'],
                        VNF_MAX_RAM_SIZE)

        if target_vnf_flavor_id == '':
            return self.step_failure(
                "making_testTopology",
                "Error : Failed to find flavor for target vnf")

        tplgy.set_target_vnf_flavor_id(target_vnf_flavor_id)

        # Setting the flavor id for tester vm.
        tester_vm_flavor_id = os_utils.get_flavor_id(nova,
                                                     tester_vm_flavor_name)

        if tester_vm_flavor_id == '':
            for default in self.PERFORMANCE_TEST_TPLGY_DEFAULT:
                if default == 'ram_min':
                    tester_vm_flavor_id = os_utils.get_flavor_id_by_ram_range(
                        nova, self.PERFORMANCE_TEST_TPLGY_DEFAULT['ram_min'],
                        VNF_MAX_RAM_SIZE)

        if tester_vm_flavor_id == '':
            return self.step_failure(
                "making_testTopology",
                "Error : Failed to find flavor for tester vm")

        tplgy.set_send_tester_vm_flavor_id(tester_vm_flavor_id)
        tplgy.set_receive_tester_vm_flavor_id(tester_vm_flavor_id)

        # Setting the image id for target vnf.
        target_vnf_image_id = os_utils.get_image_id(self.glance,
                                                    target_vnf_image_name)

        if target_vnf_image_id == '':
            for default in self.PERFORMANCE_TEST_TPLGY_DEFAULT:
                if default == 'vnf_os_image':
                    target_vnf_image_id = os_utils.get_image_id(
                        self.glance,
                        self.PERFORMANCE_TEST_TPLGY_DEFAULT['vnf_os_image'])

        if target_vnf_image_id == '':
            return self.step_failure(
                "making_testTopology",
                "Error : Failed to find required OS image for target vnf")

        tplgy.set_target_vnf_image_id(target_vnf_image_id)

        # Setting the image id for tester vm.
        tester_vm_image_id = os_utils.get_image_id(self.glance,
                                                   tester_vm_image_name)

        if tester_vm_image_id == '':
            for default in self.PERFORMANCE_TEST_TPLGY_DEFAULT:
                if default == 'tester_os_image':
                    tester_vm_image_id = os_utils.get_image_id(
                        self.glance,
                        self.PERFORMANCE_TEST_TPLGY_DEFAULT['tester_os_image'])

        if tester_vm_image_id == '':
            return self.step_failure(
                "making_testTopology",
                "Error : Failed to find required OS image for tester vm")

        tplgy.set_send_tester_vm_image_id(tester_vm_image_id)
        tplgy.set_receive_tester_vm_image_id(tester_vm_image_id)

        tplgy.set_region(REGION_NAME)

        ext_net = os_utils.get_external_net(self.neutron)
        if not ext_net:
            return self.step_failure("making_testTopology",
                                     "Failed to get external network")

        tplgy.set_external_network_name(ext_net)

        tplgy.set_credentials(username=self.ks_cresds['username'],
                              password=self.ks_cresds['password'],
                              tenant_name=self.ks_cresds['tenant_name'],
                              auth_url=os_utils.get_endpoint('identity'))

        return self.set_resultdata(self.testcase_start_time, "", "",
                                   self.results)
Code Example #10
    def init_function_testToplogy(self, tplgy, function_test_config):
        tplgy.delete_config()

        self.logger.info("Collect flavor id for all topology vnf")

        vnf_list = function_test_config["vnf_list"]
        target_vnf = self.util.get_vnf_info(vnf_list, "target_vnf")
        reference_vnf = self.util.get_vnf_info(vnf_list, "reference_vnf")

        target_vnf_image_name = ""
        if "image_name" in target_vnf:
            target_vnf_image_name = target_vnf["image_name"]
        target_vnf_flavor_name = ""
        if "flavor_name" in target_vnf:
            target_vnf_flavor_name = target_vnf["flavor_name"]
        self.logger.debug("target_vnf image name : " + target_vnf_image_name)
        self.logger.debug("target_vnf flavor name : " + target_vnf_flavor_name)

        reference_vnf_image_name = ""
        if "image_name" in reference_vnf:
            reference_vnf_image_name = reference_vnf["image_name"]
        reference_vnf_flavor_name = ""
        if "flavor_name" in reference_vnf:
            reference_vnf_flavor_name = reference_vnf["flavor_name"]
        self.logger.debug("reference_vnf image name : " +
                          reference_vnf_image_name)
        self.logger.debug("reference_vnf flavor name : " +
                          reference_vnf_flavor_name)

        nova = os_utils.get_nova_client()

        # Setting the flavor id for target vnf.
        target_vnf_flavor_id = os_utils.get_flavor_id(nova,
                                                      target_vnf_flavor_name)

        if target_vnf_flavor_id == '':
            for default in self.FUNCTION_TEST_TPLGY_DEFAULT:
                if default == 'ram_min':
                    target_vnf_flavor_id = os_utils.get_flavor_id_by_ram_range(
                        nova, self.FUNCTION_TEST_TPLGY_DEFAULT['ram_min'],
                        VNF_MAX_RAM_SIZE)

            self.logger.info("target_vnf_flavor_id id search set")

        if target_vnf_flavor_id == '':
            return self.step_failure(
                "making_testTopology",
                "Error : Failed to find flavor for target vnf")

        tplgy.set_target_vnf_flavor_id(target_vnf_flavor_id)

        # Setting the flavor id for reference vnf.
        reference_vnf_flavor_id = os_utils.get_flavor_id(
            nova, reference_vnf_flavor_name)

        if reference_vnf_flavor_id == '':
            for default in self.FUNCTION_TEST_TPLGY_DEFAULT:
                if default == 'ram_min':
                    reference_vnf_flavor_id = \
                        os_utils.get_flavor_id_by_ram_range(
                            nova,
                            self.FUNCTION_TEST_TPLGY_DEFAULT['ram_min'],
                            VNF_MAX_RAM_SIZE)

            self.logger.info("reference_vnf_flavor_id id search set")

        if reference_vnf_flavor_id == '':
            return self.step_failure(
                "making_testTopology",
                "Error : Failed to find flavor for tester vm")

        tplgy.set_reference_vnf_flavor_id(reference_vnf_flavor_id)

        # Setting the image id for target vnf.
        target_vnf_image_id = os_utils.get_image_id(self.glance,
                                                    target_vnf_image_name)

        if target_vnf_image_id == '':
            for default in self.FUNCTION_TEST_TPLGY_DEFAULT:
                if default == 'os_image':
                    target_vnf_image_id = os_utils.get_image_id(
                        self.glance,
                        self.FUNCTION_TEST_TPLGY_DEFAULT['os_image'])

        if target_vnf_image_id == '':
            return self.step_failure(
                "making_testTopology",
                "Error : Failed to find required OS image for target vnf")

        tplgy.set_target_vnf_image_id(target_vnf_image_id)

        # Setting the image id for reference vnf.
        reference_vnf_image_id = os_utils.get_image_id(
            self.glance, reference_vnf_image_name)

        if reference_vnf_image_id == '':
            for default in self.FUNCTION_TEST_TPLGY_DEFAULT:
                if default == 'os_image':
                    reference_vnf_image_id = os_utils.get_image_id(
                        self.glance,
                        self.FUNCTION_TEST_TPLGY_DEFAULT['os_image'])

        if reference_vnf_image_id == '':
            return self.step_failure(
                "making_testTopology",
                "Error : Failed to find required OS image for reference vnf.")

        tplgy.set_reference_vnf_image_id(reference_vnf_image_id)

        tplgy.set_region(REGION_NAME)

        ext_net = os_utils.get_external_net(self.neutron)
        if not ext_net:
            return self.step_failure("making_testTopology",
                                     "Failed to get external network")

        tplgy.set_external_network_name(ext_net)

        tplgy.set_credentials(username=self.ks_cresds['username'],
                              password=self.ks_cresds['password'],
                              tenant_name=self.ks_cresds['tenant_name'],
                              auth_url=os_utils.get_endpoint('identity'))

        return self.set_resultdata(self.testcase_start_time, "", "",
                                   self.results)
Code Example #11
    def deploy_cloudify(self, cfy):

        username = self.ks_cresds['username']
        password = self.ks_cresds['password']
        tenant_name = self.ks_cresds['tenant_name']
        auth_url = os_utils.get_endpoint('identity')
        self.logger.debug("auth_url = %s" % auth_url)

        cfy.set_credentials(username, password, tenant_name, auth_url)

        self.logger.info("Collect flavor id for cloudify manager server")

        nova = os_utils.get_nova_client()

        flavor_name = "m1.large"
        flavor_id = os_utils.get_flavor_id(nova, flavor_name)

        for requirement in CFY_MANAGER_REQUIERMENTS:
            if requirement == 'ram_min':
                flavor_id = os_utils.get_flavor_id_by_ram_range(
                    nova, CFY_MANAGER_REQUIERMENTS['ram_min'],
                    CFY_MANAGER_MAX_RAM_SIZE)

        if flavor_id == '':
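            # Last resort: accept any flavor offering between 4000 and 8196 MB
            # of RAM.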
            self.logger.error("Failed to find %s flavor. "
                              "Try with ram range default requirement !" %
                              flavor_name)
            flavor_id = os_utils.get_flavor_id_by_ram_range(nova, 4000, 8196)

        if flavor_id == '':
            return self.step_failure(
                "making_orchestrator",
                "Failed to find required flavor for this deployment")

        cfy.set_flavor_id(flavor_id)

        image_name = "centos_7"
        image_id = os_utils.get_image_id(self.glance, image_name)

        for requirement in CFY_MANAGER_REQUIERMENTS:
            if requirement == 'os_image':
                image_id = os_utils.get_image_id(
                    self.glance, CFY_MANAGER_REQUIERMENTS['os_image'])

        if image_id == '':
            return self.step_failure(
                "making_orchestrator",
                "Error : Failed to find required OS image for cloudify manager"
            )

        cfy.set_image_id(image_id)

        ext_net = os_utils.get_external_net(self.neutron)
        if not ext_net:
            return self.step_failure("making_orchestrator",
                                     "Failed to get external network")

        cfy.set_external_network_name(ext_net)

        ns = functest_utils.get_resolvconf_ns()
        if ns:
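            # Propagate the host's resolv.conf nameservers to the Cloudify
            # manager.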
            cfy.set_nameservers(ns)

        self.logger.info("Prepare virtualenv for cloudify-cli")
        cmd = "chmod +x " + VNF_DIR + "create_venv.sh"
        functest_utils.execute_command(cmd, self.logger)
        time.sleep(3)
        cmd = VNF_DIR + "create_venv.sh " + self.util.VNF_DATA_DIR
        functest_utils.execute_command(cmd, self.logger)

        cfy.download_manager_blueprint(CFY_MANAGER_BLUEPRINT['url'],
                                       CFY_MANAGER_BLUEPRINT['branch'])

        # ############### CLOUDIFY DEPLOYMENT ################
        start_time_ts = time.time()
        self.logger.info("Cloudify deployment Start Time:'%s'" %
                         (datetime.datetime.fromtimestamp(
                             start_time_ts).strftime('%Y-%m-%d %H:%M:%S')))

        error = cfy.deploy_manager()
        if error:
            return self.step_failure("making_orchestrator", error)

        end_time_ts = time.time()
        duration = round(end_time_ts - start_time_ts, 1)
        self.logger.info("Cloudify deployment duration:'%s'" % duration)

        self.set_result("making_orchestrator", duration, "OK")

        return self.set_resultdata(self.testcase_start_time, "", "",
                                   self.results)
Code Example #12
    def init(self):

        start_time_ts = time.time()

        self.util = utilvnf(self.logger)

        self.ks_cresds = os_utils.get_credentials()

        self.logger.info("Prepare OpenStack plateform(create tenant and user)")
        keystone = os_utils.get_keystone_client()

        user_id = os_utils.get_user_id(keystone, self.ks_cresds['username'])

        if user_id == '':
            return self.step_failure(
                "init",
                "Error : Failed to get id of " + self.ks_cresds['username'])

        tenant_id = os_utils.create_tenant(keystone, TENANT_NAME,
                                           TENANT_DESCRIPTION)

        if tenant_id == '':
            return self.step_failure(
                "init", "Error : Failed to create " + TENANT_NAME + " tenant")
        roles_name = ["admin", "Admin"]
        role_id = ''
        for role_name in roles_name:
            if role_id == '':
                role_id = os_utils.get_role_id(keystone, role_name)

        if role_id == '':
            self.logger.error("Error : Failed to get id for %s role" %
                              role_name)

        if not os_utils.add_role_user(keystone, user_id, role_id, tenant_id):
            self.logger.error("Error : Failed to add %s on tenant" %
                              self.ks_cresds['username'])

        user_id = os_utils.create_user(keystone, TENANT_NAME, TENANT_NAME,
                                       None, tenant_id)
        if user_id == '':
            self.logger.error("Error : Failed to create %s user" % TENANT_NAME)

        if not os_utils.add_role_user(keystone, user_id, role_id, tenant_id):
            self.logger.error("Failed to add %s on tenant" % TENANT_NAME)

        self.logger.info("Update OpenStack creds informations")

        self.ks_cresds.update({
            "tenant_name": TENANT_NAME,
        })

        self.neutron = os_utils.get_neutron_client(self.ks_cresds)
        nova = os_utils.get_nova_client()
        self.glance = os_utils.get_glance_client(self.ks_cresds)

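        # Switch to the newly created test user, whose username and password
        # are both the tenant name.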
        self.ks_cresds.update({
            "username": TENANT_NAME,
            "password": TENANT_NAME,
        })

        self.load_test_env_config()

        self.logger.info("Upload some OS images if it doesn't exist")
        images = {}
        images.update(IMAGES)
        images.update(self.VNF_TEST_IMAGES)
        for img in images.keys():
            image_name = images[img]['image_name']
            self.logger.info("image name = " + image_name)
            image_url = images[img]['image_url']

            image_id = os_utils.get_image_id(self.glance, image_name)

            if image_id == '':
                self.logger.info(
                    "%s image does not exist in the Glance repository. "
                    "Downloading it and uploading it to Glance." % image_name)
                image_id = self.download_and_add_image_on_glance(
                    self.glance, image_name, image_url)

            if image_id == '':
                return self.step_failure(
                    "init", "Error : Failed to find or upload required OS "
                    "image for this deployment")

        self.logger.info("Update security group quota for this tenant")

        result = os_utils.update_sg_quota(self.neutron, tenant_id, 50, 100)

        if not result:
            return self.step_failure(
                "init", "Failed to update security group quota for tenant " +
                TENANT_NAME)

        self.credentials = {
            "username": TENANT_NAME,
            "password": TENANT_NAME,
            "auth_url": os_utils.get_endpoint('identity'),
            "tenant_name": TENANT_NAME,
            "region_name": os.environ['OS_REGION_NAME']
        }

        self.util.set_credentials(self.credentials["username"],
                                  self.credentials["password"],
                                  self.credentials["auth_url"],
                                  self.credentials["tenant_name"],
                                  self.credentials["region_name"])

        with open(self.util.TEST_SCENATIO_YAML_FILE, 'r') as scenario_file:
            self.test_scenario_yaml = yaml.safe_load(scenario_file)

        res = self.util.test_scenario_validation_check(self.test_scenario_yaml)
        if res["status"] is False:
            self.logger.error(res["message"])
            return self.step_failure("init",
                                     "Error : Faild to test execution.")

        self.logger.info("Test scenario yaml validation check : " +
                         res["message"])

        end_time_ts = time.time()
        duration = round(end_time_ts - start_time_ts, 1)

        self.set_result("init", duration, "OK")

        return self.set_resultdata(self.testcase_start_time, "", "",
                                   self.results)
Code Example #13
File: vPing_ssh.py  Project: MatthewLiHW/functest
def main():

    creds_nova = openstack_utils.get_credentials("nova")
    nova_client = novaclient.Client('2', **creds_nova)
    creds_neutron = openstack_utils.get_credentials("neutron")
    neutron_client = neutronclient.Client(**creds_neutron)
    creds_keystone = openstack_utils.get_credentials("keystone")
    keystone_client = keystoneclient.Client(**creds_keystone)
    glance_endpoint = keystone_client.service_catalog.url_for(
        service_type='image', endpoint_type='publicURL')
    glance_client = glanceclient.Client(1, glance_endpoint,
                                        token=keystone_client.auth_token)
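    # The Glance v1 client is built from the image endpoint in the service
    # catalog and authenticated with the Keystone token.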
    EXIT_CODE = -1

    image_id = None
    flavor = None

    # Check if the given image exists
    image_id = openstack_utils.get_image_id(glance_client, GLANCE_IMAGE_NAME)
    if image_id != '':
        logger.info("Using existing image '%s'..." % GLANCE_IMAGE_NAME)
        global image_exists
        image_exists = True
    else:
        logger.info("Creating image '%s' from '%s'..." % (GLANCE_IMAGE_NAME,
                                                          GLANCE_IMAGE_PATH))
        image_id = openstack_utils.create_glance_image(glance_client,
                                                       GLANCE_IMAGE_NAME,
                                                       GLANCE_IMAGE_PATH)
        if not image_id:
            logger.error("Failed to create a Glance image...")
            return(EXIT_CODE)
        logger.debug("Image '%s' with ID=%s created successfully."
                     % (GLANCE_IMAGE_NAME, image_id))

    network_dic = openstack_utils.create_network_full(logger,
                                                      neutron_client,
                                                      PRIVATE_NET_NAME,
                                                      PRIVATE_SUBNET_NAME,
                                                      ROUTER_NAME,
                                                      PRIVATE_SUBNET_CIDR)
    if not network_dic:
        logger.error(
            "There has been a problem when creating the neutron network")
        return(EXIT_CODE)

    network_id = network_dic["net_id"]

    sg_id = create_security_group(neutron_client)

    # Check if the given flavor exists
    try:
        flavor = nova_client.flavors.find(name=FLAVOR)
        logger.info("Using existing Flavor '%s'..." % FLAVOR)
    except:
        logger.error("Flavor '%s' not found." % FLAVOR)
        logger.info("Available flavors are: ")
        pMsg(nova_client.flavors.list())
        return(EXIT_CODE)

    # Deleting instances if they exist
    servers = nova_client.servers.list()
    for server in servers:
        if server.name == NAME_VM_1 or server.name == NAME_VM_2:
            logger.info("Instance %s found. Deleting..." % server.name)
            server.delete()

    # boot VM 1
    start_time = time.time()
    stop_time = start_time
    logger.info("vPing Start Time:'%s'" % (
        datetime.datetime.fromtimestamp(start_time).strftime(
            '%Y-%m-%d %H:%M:%S')))

    logger.info("Creating instance '%s'..." % NAME_VM_1)
    logger.debug(
        "Configuration:\n name=%s \n flavor=%s \n image=%s \n "
        "network=%s \n" % (NAME_VM_1, flavor, image_id, network_id))
    vm1 = nova_client.servers.create(
        name=NAME_VM_1,
        flavor=flavor,
        image=image_id,
        nics=[{"net-id": network_id}]
    )

    # wait until VM status is active
    if not waitVmActive(nova_client, vm1):
        logger.error("Instance '%s' cannot be booted. Status is '%s'" % (
            NAME_VM_1, openstack_utils.get_instance_status(nova_client, vm1)))
        return (EXIT_CODE)
    else:
        logger.info("Instance '%s' is ACTIVE." % NAME_VM_1)

    # Retrieve IP of first VM
    test_ip = vm1.networks.get(PRIVATE_NET_NAME)[0]
    logger.debug("Instance '%s' got private ip '%s'." % (NAME_VM_1, test_ip))

    logger.info("Adding '%s' to security group '%s'..."
                % (NAME_VM_1, SECGROUP_NAME))
    openstack_utils.add_secgroup_to_instance(nova_client, vm1.id, sg_id)

    # boot VM 2
    logger.info("Creating instance '%s'..." % NAME_VM_2)
    logger.debug(
        "Configuration:\n name=%s \n flavor=%s \n image=%s \n "
        "network=%s \n" % (NAME_VM_2, flavor, image_id, network_id))
    vm2 = nova_client.servers.create(
        name=NAME_VM_2,
        flavor=flavor,
        image=image_id,
        nics=[{"net-id": network_id}]
    )

    if not waitVmActive(nova_client, vm2):
        logger.error("Instance '%s' cannot be booted. Status is '%s'" % (
            NAME_VM_2, openstack_utils.get_instance_status(nova_client, vm2)))
        return (EXIT_CODE)
    else:
        logger.info("Instance '%s' is ACTIVE." % NAME_VM_2)

    logger.info("Adding '%s' to security group '%s'..." % (NAME_VM_2,
                                                           SECGROUP_NAME))
    openstack_utils.add_secgroup_to_instance(nova_client, vm2.id, sg_id)

    logger.info("Creating floating IP for VM '%s'..." % NAME_VM_2)
    floatip_dic = openstack_utils.create_floating_ip(neutron_client)
    floatip = floatip_dic['fip_addr']
    # floatip_id = floatip_dic['fip_id']

    if floatip is None:
        logger.error("Cannot create floating IP.")
        return (EXIT_CODE)
    logger.info("Floating IP created: '%s'" % floatip)

    logger.info("Associating floating ip: '%s' to VM '%s' "
                % (floatip, NAME_VM_2))
    if not openstack_utils.add_floating_ip(nova_client, vm2.id, floatip):
        logger.error("Cannot associate floating IP to VM.")
        return (EXIT_CODE)

    logger.info("Trying to establish SSH connection to %s..." % floatip)
    username = '******'
    password = '******'
    ssh = paramiko.SSHClient()
    ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())

    timeout = 50
    nolease = False
    got_ip = False
    discover_count = 0
    cidr_first_octet = PRIVATE_SUBNET_CIDR.split('.')[0]
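    # Poll the SSH port for up to ~300 s (50 attempts, 6 s apart), while
    # watching the console log for DHCP progress.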
    while timeout > 0:
        try:
            ssh.connect(floatip, username=username,
                        password=password, timeout=2)
            logger.debug("SSH connection established to %s." % floatip)
            break
        except:
            logger.debug("Waiting for %s..." % floatip)
            time.sleep(6)
            timeout -= 1

        console_log = vm2.get_console_output()

        # print each "Sending discover" captured on the console log
        if (len(re.findall("Sending discover", console_log)) >
                discover_count and not got_ip):
            discover_count += 1
            logger.debug("Console-log '%s': Sending discover..."
                         % NAME_VM_2)

        # Check if eth0 got an IP; the line looks like "inet addr:192.168. ..."
        # If the DHCP agent fails to assign an IP, this line will not appear.
        if "inet addr:" + cidr_first_octet in console_log and not got_ip:
            got_ip = True
            logger.debug("The instance '%s' succeeded to get the IP "
                         "from the dhcp agent.")

        # If DHCP does not work, the console shows "No lease, failing"
        # and the test will most likely time out.
        if "No lease, failing" in console_log and not nolease and not got_ip:
            nolease = True
            logger.debug("Console-log '%s': No lease, failing..."
                         % NAME_VM_2)
            logger.info("The instance failed to get an IP from the "
                        "DHCP agent. The test will probably time out...")

    if timeout == 0:  # 300 sec timeout (5 min)
        logger.error("Cannot establish connection to IP '%s'. Aborting"
                     % floatip)
        return (EXIT_CODE)

    scp = SCPClient(ssh.get_transport())

    ping_script = REPO_PATH + "testcases/OpenStack/vPing/ping.sh"
    try:
        scp.put(ping_script, "~/")
    except:
        logger.error("Cannot SCP the file '%s' to VM '%s'"
                     % (ping_script, floatip))
        return (EXIT_CODE)

    cmd = 'chmod 755 ~/ping.sh'
    (stdin, stdout, stderr) = ssh.exec_command(cmd)
    for line in stdout.readlines():
        print line

    logger.info("Waiting for ping...")
    sec = 0
    stop_time = time.time()
    duration = 0

    cmd = '~/ping.sh ' + test_ip
    flag = False
    status = "FAIL"

    while True:
        time.sleep(1)
        (stdin, stdout, stderr) = ssh.exec_command(cmd)
        output = stdout.readlines()

        for line in output:
            if "vPing OK" in line:
                logger.info("vPing detected!")
                status = "PASS"
                # we consider start time at VM1 booting
                stop_time = time.time()
                duration = round(stop_time - start_time, 1)
                logger.info("vPing duration:'%s' s." % duration)
                EXIT_CODE = 0
                flag = True
                break

            elif sec == PING_TIMEOUT:
                logger.info("Timeout reached.")
                flag = True
                break
        if flag:
            break
        logger.debug("Pinging %s. Waiting for response..." % test_ip)
        sec += 1

    if status == "PASS":
        logger.info("vPing OK")
    else:
        duration = 0
        logger.error("vPing FAILED")

    if args.report:
        try:
            logger.debug("Pushing vPing SSH results into DB...")
            functest_utils.push_results_to_db("functest",
                                              "vping_ssh",
                                              logger,
                                              start_time,
                                              stop_time,
                                              status,
                                              details={'timestart': start_time,
                                                       'duration': duration,
                                                       'status': status})
        except:
            logger.error("Error pushing results into Database '%s'"
                         % sys.exc_info()[0])

    exit(EXIT_CODE)
Code Example #14
def main():

    # ############### GENERAL INITIALISATION ################

    if not os.path.exists(VIMS_DATA_DIR):
        os.makedirs(VIMS_DATA_DIR)

    ks_creds = os_utils.get_credentials("keystone")
    nv_creds = os_utils.get_credentials("nova")
    nt_creds = os_utils.get_credentials("neutron")

    logger.info("Prepare OpenStack plateform (create tenant and user)")
    keystone = ksclient.Client(**ks_creds)

    user_id = os_utils.get_user_id(keystone, ks_creds['username'])
    if user_id == '':
        step_failure("init", "Error : Failed to get id of " +
                     ks_creds['username'])

    tenant_id = os_utils.create_tenant(
        keystone, TENANT_NAME, TENANT_DESCRIPTION)
    if tenant_id == '':
        step_failure("init", "Error : Failed to create " +
                     TENANT_NAME + " tenant")

    roles_name = ["admin", "Admin"]
    role_id = ''
    for role_name in roles_name:
        if role_id == '':
            role_id = os_utils.get_role_id(keystone, role_name)

    if role_id == '':
        logger.error("Error : Failed to get id for %s role" % role_name)

    if not os_utils.add_role_user(keystone, user_id, role_id, tenant_id):
        logger.error("Error : Failed to add %s on tenant" %
                     ks_creds['username'])

    user_id = os_utils.create_user(
        keystone, TENANT_NAME, TENANT_NAME, None, tenant_id)
    if user_id == '':
        logger.error("Error : Failed to create %s user" % TENANT_NAME)

    logger.info("Update OpenStack creds informations")
    ks_creds.update({
        "username": TENANT_NAME,
        "password": TENANT_NAME,
        "tenant_name": TENANT_NAME,
    })

    nt_creds.update({
        "tenant_name": TENANT_NAME,
    })

    nv_creds.update({
        "project_id": TENANT_NAME,
    })

    logger.info("Upload some OS images if it doesn't exist")
    glance_endpoint = keystone.service_catalog.url_for(
        service_type='image', endpoint_type='publicURL')
    glance = glclient.Client(1, glance_endpoint, token=keystone.auth_token)

    for img in IMAGES.keys():
        image_name = IMAGES[img]['image_name']
        image_url = IMAGES[img]['image_url']

        image_id = os_utils.get_image_id(glance, image_name)

        if image_id == '':
            logger.info("""%s image doesn't exist on glance repository. Try
            downloading this image and upload on glance !""" % image_name)
            image_id = download_and_add_image_on_glance(
                glance, image_name, image_url)

        if image_id == '':
            step_failure(
                "init",
                "Error : Failed to find or upload required OS "
                "image for this deployment")

    nova = nvclient.Client("2", **nv_creds)

    logger.info("Update security group quota for this tenant")
    neutron = ntclient.Client(**nt_creds)
    if not os_utils.update_sg_quota(neutron, tenant_id, 50, 100):
        step_failure(
            "init",
            "Failed to update security group quota for tenant " + TENANT_NAME)

    logger.info("Update cinder quota for this tenant")
    from cinderclient import client as cinderclient

    creds_cinder = os_utils.get_credentials("cinder")
    cinder_client = cinderclient.Client('1', creds_cinder['username'],
                                        creds_cinder['api_key'],
                                        creds_cinder['project_id'],
                                        creds_cinder['auth_url'],
                                        service_type="volume")
    if not os_utils.update_cinder_quota(cinder_client, tenant_id, 20, 10, 150):
        step_failure(
            "init", "Failed to update cinder quota for tenant " + TENANT_NAME)

    # ############### CLOUDIFY INITIALISATION ################

    cfy = orchestrator(VIMS_DATA_DIR, CFY_INPUTS, logger)

    cfy.set_credentials(username=ks_creds['username'], password=ks_creds[
                        'password'], tenant_name=ks_creds['tenant_name'],
                        auth_url=ks_creds['auth_url'])

    logger.info("Collect flavor id for cloudify manager server")
    nova = nvclient.Client("2", **nv_creds)

    flavor_name = "m1.medium"
    flavor_id = os_utils.get_flavor_id(nova, flavor_name)
    for requirement in CFY_MANAGER_REQUIERMENTS:
        if requirement == 'ram_min':
            flavor_id = os_utils.get_flavor_id_by_ram_range(
                nova, CFY_MANAGER_REQUIERMENTS['ram_min'], 8196)

    if flavor_id == '':
        logger.error(
            "Failed to find the %s flavor. Falling back to the "
            "default RAM range requirement." % flavor_name)
        flavor_id = os_utils.get_flavor_id_by_ram_range(nova, 4000, 8196)

    if flavor_id == '':
        step_failure("orchestrator",
                     "Failed to find required flavor for this deployment")

    cfy.set_flavor_id(flavor_id)

    image_name = "centos_7"
    image_id = os_utils.get_image_id(glance, image_name)
    for requirement in CFY_MANAGER_REQUIERMENTS:
        if requirement == 'os_image':
            image_id = os_utils.get_image_id(
                glance, CFY_MANAGER_REQUIERMENTS['os_image'])

    if image_id == '':
        step_failure(
            "orchestrator",
            "Error : Failed to find required OS image for cloudify manager")

    cfy.set_image_id(image_id)

    ext_net = os_utils.get_external_net(neutron)
    if not ext_net:
        step_failure("orchestrator", "Failed to get external network")

    cfy.set_external_network_name(ext_net)

    ns = functest_utils.get_resolvconf_ns()
    if ns:
        cfy.set_nameservers(ns)

    logger.info("Prepare virtualenv for cloudify-cli")
    cmd = "chmod +x " + VIMS_DIR + "create_venv.sh"
    functest_utils.execute_command(cmd, logger)
    time.sleep(3)
    cmd = VIMS_DIR + "create_venv.sh " + VIMS_DATA_DIR
    functest_utils.execute_command(cmd, logger)

    cfy.download_manager_blueprint(
        CFY_MANAGER_BLUEPRINT['url'], CFY_MANAGER_BLUEPRINT['branch'])

    # ############### CLOUDIFY DEPLOYMENT ################
    start_time_ts = time.time()
    end_time_ts = start_time_ts
    logger.info("Cloudify deployment Start Time:'%s'" % (
        datetime.datetime.fromtimestamp(start_time_ts).strftime(
            '%Y-%m-%d %H:%M:%S')))

    error = cfy.deploy_manager()
    if error:
        step_failure("orchestrator", error)

    end_time_ts = time.time()
    duration = round(end_time_ts - start_time_ts, 1)
    logger.info("Cloudify deployment duration:'%s'" % duration)
    set_result("orchestrator", duration, "")

    # ############### CLEARWATER INITIALISATION ################

    cw = clearwater(CW_INPUTS, cfy, logger)

    logger.info("Collect flavor id for all clearwater vm")
    nova = nvclient.Client("2", **nv_creds)

    flavor_name = "m1.small"
    flavor_id = os_utils.get_flavor_id(nova, flavor_name)
    for requirement in CW_REQUIERMENTS:
        if requirement == 'ram_min':
            flavor_id = os_utils.get_flavor_id_by_ram_range(
                nova, CW_REQUIERMENTS['ram_min'], 8196)

    if flavor_id == '':
        logger.error(
            "Failed to find the %s flavor. Falling back to the "
            "default RAM range requirement." % flavor_name)
        flavor_id = os_utils.get_flavor_id_by_ram_range(nova, 4000, 8196)

    if flavor_id == '':
        step_failure(
            "vIMS", "Failed to find required flavor for this deployment")

    cw.set_flavor_id(flavor_id)

    image_name = "ubuntu_14.04"
    image_id = os_utils.get_image_id(glance, image_name)
    for requirement in CW_REQUIERMENTS:
        if requirement == 'os_image':
            image_id = os_utils.get_image_id(
                glance, CW_REQUIERMENTS['os_image'])

    if image_id == '':
        step_failure(
            "vIMS",
            "Error : Failed to find required OS image for cloudify manager")

    cw.set_image_id(image_id)

    ext_net = os_utils.get_external_net(neutron)
    if not ext_net:
        step_failure("vIMS", "Failed to get external network")

    cw.set_external_network_name(ext_net)

    # ############### CLEARWATER DEPLOYMENT ################

    start_time_ts = time.time()
    end_time_ts = start_time_ts
    logger.info("vIMS VNF deployment Start Time:'%s'" % (
        datetime.datetime.fromtimestamp(start_time_ts).strftime(
            '%Y-%m-%d %H:%M:%S')))

    error = cw.deploy_vnf(CW_BLUEPRINT)
    if error:
        step_failure("vIMS", error)

    end_time_ts = time.time()
    duration = round(end_time_ts - start_time_ts, 1)
    logger.info("vIMS VNF deployment duration:'%s'" % duration)
    set_result("vIMS", duration, "")

    # ############### CLEARWATER TEST ################

    test_clearwater()

    # ########## CLEARWATER UNDEPLOYMENT ############

    cw.undeploy_vnf()

    # ########### CLOUDIFY UNDEPLOYMENT #############

    cfy.undeploy_manager()

    # ############## GENERAL CLEANUP ################
    if args.noclean:
        exit(0)

    ks_creds = os_utils.get_credentials("keystone")

    keystone = ksclient.Client(**ks_creds)

    logger.info("Removing %s tenant .." % CFY_INPUTS['keystone_tenant_name'])
    tenant_id = os_utils.get_tenant_id(
        keystone, CFY_INPUTS['keystone_tenant_name'])
    if tenant_id == '':
        logger.error("Error : Failed to get id of %s tenant" %
                     CFY_INPUTS['keystone_tenant_name'])
    else:
        if not os_utils.delete_tenant(keystone, tenant_id):
            logger.error("Error : Failed to remove %s tenant" %
                         CFY_INPUTS['keystone_tenant_name'])

    logger.info("Removing %s user .." % CFY_INPUTS['keystone_username'])
    user_id = os_utils.get_user_id(
        keystone, CFY_INPUTS['keystone_username'])
    if user_id == '':
        logger.error("Error : Failed to get id of %s user" %
                     CFY_INPUTS['keystone_username'])
    else:
        if not os_utils.delete_user(keystone, user_id):
            logger.error("Error : Failed to remove %s user" %
                         CFY_INPUTS['keystone_username'])
Code Example #15
def main():
    global SUMMARY
    global network_dict
    start_time = time.time()
    stop_time = start_time

    # configure script
    if args.test_name not in tests:
        logger.error("Invalid test name '%s'" % args.test_name)
        exit(-1)

    SUMMARY = []
    creds_nova = openstack_utils.get_credentials("nova")
    nova_client = novaclient.Client('2', **creds_nova)
    creds_neutron = openstack_utils.get_credentials("neutron")
    neutron_client = neutronclient.Client(**creds_neutron)
    creds_keystone = openstack_utils.get_credentials("keystone")
    keystone_client = keystoneclient.Client(**creds_keystone)
    glance_endpoint = keystone_client.service_catalog.url_for(
        service_type='image', endpoint_type='publicURL')
    glance_client = glanceclient.Client(1, glance_endpoint,
                                        token=keystone_client.auth_token)
    creds_cinder = openstack_utils.get_credentials("cinder")
    cinder_client = cinderclient.Client('2', creds_cinder['username'],
                                        creds_cinder['api_key'],
                                        creds_cinder['project_id'],
                                        creds_cinder['auth_url'],
                                        service_type="volume")

    client_dict['neutron'] = neutron_client

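    # Make sure at least one volume type exists; create one if the list is
    # empty.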
    volume_types = openstack_utils.list_volume_types(cinder_client,
                                                     private=False)
    if not volume_types:
        volume_type = openstack_utils.create_volume_type(
            cinder_client, CINDER_VOLUME_TYPE_NAME)
        if not volume_type:
            logger.error("Failed to create volume type...")
            exit(-1)
        else:
            logger.debug("Volume type '%s' created succesfully..."
                         % CINDER_VOLUME_TYPE_NAME)
    else:
        logger.debug("Using existing volume type(s)...")

    image_id = openstack_utils.get_image_id(glance_client, GLANCE_IMAGE_NAME)
    image_exists = False

    if image_id == '':
        logger.debug("Creating image '%s' from '%s'..." % (GLANCE_IMAGE_NAME,
                                                           GLANCE_IMAGE_PATH))
        image_id = openstack_utils.create_glance_image(glance_client,
                                                       GLANCE_IMAGE_NAME,
                                                       GLANCE_IMAGE_PATH)
        if not image_id:
            logger.error("Failed to create the Glance image...")
            exit(-1)
        else:
            logger.debug("Image '%s' with ID '%s' created succesfully ."
                         % (GLANCE_IMAGE_NAME, image_id))
    else:
        logger.debug("Using existing image '%s' with ID '%s'..."
                     % (GLANCE_IMAGE_NAME, image_id))
        image_exists = True

    logger.debug("Creating network '%s'..." % PRIVATE_NET_NAME)
    network_dict = openstack_utils.create_network_full(logger,
                                                       client_dict['neutron'],
                                                       PRIVATE_NET_NAME,
                                                       PRIVATE_SUBNET_NAME,
                                                       ROUTER_NAME,
                                                       PRIVATE_SUBNET_CIDR)
    if not network_dict:
        logger.error("Failed to create network...")
        exit(-1)
    else:
        if not openstack_utils.update_neutron_net(client_dict['neutron'],
                                                  network_dict['net_id'],
                                                  shared=True):
            logger.error("Failed to update network...")
            exit(-1)
        else:
            logger.debug("Network '%s' available..." % PRIVATE_NET_NAME)

    if args.test_name == "all":
        for test_name in tests:
            if test_name not in ('all', 'vm'):
                run_task(test_name)
    else:
        logger.debug("Test name: " + args.test_name)
        run_task(args.test_name)

    report = ("\n"
              "                                                              "
              "\n"
              "                     Rally Summary Report\n"
              "\n"
              "+===================+============+===============+===========+"
              "\n"
              "| Module            | Duration   | nb. Test Run  | Success   |"
              "\n"
              "+===================+============+===============+===========+"
              "\n")
    payload = []
    stop_time = time.time()

    # for each scenario we draw a row for the table
    total_duration = 0.0
    total_nb_tests = 0
    total_success = 0.0
    for s in SUMMARY:
        name = "{0:<17}".format(s['test_name'])
        duration = float(s['overall_duration'])
        total_duration += duration
        duration = time.strftime("%M:%S", time.gmtime(duration))
        duration = "{0:<10}".format(duration)
        nb_tests = "{0:<13}".format(s['nb_tests'])
        total_nb_tests += int(s['nb_tests'])
        success = "{0:<10}".format(str(s['success']) + '%')
        total_success += float(s['success'])
        report += ("" +
                   "| " + name + " | " + duration + " | " +
                   nb_tests + " | " + success + "|\n" +
                   "+-------------------+------------"
                   "+---------------+-----------+\n")
        payload.append({'module': name,
                        'details': {'duration': s['overall_duration'],
                                    'nb tests': s['nb_tests'],
                                    'success': s['success']}})

    total_duration_str = time.strftime("%H:%M:%S", time.gmtime(total_duration))
    total_duration_str2 = "{0:<10}".format(total_duration_str)
    total_nb_tests_str = "{0:<13}".format(total_nb_tests)
    total_success = "{:0.2f}".format(total_success / len(SUMMARY))
    total_success_str = "{0:<10}".format(str(total_success) + '%')
    report += "+===================+============+===============+===========+"
    report += "\n"
    report += ("| TOTAL:            | " + total_duration_str2 + " | " +
               total_nb_tests_str + " | " + total_success_str + "|\n")
    report += "+===================+============+===============+===========+"
    report += "\n"

    logger.info("\n" + report)
    payload.append({'summary': {'duration': total_duration,
                                'nb tests': total_nb_tests,
                                'nb success': total_success}})

    # Generate json results for DB
    # json_results = {"timestart": time_start, "duration": total_duration,
    #                "tests": int(total_nb_tests),
    #                "success": int(total_success)}
    # logger.info("Results: "+str(json_results))

    # Evaluation of the success criteria
    status = "failed"
    # for Rally we decided that the overall success rate must be above 90%
    if total_success >= 90:
        status = "passed"

    if args.sanity:
        case_name = "rally_sanity"
    else:
        case_name = "rally_full"

    if args.report:
        logger.debug("Pushing Rally summary into DB...")
        functest_utils.push_results_to_db("functest",
                                          case_name,
                                          None,
                                          start_time,
                                          stop_time,
                                          status,
                                          payload)
    if args.noclean:
        exit(0)

    if not image_exists:
        logger.debug("Deleting image '%s' with ID '%s'..."
                     % (GLANCE_IMAGE_NAME, image_id))
        if not openstack_utils.delete_glance_image(nova_client, image_id):
            logger.error("Error deleting the glance image")

    if not volume_types:
        logger.debug("Deleting volume type '%s'..."
                     % CINDER_VOLUME_TYPE_NAME)
        if not openstack_utils.delete_volume_type(cinder_client, volume_type):
            logger.error("Error in deleting volume type...")