def SetOnosIp():
    """Resolve the ONOS controller IP from the neutron endpoint and export it.

    The hostname of the 'network' service endpoint is written to the OC1
    environment variable for downstream ONOS test tooling.
    """
    # equivalent CLI: "openstack catalog show network | grep publicURL"
    endpoint = openstack_utils.get_endpoint(service_type='network')
    onos_ip = urlparse.urlparse(endpoint).hostname
    os.environ['OC1'] = onos_ip
    # short settle delay kept from the original implementation
    time.sleep(2)
    logger.debug("ONOS IP is " + onos_ip)
def run(self, **kwargs):
    """Run suites in OPNFV environment

    It basically check env vars to call main() with the keywords
    required.

    Args:
        kwargs: Arbitrary keyword arguments.

    Returns:
        EX_OK if all suites ran well.
        EX_RUN_ERROR otherwise.
    """
    try:
        # Default suites unless the caller explicitly passes a 'suites' kwarg.
        suites = self.default_suites
        try:
            suites = kwargs["suites"]
        except KeyError:
            pass
        # ODL defaults: controller assumed co-located with neutron endpoint.
        neutron_url = op_utils.get_endpoint(service_type='network')
        kwargs = {'neutronip': urllib.parse.urlparse(neutron_url).hostname}
        kwargs['odlip'] = kwargs['neutronip']
        kwargs['odlwebport'] = '8080'
        kwargs['odlrestconfport'] = '8181'
        kwargs['odlusername'] = '******'
        kwargs['odlpassword'] = '******'
        installer_type = None
        if 'INSTALLER_TYPE' in os.environ:
            installer_type = os.environ['INSTALLER_TYPE']
        # Missing OS_* vars raise KeyError and abort the run (handled below).
        kwargs['osusername'] = os.environ['OS_USERNAME']
        kwargs['ostenantname'] = os.environ['OS_TENANT_NAME']
        kwargs['osauthurl'] = os.environ['OS_AUTH_URL']
        kwargs['ospassword'] = os.environ['OS_PASSWORD']
        # Per-installer overrides for controller IP and ports.
        if installer_type == 'fuel':
            kwargs['odlwebport'] = '8282'
        elif installer_type == 'apex' or installer_type == 'netvirt':
            kwargs['odlip'] = os.environ['SDN_CONTROLLER_IP']
            kwargs['odlwebport'] = '8081'
            kwargs['odlrestconfport'] = '8081'
        elif installer_type == 'joid':
            kwargs['odlip'] = os.environ['SDN_CONTROLLER']
        elif installer_type == 'compass':
            kwargs['odlrestconfport'] = '8080'
        elif installer_type == 'daisy':
            kwargs['odlip'] = os.environ['SDN_CONTROLLER_IP']
            kwargs['odlwebport'] = '8181'
            kwargs['odlrestconfport'] = '8087'
        else:
            # Unknown installer: require SDN_CONTROLLER_IP to be set.
            kwargs['odlip'] = os.environ['SDN_CONTROLLER_IP']
    except KeyError as ex:
        self.__logger.error("Cannot run ODL testcases. "
                            "Please check env var: "
                            "%s", str(ex))
        return self.EX_RUN_ERROR
    except Exception:  # pylint: disable=broad-except
        self.__logger.exception("Cannot run ODL testcases.")
        return self.EX_RUN_ERROR

    return self.run_suites(suites, **kwargs)
def set_onos_ip(self):
    """Resolve the ONOS controller IP and export it as env var OC1.

    On joid the address is taken from the SDN_CONTROLLER env var;
    otherwise the neutron endpoint hostname is used.
    """
    is_joid = (self.installer_type and
               self.installer_type.lower() == 'joid')
    if is_joid:
        # joid publishes the controller address via SDN_CONTROLLER
        onos_ip = re.search(
            r"\d+\.\d+\.\d+\.\d+", os.getenv('SDN_CONTROLLER')).group()
    else:
        onos_ip = urlparse.urlparse(
            openstack_utils.get_endpoint(service_type='network')).hostname
    os.environ['OC1'] = onos_ip
    self.logger.debug("ONOS IP is %s", onos_ip)
def run(self, **kwargs):
    """Build the ODL test keyword set from env vars and run the suites.

    Returns EX_OK from main() on success, EX_RUN_ERROR when a required
    environment variable is missing or any unexpected error occurs.
    """
    try:
        # Default suites unless the caller explicitly passes 'suites'.
        suites = self.default_suites
        try:
            suites = kwargs["suites"]
        except KeyError:
            pass
        keystone_url = op_utils.get_endpoint(service_type='identity')
        neutron_url = op_utils.get_endpoint(service_type='network')
        kwargs = {'keystoneip': urlparse.urlparse(keystone_url).hostname}
        kwargs['neutronip'] = urlparse.urlparse(neutron_url).hostname
        # Default: ODL assumed co-located with the neutron endpoint.
        kwargs['odlip'] = kwargs['neutronip']
        kwargs['odlwebport'] = '8080'
        kwargs['odlrestconfport'] = '8181'
        kwargs['odlusername'] = '******'
        kwargs['odlpassword'] = '******'
        installer_type = None
        if 'INSTALLER_TYPE' in os.environ:
            installer_type = os.environ['INSTALLER_TYPE']
        # Missing OS_* vars raise KeyError and abort the run (handled below).
        kwargs['osusername'] = os.environ['OS_USERNAME']
        kwargs['ostenantname'] = os.environ['OS_TENANT_NAME']
        kwargs['ospassword'] = os.environ['OS_PASSWORD']
        # Per-installer overrides for controller IP and ports.
        if installer_type == 'fuel':
            kwargs['odlwebport'] = '8282'
        elif installer_type == 'apex' or installer_type == 'netvirt':
            kwargs['odlip'] = os.environ['SDN_CONTROLLER_IP']
            kwargs['odlwebport'] = '8081'
            kwargs['odlrestconfport'] = '8081'
        elif installer_type == 'joid':
            kwargs['odlip'] = os.environ['SDN_CONTROLLER']
        elif installer_type == 'compass':
            kwargs['odlwebport'] = '8181'
        else:
            # Unknown installer: require SDN_CONTROLLER_IP to be set.
            kwargs['odlip'] = os.environ['SDN_CONTROLLER_IP']
    except KeyError as e:
        self.logger.error("Cannot run ODL testcases. "
                          "Please check env var: "
                          "%s" % str(e))
        return self.EX_RUN_ERROR
    except Exception:
        # NOTE(review): broad catch keeps CI from crashing; logged with trace.
        self.logger.exception("Cannot run ODL testcases.")
        return self.EX_RUN_ERROR

    return self.main(suites, **kwargs)
def configure_tempest_multisite_params(tempest_conf_file):
    """
    Add/update multisite parameters into tempest.conf file generated by Rally

    For fuel/MOS the kingbird endpoint is discovered by sshing through the
    installer node to a controller and reading bind_host/bind_port from
    kingbird.conf; otherwise the keystone catalog is used directly.
    """
    logger.debug("Updating multisite tempest.conf parameters...")
    config = ConfigParser.RawConfigParser()
    config.read(tempest_conf_file)
    config.set('service_available', 'kingbird', 'true')
    # cmd = ("openstack endpoint show kingbird | grep publicurl |"
    #        "awk '{print $4}' | awk -F '/' '{print $4}'")
    # kingbird_api_version = os.popen(cmd).read()
    # kingbird_api_version = os_utils.get_endpoint(service_type='multisite')
    if CI_INSTALLER_TYPE == 'fuel':
        # For MOS based setup, the service is accessible
        # via bind host
        kingbird_conf_path = "/etc/kingbird/kingbird.conf"
        installer_type = CI_INSTALLER_TYPE
        installer_ip = CI_INSTALLER_IP
        installer_username = CONST.__getattribute__(
            'multisite_{}_installer_username'.format(installer_type))
        installer_password = CONST.__getattribute__(
            'multisite_{}_installer_password'.format(installer_type))

        ssh_options = ("-o UserKnownHostsFile=/dev/null -o "
                       "StrictHostKeyChecking=no")

        # Get the controller IP from the fuel node
        cmd = ('sshpass -p %s ssh 2>/dev/null %s %s@%s '
               '\'fuel node --env 1| grep controller | grep "True\| 1" '
               '| awk -F\| "{print \$5}"\'' % (installer_password,
                                               ssh_options,
                                               installer_username,
                                               installer_ip))
        multisite_controller_ip = "".join(os.popen(cmd).read().split())

        # Login to controller and get bind host details
        cmd = ('sshpass -p %s ssh 2>/dev/null %s %s@%s "ssh %s \\" '
               'grep -e "^bind_" %s \\""' % (installer_password,
                                             ssh_options,
                                             installer_username,
                                             installer_ip,
                                             multisite_controller_ip,
                                             kingbird_conf_path))
        bind_details = os.popen(cmd).read()
        bind_details = "".join(bind_details.split())
        # Extract port number from the bind details
        bind_port = re.findall(r"\D(\d{4})", bind_details)[0]
        # Extract ip address from the bind details
        bind_host = re.findall(r"\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}",
                               bind_details)[0]
        kingbird_endpoint_url = "http://%s:%s/" % (bind_host, bind_port)
    else:
        # cmd = "openstack endpoint show kingbird | grep publicurl |\
        #       awk '{print $4}' | awk -F '/' '{print $3}'"
        # kingbird_endpoint_url = os.popen(cmd).read()
        kingbird_endpoint_url = os_utils.get_endpoint(service_type='kingbird')

    try:
        config.add_section("kingbird")
    except Exception:
        # NOTE(review): broad except deliberately treats DuplicateSectionError
        # as "section already present"; would be safer to catch it explicitly.
        logger.info('kingbird section exist')

    # set the domain id
    config.set('auth', 'admin_domain_name', 'default')

    config.set('kingbird', 'endpoint_type', 'publicURL')
    config.set('kingbird', 'TIME_TO_SYNC', '120')
    config.set('kingbird', 'endpoint_url', kingbird_endpoint_url)
    config.set('kingbird', 'api_version', 'v1.0')
    # 'wb' is fine under Python 2's ConfigParser, which writes str bytes.
    with open(tempest_conf_file, 'wb') as config_file:
        config.write(config_file)

    backup_tempest_config(tempest_conf_file)
def get_ip(self, type):  # pylint: disable=redefined-builtin
    """Return the hostname of the public endpoint for *type*.

    `type` is a keystone service type such as 'network' or 'identity'
    (the name shadows the builtin but is kept for caller compatibility).
    """
    endpoint = openstack_utils.get_endpoint(service_type=type)
    self.logger.debug('get_ip for %s: %s', type, endpoint)
    return urlparse.urlparse(endpoint).hostname
def deploy_vnf(self):
    """Upload the vrouter blueprint and deploy the VNF via Cloudify.

    Clones the blueprint repo if needed, creates the flavor, fills the
    blueprint inputs (image/flavor ids, keystone creds), then creates and
    installs the deployment. Returns True when the install execution
    terminates successfully, False otherwise; self.details['vnf'] is
    updated with status and duration either way.
    """
    start_time = time.time()
    self.__logger.info("Upload VNFD")
    cfy_client = self.orchestrator['object']
    descriptor = self.vnf['descriptor']
    self.deployment_name = descriptor.get('name')

    # Fetch the blueprint only once; later runs reuse the local clone.
    vrouter_blueprint_dir = os.path.join(self.data_dir,
                                         self.util.blueprint_dir)
    if not os.path.exists(vrouter_blueprint_dir):
        Repo.clone_from(descriptor.get('url'),
                        vrouter_blueprint_dir,
                        branch=descriptor.get('version'))

    cfy_client.blueprints.upload(
        vrouter_blueprint_dir + self.util.blueprint_file_name,
        descriptor.get('name'))

    self.__logger.info("Get or create flavor for vrouter")
    flavor_settings = FlavorSettings(
        name=self.vnf['requirements']['flavor']['name'],
        ram=self.vnf['requirements']['flavor']['ram_min'],
        disk=25,
        vcpus=1)
    flavor_creator = OpenStackFlavor(self.snaps_creds, flavor_settings)
    flavor = flavor_creator.create()
    self.created_object.append(flavor_creator)

    # set image name (same vyos image for both target and reference VNFs)
    glance = glance_utils.glance_client(self.snaps_creds)
    image = glance_utils.get_image(glance, "vyos1.1.7")
    self.vnf['inputs'].update(dict(target_vnf_image_id=image.id))
    self.vnf['inputs'].update(dict(reference_vnf_image_id=image.id))

    # set flavor id
    self.vnf['inputs'].update(dict(target_vnf_flavor_id=flavor.id))
    self.vnf['inputs'].update(dict(reference_vnf_flavor_id=flavor.id))

    # tenant name doubles as username/password in this test tenant
    self.vnf['inputs'].update(dict(keystone_username=self.tenant_name))
    self.vnf['inputs'].update(dict(keystone_password=self.tenant_name))
    self.vnf['inputs'].update(dict(keystone_tenant_name=self.tenant_name))
    self.vnf['inputs'].update(
        dict(keystone_url=os_utils.get_endpoint('identity')))

    self.__logger.info("Create VNF Instance")
    cfy_client.deployments.create(descriptor.get('name'),
                                  descriptor.get('name'),
                                  self.vnf.get('inputs'))

    # Wait for the deployment-environment creation workflow (up to 2h).
    wait_for_execution(cfy_client,
                       get_execution_id(cfy_client, descriptor.get('name')),
                       self.__logger,
                       timeout=7200)

    self.__logger.info("Start the VNF Instance deployment")
    execution = cfy_client.executions.start(descriptor.get('name'),
                                            'install')
    # Show execution log
    execution = wait_for_execution(cfy_client, execution, self.__logger)

    duration = time.time() - start_time

    self.__logger.info(execution)
    if execution.status == 'terminated':
        self.details['vnf'].update(status='PASS', duration=duration)
        result = True
    else:
        self.details['vnf'].update(status='FAIL', duration=duration)
        result = False
    return result
def deploy_orchestrator(self):
    """
    Deploy Cloudify Manager.
    network, security group, fip, VM creation

    Creates keypair, network/subnet/router, security group, flavor and the
    manager VM, waits until the manager reports 'running', pushes the
    OpenStack creds as Cloudify secrets and copies the private key onto
    the manager. Raises Exception if the manager never comes up; returns
    True on success. All created resources are appended to
    self.created_object for later cleanup.
    """
    # network creation

    start_time = time.time()
    self.__logger.info("Creating keypair ...")
    kp_file = os.path.join(self.data_dir, "cloudify_vrouter.pem")
    keypair_settings = KeypairSettings(name='cloudify_vrouter_kp',
                                       private_filepath=kp_file)
    keypair_creator = OpenStackKeypair(self.snaps_creds, keypair_settings)
    keypair_creator.create()
    self.created_object.append(keypair_creator)

    self.__logger.info("Creating full network ...")
    subnet_settings = SubnetSettings(name='cloudify_vrouter_subnet',
                                     cidr='10.67.79.0/24')
    network_settings = NetworkSettings(name='cloudify_vrouter_network',
                                       subnet_settings=[subnet_settings])
    network_creator = OpenStackNetwork(self.snaps_creds, network_settings)
    network_creator.create()
    self.created_object.append(network_creator)
    ext_net_name = snaps_utils.get_ext_net_name(self.snaps_creds)
    router_creator = OpenStackRouter(
        self.snaps_creds,
        RouterSettings(name='cloudify_vrouter_router',
                       external_gateway=ext_net_name,
                       internal_subnets=[subnet_settings.name]))
    router_creator.create()
    self.created_object.append(router_creator)

    # security group creation: wide-open TCP and UDP ingress for the manager
    self.__logger.info("Creating security group for cloudify manager vm")
    sg_rules = list()
    sg_rules.append(
        SecurityGroupRuleSettings(sec_grp_name="sg-cloudify-manager",
                                  direction=Direction.ingress,
                                  protocol=Protocol.tcp, port_range_min=1,
                                  port_range_max=65535))
    sg_rules.append(
        SecurityGroupRuleSettings(sec_grp_name="sg-cloudify-manager",
                                  direction=Direction.ingress,
                                  protocol=Protocol.udp, port_range_min=1,
                                  port_range_max=65535))
    security_group_creator = OpenStackSecurityGroup(
        self.snaps_creds,
        SecurityGroupSettings(name="sg-cloudify-manager",
                              rule_settings=sg_rules))
    security_group_creator.create()
    self.created_object.append(security_group_creator)

    # orchestrator VM flavor
    self.__logger.info("Get or create flavor for cloudify manager vm ...")
    flavor_settings = FlavorSettings(
        name=self.orchestrator['requirements']['flavor']['name'],
        ram=self.orchestrator['requirements']['flavor']['ram_min'],
        disk=50,
        vcpus=2)
    flavor_creator = OpenStackFlavor(self.snaps_creds, flavor_settings)
    flavor_creator.create()
    self.created_object.append(flavor_creator)
    # Existing glance image is reused (exists=True), never uploaded here.
    image_settings = ImageSettings(
        name=self.orchestrator['requirements']['os_image'],
        image_user='******',
        exists=True)
    port_settings = PortSettings(name='cloudify_manager_port',
                                 network_name=network_settings.name)
    manager_settings = VmInstanceSettings(
        name='cloudify_manager',
        flavor=flavor_settings.name,
        port_settings=[port_settings],
        security_group_names=[
            security_group_creator.sec_grp_settings.name
        ],
        floating_ip_settings=[
            FloatingIpSettings(
                name='cloudify_manager_fip',
                port_name=port_settings.name,
                router_name=router_creator.router_settings.name)
        ])
    manager_creator = OpenStackVmInstance(self.snaps_creds,
                                          manager_settings,
                                          image_settings,
                                          keypair_settings)
    self.__logger.info("Creating cloudify manager VM")
    manager_creator.create()
    self.created_object.append(manager_creator)

    public_auth_url = os_utils.get_endpoint('identity')

    self.__logger.info("Set creds for cloudify manager")
    cfy_creds = dict(keystone_username=self.tenant_name,
                     keystone_password=self.tenant_name,
                     keystone_tenant_name=self.tenant_name,
                     keystone_url=public_auth_url)

    cfy_client = CloudifyClient(host=manager_creator.get_floating_ip().ip,
                                username='******',
                                password='******',
                                tenant='default_tenant')

    self.orchestrator['object'] = cfy_client

    self.cfy_manager_ip = manager_creator.get_floating_ip().ip

    # Poll the manager status: 10 attempts, 30s apart (max ~5 minutes).
    self.__logger.info("Attemps running status of the Manager")
    cfy_status = None
    retry = 10
    while str(cfy_status) != 'running' and retry:
        try:
            cfy_status = cfy_client.manager.get_status()['status']
            self.__logger.debug("The current manager status is %s",
                                cfy_status)
        except Exception:  # pylint: disable=broad-except
            self.__logger.warning("Cloudify Manager isn't " +
                                  "up and running. Retrying ...")
        retry = retry - 1
        time.sleep(30)

    if str(cfy_status) == 'running':
        self.__logger.info("Cloudify Manager is up and running")
    else:
        raise Exception("Cloudify Manager isn't up and running")

    # Create-or-update each OpenStack cred as a Cloudify secret.
    self.__logger.info("Put OpenStack creds in manager")
    secrets_list = cfy_client.secrets.list()
    for k, val in cfy_creds.iteritems():
        if not any(d.get('key', None) == k for d in secrets_list):
            cfy_client.secrets.create(k, val)
        else:
            cfy_client.secrets.update(k, val)

    duration = time.time() - start_time

    # Copy the private key to the manager so blueprints can ssh to VMs.
    self.__logger.info("Put private keypair in manager")
    if manager_creator.vm_ssh_active(block=True):
        ssh = manager_creator.ssh_client()
        scp = SCPClient(ssh.get_transport(), socket_timeout=15.0)
        scp.put(kp_file, '~/')
        cmd = "sudo cp ~/cloudify_vrouter.pem /etc/cloudify/"
        run_blocking_ssh_command(ssh, cmd)
        cmd = "sudo chmod 444 /etc/cloudify/cloudify_vrouter.pem"
        run_blocking_ssh_command(ssh, cmd)
        cmd = "sudo yum install -y gcc python-devel"
        run_blocking_ssh_command(
            ssh, cmd, "Unable to install packages on manager")

    self.details['orchestrator'].update(status='PASS', duration=duration)

    self.vnf['inputs'].update(dict(external_network_name=ext_net_name))

    return True
def deploy_orchestrator(self, **kwargs):
    """Prepare the tenant and deploy the Cloudify manager orchestrator.

    Uploads any missing glance images, extends the tenant security-group
    quota, configures the Orchestrator helper (creds, flavor, image,
    external network, nameservers), prepares the cloudify-cli virtualenv
    and finally deploys the manager blueprint. Calls self.step_failure()
    on any unrecoverable error; returns a PASS result dict otherwise.
    """
    self.logger.info("Additional pre-configuration steps")
    self.neutron_client = os_utils.get_neutron_client(self.admin_creds)
    self.glance_client = os_utils.get_glance_client(self.admin_creds)
    self.keystone_client = os_utils.get_keystone_client(self.admin_creds)
    self.nova_client = os_utils.get_nova_client(self.admin_creds)

    # needs some images
    self.logger.info("Upload some OS images if it doesn't exist")
    temp_dir = os.path.join(self.data_dir, "tmp/")
    for image_name, image_url in self.images.iteritems():
        self.logger.info("image: %s, url: %s" % (image_name, image_url))
        try:
            image_id = os_utils.get_image_id(self.glance_client,
                                             image_name)
            self.logger.debug("image_id: %s" % image_id)
        except:
            # NOTE(review): bare except hides real failures and leaves
            # image_id possibly unbound on the first iteration — a targeted
            # exception type would be safer.
            self.logger.error("Unexpected error: %s" % sys.exc_info()[0])

        if image_id == '':
            self.logger.info("""%s image doesn't exist on glance repository. Try
            downloading this image and upload on glance !""" % image_name)
            image_id = download_and_add_image_on_glance(self.glance_client,
                                                        image_name,
                                                        image_url,
                                                        temp_dir)

            if image_id == '':
                self.step_failure("Failed to find or upload required OS "
                                  "image for this deployment")

    # Need to extend quota
    self.logger.info("Update security group quota for this tenant")
    tenant_id = os_utils.get_tenant_id(self.keystone_client,
                                       self.tenant_name)
    self.logger.debug("Tenant id found %s" % tenant_id)
    if not os_utils.update_sg_quota(self.neutron_client,
                                    tenant_id, 50, 100):
        self.step_failure("Failed to update security group quota" +
                          " for tenant " + self.tenant_name)
    self.logger.debug("group quota extended")

    # start the deployment of cloudify
    public_auth_url = os_utils.get_endpoint('identity')

    self.logger.debug("CFY inputs: %s" % self.orchestrator['inputs'])
    cfy = Orchestrator(self.data_dir, self.orchestrator['inputs'])
    self.orchestrator['object'] = cfy
    self.logger.debug("Orchestrator object created")

    self.logger.debug("Tenant name: %s" % self.tenant_name)

    # tenant name doubles as username/password for the test tenant
    cfy.set_credentials(username=self.tenant_name,
                        password=self.tenant_name,
                        tenant_name=self.tenant_name,
                        auth_url=public_auth_url)
    self.logger.info("Credentials set in CFY")

    # orchestrator VM flavor
    self.logger.info("Check Flavor is available, if not create one")
    self.logger.debug("Flavor details %s " %
                      self.orchestrator['requirements']['ram_min'])
    flavor_exist, flavor_id = os_utils.get_or_create_flavor(
        "m1.large",
        self.orchestrator['requirements']['ram_min'],
        '1',
        '1',
        public=True)
    self.logger.debug("Flavor id: %s" % flavor_id)

    if not flavor_id:
        self.logger.info("Available flavors are: ")
        self.logger.info(self.nova_client.flavor.list())
        self.step_failure("Failed to find required flavor"
                          "for this deployment")
    cfy.set_flavor_id(flavor_id)
    self.logger.debug("Flavor OK")

    # orchestrator VM image
    self.logger.debug("Orchestrator image")
    if 'os_image' in self.orchestrator['requirements'].keys():
        image_id = os_utils.get_image_id(
            self.glance_client,
            self.orchestrator['requirements']['os_image'])
        self.logger.debug("Orchestrator image id: %s" % image_id)
        if image_id == '':
            self.logger.error("CFY image not found")
            self.step_failure("Failed to find required OS image"
                              " for cloudify manager")
    else:
        self.step_failure("Failed to find required OS image"
                          " for cloudify manager")

    cfy.set_image_id(image_id)
    self.logger.debug("Orchestrator image set")

    self.logger.debug("Get External network")
    ext_net = os_utils.get_external_net(self.neutron_client)
    self.logger.debug("External network: %s" % ext_net)
    if not ext_net:
        self.step_failure("Failed to get external network")

    cfy.set_external_network_name(ext_net)
    self.logger.debug("CFY External network set")

    self.logger.debug("get resolvconf")
    ns = ft_utils.get_resolvconf_ns()
    if ns:
        cfy.set_nameservers(ns)
    self.logger.debug("Resolvconf set")

    self.logger.info("Prepare virtualenv for cloudify-cli")
    cmd = "chmod +x " + self.case_dir + "create_venv.sh"
    ft_utils.execute_command(cmd)
    time.sleep(3)
    cmd = self.case_dir + "create_venv.sh " + self.data_dir
    ft_utils.execute_command(cmd)

    cfy.download_manager_blueprint(
        self.orchestrator['blueprint']['url'],
        self.orchestrator['blueprint']['branch'])

    cfy.deploy_manager()
    return {'status': 'PASS', 'result': ''}
def init_performance_testToplogy(self, tplgy, performance_test_config):
    """Configure the topology object for a performance test.

    Resolves flavor and image ids for the target VNF and the tester VM
    (falling back to PERFORMANCE_TEST_TPLGY_DEFAULT when a name lookup
    returns ''), then sets region, external network and credentials on
    *tplgy*. Returns step_failure(...) on any missing resource, otherwise
    the current result data.
    """
    tplgy.delete_config()

    vnf_list = performance_test_config["vnf_list"]
    target_vnf = self.util.get_vnf_info(vnf_list, "target_vnf")
    tester_vm = self.util.get_vnf_info(vnf_list, "tester_vm")

    # Image/flavor names are optional in the config; default to "".
    target_vnf_image_name = ""
    if "image_name" in target_vnf:
        target_vnf_image_name = target_vnf["image_name"]
    target_vnf_flavor_name = ""
    if "flavor_name" in target_vnf:
        target_vnf_flavor_name = target_vnf["flavor_name"]
    self.logger.debug("target_vnf image name : " + target_vnf_image_name)
    self.logger.debug("target_vnf flavor name : " + target_vnf_flavor_name)

    tester_vm_image_name = ""
    if "image_name" in tester_vm:
        tester_vm_image_name = tester_vm["image_name"]
    tester_vm_flavor_name = ""
    if "flavor_name" in tester_vm:
        tester_vm_flavor_name = tester_vm["flavor_name"]
    self.logger.debug("tester vm image name : " + tester_vm_image_name)
    self.logger.debug("tester vm flavor name : " + tester_vm_flavor_name)

    nova = os_utils.get_nova_client()

    # Setting the flavor id for target vnf.
    target_vnf_flavor_id = os_utils.get_flavor_id(nova,
                                                  target_vnf_flavor_name)
    if target_vnf_flavor_id == '':
        # Fallback: pick any flavor in [ram_min, VNF_MAX_RAM_SIZE].
        for default in self.PERFORMANCE_TEST_TPLGY_DEFAULT:
            if default == 'ram_min':
                target_vnf_flavor_id = os_utils.get_flavor_id_by_ram_range(
                    nova,
                    self.PERFORMANCE_TEST_TPLGY_DEFAULT['ram_min'],
                    VNF_MAX_RAM_SIZE)

    if target_vnf_flavor_id == '':
        return self.step_failure(
            "making_testTopology",
            "Error : Failed to find flavor for target vnf")
    tplgy.set_target_vnf_flavor_id(target_vnf_flavor_id)

    # Setting the flavor id for tester vm.
    tester_vm_flavor_id = os_utils.get_flavor_id(nova,
                                                 tester_vm_flavor_name)
    if tester_vm_flavor_id == '':
        for default in self.PERFORMANCE_TEST_TPLGY_DEFAULT:
            if default == 'ram_min':
                tester_vm_flavor_id = os_utils.get_flavor_id_by_ram_range(
                    nova,
                    self.PERFORMANCE_TEST_TPLGY_DEFAULT['ram_min'],
                    VNF_MAX_RAM_SIZE)

    if tester_vm_flavor_id == '':
        return self.step_failure(
            "making_testTopology",
            "Error : Failed to find flavor for tester vm")
    # Same flavor is used for both the send and receive tester VMs.
    tplgy.set_send_tester_vm_flavor_id(tester_vm_flavor_id)
    tplgy.set_receive_tester_vm_flavor_id(tester_vm_flavor_id)

    # Setting the image id for target vnf.
    target_vnf_image_id = os_utils.get_image_id(self.glance,
                                                target_vnf_image_name)
    if target_vnf_image_id == '':
        for default in self.PERFORMANCE_TEST_TPLGY_DEFAULT:
            if default == 'vnf_os_image':
                target_vnf_image_id = os_utils.get_image_id(
                    self.glance,
                    self.PERFORMANCE_TEST_TPLGY_DEFAULT['vnf_os_image'])

    if target_vnf_image_id == '':
        return self.step_failure(
            "making_testTopology",
            "Error : Failed to find required OS image for target vnf")
    tplgy.set_target_vnf_image_id(target_vnf_image_id)

    # Setting the image id for tester vm.
    tester_vm_image_id = os_utils.get_image_id(self.glance,
                                               tester_vm_image_name)
    if tester_vm_image_id == '':
        for default in self.PERFORMANCE_TEST_TPLGY_DEFAULT:
            if default == 'tester_os_image':
                tester_vm_image_id = os_utils.get_image_id(
                    self.glance,
                    self.PERFORMANCE_TEST_TPLGY_DEFAULT['tester_os_image'])

    if tester_vm_image_id == '':
        return self.step_failure(
            "making_testTopology",
            "Error : Failed to find required OS image for tester vm")
    tplgy.set_send_tester_vm_image_id(tester_vm_image_id)
    tplgy.set_receive_tester_vm_image_id(tester_vm_image_id)

    tplgy.set_region(REGION_NAME)

    ext_net = os_utils.get_external_net(self.neutron)
    if not ext_net:
        return self.step_failure("making_testTopology",
                                 "Failed to get external network")
    tplgy.set_external_network_name(ext_net)

    tplgy.set_credentials(username=self.ks_cresds['username'],
                          password=self.ks_cresds['password'],
                          tenant_name=self.ks_cresds['tenant_name'],
                          auth_url=os_utils.get_endpoint('identity'))

    return self.set_resultdata(self.testcase_start_time, "", "",
                               self.results)
def init_function_testToplogy(self, tplgy, function_test_config):
    """Configure the topology object for a function test.

    Resolves flavor and image ids for the target and reference VNFs
    (falling back to FUNCTION_TEST_TPLGY_DEFAULT when a name lookup
    returns ''), then sets region, external network and credentials on
    *tplgy*. Returns step_failure(...) on any missing resource, otherwise
    the current result data.
    """
    tplgy.delete_config()

    self.logger.info("Collect flavor id for all topology vnf")
    vnf_list = function_test_config["vnf_list"]
    target_vnf = self.util.get_vnf_info(vnf_list, "target_vnf")
    reference_vnf = self.util.get_vnf_info(vnf_list, "reference_vnf")

    # Image/flavor names are optional in the config; default to "".
    target_vnf_image_name = ""
    if "image_name" in target_vnf:
        target_vnf_image_name = target_vnf["image_name"]
    target_vnf_flavor_name = ""
    if "flavor_name" in target_vnf:
        target_vnf_flavor_name = target_vnf["flavor_name"]
    self.logger.debug("target_vnf image name : " + target_vnf_image_name)
    self.logger.debug("target_vnf flavor name : " + target_vnf_flavor_name)

    reference_vnf_image_name = ""
    if "image_name" in reference_vnf:
        reference_vnf_image_name = reference_vnf["image_name"]
    reference_vnf_flavor_name = ""
    if "flavor_name" in reference_vnf:
        reference_vnf_flavor_name = reference_vnf["flavor_name"]
    self.logger.debug("reference_vnf image name : " +
                      reference_vnf_image_name)
    self.logger.debug("reference_vnf flavor name : " +
                      reference_vnf_flavor_name)

    nova = os_utils.get_nova_client()

    # Setting the flavor id for target vnf.
    target_vnf_flavor_id = os_utils.get_flavor_id(nova,
                                                  target_vnf_flavor_name)
    if target_vnf_flavor_id == '':
        # Fallback: pick any flavor in [ram_min, VNF_MAX_RAM_SIZE].
        for default in self.FUNCTION_TEST_TPLGY_DEFAULT:
            if default == 'ram_min':
                target_vnf_flavor_id = os_utils.get_flavor_id_by_ram_range(
                    nova,
                    self.FUNCTION_TEST_TPLGY_DEFAULT['ram_min'],
                    VNF_MAX_RAM_SIZE)
        self.logger.info("target_vnf_flavor_id id search set")

    if target_vnf_flavor_id == '':
        return self.step_failure(
            "making_testTopology",
            "Error : Failed to find flavor for target vnf")
    tplgy.set_target_vnf_flavor_id(target_vnf_flavor_id)

    # Setting the flavor id for reference vnf.
    reference_vnf_flavor_id = os_utils.get_flavor_id(
        nova, reference_vnf_flavor_name)
    if reference_vnf_flavor_id == '':
        for default in self.FUNCTION_TEST_TPLGY_DEFAULT:
            if default == 'ram_min':
                reference_vnf_flavor_id = \
                    os_utils.get_flavor_id_by_ram_range(
                        nova,
                        self.FUNCTION_TEST_TPLGY_DEFAULT['ram_min'],
                        VNF_MAX_RAM_SIZE)
        self.logger.info("reference_vnf_flavor_id id search set")

    if reference_vnf_flavor_id == '':
        # NOTE(review): message says "tester vm" but this is the reference
        # vnf path — looks like a copy/paste slip in the original message.
        return self.step_failure(
            "making_testTopology",
            "Error : Failed to find flavor for tester vm")
    tplgy.set_reference_vnf_flavor_id(reference_vnf_flavor_id)

    # Setting the image id for target vnf.
    target_vnf_image_id = os_utils.get_image_id(self.glance,
                                                target_vnf_image_name)
    if target_vnf_image_id == '':
        for default in self.FUNCTION_TEST_TPLGY_DEFAULT:
            if default == 'os_image':
                target_vnf_image_id = os_utils.get_image_id(
                    self.glance,
                    self.FUNCTION_TEST_TPLGY_DEFAULT['os_image'])

    if target_vnf_image_id == '':
        return self.step_failure(
            "making_testTopology",
            "Error : Failed to find required OS image for target vnf")
    tplgy.set_target_vnf_image_id(target_vnf_image_id)

    # Setting the image id for reference vnf.
    reference_vnf_image_id = os_utils.get_image_id(
        self.glance, reference_vnf_image_name)
    if reference_vnf_image_id == '':
        for default in self.FUNCTION_TEST_TPLGY_DEFAULT:
            if default == 'os_image':
                reference_vnf_image_id = os_utils.get_image_id(
                    self.glance,
                    self.FUNCTION_TEST_TPLGY_DEFAULT['os_image'])

    if reference_vnf_image_id == '':
        return self.step_failure(
            "making_testTopology",
            "Error : Failed to find required OS image for reference vnf.")
    tplgy.set_reference_vnf_image_id(reference_vnf_image_id)

    tplgy.set_region(REGION_NAME)

    ext_net = os_utils.get_external_net(self.neutron)
    if not ext_net:
        return self.step_failure("making_testTopology",
                                 "Failed to get external network")
    tplgy.set_external_network_name(ext_net)

    tplgy.set_credentials(username=self.ks_cresds['username'],
                          password=self.ks_cresds['password'],
                          tenant_name=self.ks_cresds['tenant_name'],
                          auth_url=os_utils.get_endpoint('identity'))

    return self.set_resultdata(self.testcase_start_time, "", "",
                               self.results)
def deploy_cloudify(self, cfy):
    """Configure the Cloudify helper *cfy* and deploy the manager.

    Picks flavor and image (m1.large / centos_7 unless overridden by
    CFY_MANAGER_REQUIERMENTS), sets network and nameservers, prepares the
    cloudify-cli virtualenv, then runs the manager blueprint deployment.
    Returns step_failure(...) on error, otherwise the current result data
    with the "making_orchestrator" step marked OK.
    """
    username = self.ks_cresds['username']
    password = self.ks_cresds['password']
    tenant_name = self.ks_cresds['tenant_name']
    auth_url = os_utils.get_endpoint('identity')
    self.logger.debug("auth_url = %s" % auth_url)
    cfy.set_credentials(username, password, tenant_name, auth_url)

    self.logger.info("Collect flavor id for cloudify manager server")
    nova = os_utils.get_nova_client()
    flavor_name = "m1.large"
    flavor_id = os_utils.get_flavor_id(nova, flavor_name)
    # Requirement override: ram range lookup replaces the named flavor.
    for requirement in CFY_MANAGER_REQUIERMENTS:
        if requirement == 'ram_min':
            flavor_id = os_utils.get_flavor_id_by_ram_range(
                nova, CFY_MANAGER_REQUIERMENTS['ram_min'],
                CFY_MANAGER_MAX_RAM_SIZE)

    if flavor_id == '':
        # Last-resort fallback to a hard-coded 4000-8196 MB ram range.
        self.logger.error("Failed to find %s flavor. "
                          "Try with ram range default requirement !" %
                          flavor_name)
        flavor_id = os_utils.get_flavor_id_by_ram_range(nova, 4000, 8196)

    if flavor_id == '':
        return self.step_failure(
            "making_orchestrator",
            "Failed to find required flavor for this deployment")

    cfy.set_flavor_id(flavor_id)

    image_name = "centos_7"
    image_id = os_utils.get_image_id(self.glance, image_name)
    for requirement in CFY_MANAGER_REQUIERMENTS:
        if requirement == 'os_image':
            image_id = os_utils.get_image_id(
                self.glance, CFY_MANAGER_REQUIERMENTS['os_image'])

    if image_id == '':
        return self.step_failure(
            "making_orchestrator",
            "Error : Failed to find required OS image for cloudify manager"
        )

    cfy.set_image_id(image_id)

    ext_net = os_utils.get_external_net(self.neutron)
    if not ext_net:
        return self.step_failure("making_orchestrator",
                                 "Failed to get external network")

    cfy.set_external_network_name(ext_net)

    ns = functest_utils.get_resolvconf_ns()
    if ns:
        cfy.set_nameservers(ns)

    self.logger.info("Prepare virtualenv for cloudify-cli")
    cmd = "chmod +x " + VNF_DIR + "create_venv.sh"
    functest_utils.execute_command(cmd, self.logger)
    time.sleep(3)
    cmd = VNF_DIR + "create_venv.sh " + self.util.VNF_DATA_DIR
    functest_utils.execute_command(cmd, self.logger)

    cfy.download_manager_blueprint(CFY_MANAGER_BLUEPRINT['url'],
                                   CFY_MANAGER_BLUEPRINT['branch'])

    # ############### CLOUDIFY DEPLOYMENT ################
    start_time_ts = time.time()
    self.logger.info("Cloudify deployment Start Time:'%s'" % (
        datetime.datetime.fromtimestamp(start_time_ts).strftime(
            '%Y-%m-%d %H:%M:%S')))

    error = cfy.deploy_manager()
    if error:
        return self.step_failure("making_orchestrator", error)

    end_time_ts = time.time()
    duration = round(end_time_ts - start_time_ts, 1)
    self.logger.info("Cloudify deployment duration:'%s'" % duration)
    self.set_result("making_orchestrator", duration, "OK")
    return self.set_resultdata(self.testcase_start_time, "", "",
                               self.results)
def init(self):
    """Prepare the OpenStack platform for the VNF test run.

    Creates the test tenant and user (admin role), uploads required glance
    images, extends the security-group quota, builds the credentials dict
    used by the rest of the test and validates the test-scenario YAML.
    Returns step_failure(...) on fatal errors, otherwise the current
    result data with the "init" step marked OK.
    """
    start_time_ts = time.time()
    self.util = utilvnf(self.logger)
    self.ks_cresds = os_utils.get_credentials()

    self.logger.info("Prepare OpenStack plateform(create tenant and user)")
    keystone = os_utils.get_keystone_client()
    user_id = os_utils.get_user_id(keystone, self.ks_cresds['username'])
    if user_id == '':
        return self.step_failure(
            "init",
            "Error : Failed to get id of " + self.ks_cresds['username'])

    tenant_id = os_utils.create_tenant(
        keystone, TENANT_NAME, TENANT_DESCRIPTION)
    if tenant_id == '':
        return self.step_failure(
            "init",
            "Error : Failed to create " + TENANT_NAME + " tenant")

    # Role name differs by deployment ("admin" vs "Admin"); try both.
    roles_name = ["admin", "Admin"]
    role_id = ''
    for role_name in roles_name:
        if role_id == '':
            role_id = os_utils.get_role_id(keystone, role_name)

    if role_id == '':
        self.logger.error("Error : Failed to get id for %s role" %
                          role_name)

    if not os_utils.add_role_user(keystone, user_id, role_id, tenant_id):
        self.logger.error("Error : Failed to add %s on tenant" %
                          self.ks_cresds['username'])

    # Dedicated test user: name and password both equal TENANT_NAME.
    user_id = os_utils.create_user(
        keystone, TENANT_NAME, TENANT_NAME, None, tenant_id)
    if user_id == '':
        self.logger.error("Error : Failed to create %s user" % TENANT_NAME)

    if not os_utils.add_role_user(keystone, user_id, role_id, tenant_id):
        self.logger.error("Failed to add %s on tenant" % TENANT_NAME)

    self.logger.info("Update OpenStack creds informations")
    # Clients below are created against the new tenant.
    self.ks_cresds.update({
        "tenant_name": TENANT_NAME,
    })
    self.neutron = os_utils.get_neutron_client(self.ks_cresds)
    nova = os_utils.get_nova_client()
    self.glance = os_utils.get_glance_client(self.ks_cresds)
    self.ks_cresds.update({
        "username": TENANT_NAME,
        "password": TENANT_NAME,
    })

    self.load_test_env_config()

    self.logger.info("Upload some OS images if it doesn't exist")
    images = {}
    images.update(IMAGES)
    images.update(self.VNF_TEST_IMAGES)
    for img in images.keys():
        image_name = images[img]['image_name']
        self.logger.info("image name = " + image_name)
        image_url = images[img]['image_url']

        image_id = os_utils.get_image_id(self.glance, image_name)

        if image_id == '':
            self.logger.info("""%s image doesn't exist on glance repository. Try
            downloading this image and upload on glance !""" % image_name)
            image_id = self.download_and_add_image_on_glance(
                self.glance, image_name, image_url)

        if image_id == '':
            return self.step_failure(
                "init",
                "Error : Failed to find or upload required OS "
                "image for this deployment")

    self.logger.info("Update security group quota for this tenant")
    result = os_utils.update_sg_quota(self.neutron, tenant_id, 50, 100)
    if not result:
        return self.step_failure(
            "init",
            "Failed to update security group quota for tenant " +
            TENANT_NAME)

    self.credentials = {
        "username": TENANT_NAME,
        "password": TENANT_NAME,
        "auth_url": os_utils.get_endpoint('identity'),
        "tenant_name": TENANT_NAME,
        "region_name": os.environ['OS_REGION_NAME']
    }

    self.util.set_credentials(self.credentials["username"],
                              self.credentials["password"],
                              self.credentials["auth_url"],
                              self.credentials["tenant_name"],
                              self.credentials["region_name"])

    # Validate the scenario definition before any test is launched.
    test_scenario_file = open(self.util.TEST_SCENATIO_YAML_FILE,
                              'r')
    self.test_scenario_yaml = yaml.safe_load(test_scenario_file)
    test_scenario_file.close()

    res = self.util.test_scenario_validation_check(self.test_scenario_yaml)
    if res["status"] is False:
        self.logger.error(res["message"])
        return self.step_failure("init",
                                 "Error : Faild to test execution.")
    self.logger.info("Test scenario yaml validation check : " +
                     res["message"])

    end_time_ts = time.time()
    duration = round(end_time_ts - start_time_ts, 1)
    self.set_result("init", duration, "OK")
    return self.set_resultdata(self.testcase_start_time, "", "",
                               self.results)
def GetIp(type):  # pylint: disable=redefined-builtin
    """Return the hostname of the public endpoint for service *type*.

    Equivalent CLI: "openstack catalog show " + type + " | grep publicURL"
    """
    endpoint_url = openstack_utils.get_endpoint(service_type=type)
    return urlparse.urlparse(endpoint_url).hostname