def get_vim_descriptor(self):
    """Create the VIM descriptor used for onboarding onto the NFVO.

    Builds a JSON-serializable dict describing the OpenStack VIM from
    the credentials stored in ``self.creds``.  Depending on the
    Keystone API version, the ``tenant`` field is either the resolved
    project id (v3) or the raw tenant name (v2).

    Returns:
        dict: the VIM descriptor.
    """
    # Fixed: original docstring opened with four quotes (""""Create...),
    # which leaked a stray quote into the docstring text.
    self.logger.info("Building VIM descriptor with PoP creds: %s",
                     self.creds)
    # Depending on API version either tenant ID or project name must be
    # used
    if os_utils.is_keystone_v3():
        self.logger.info(
            "Using v3 API of OpenStack... -> Using OS_PROJECT_ID")
        project_id = os_utils.get_tenant_id(
            os_utils.get_keystone_client(),
            self.creds.get("project_name"))
    else:
        self.logger.info(
            "Using v2 API of OpenStack... -> Using OS_TENANT_NAME")
        project_id = self.creds.get("tenant_name")
    self.logger.debug("VIM project/tenant id: %s", project_id)
    vim_json = {
        "name": "vim-instance",
        "authUrl": self.creds.get("auth_url"),
        "tenant": project_id,
        "username": self.creds.get("username"),
        "password": self.creds.get("password"),
        "securityGroups": [self.mano['details']['sec_group']],
        "type": "openstack",
        "location": {
            "name": "opnfv",
            "latitude": "52.525876",
            "longitude": "13.314400"
        }
    }
    self.logger.info("Built VIM descriptor: %s", vim_json)
    return vim_json
def clean_enviroment(self, cfy):
    """Undeploy the Cloudify manager, then remove the test tenant/user.

    NOTE(review): the method name keeps the historical misspelling
    ("enviroment") because external callers may rely on it.

    Args:
        cfy: orchestrator object exposing ``undeploy_manager()``.

    Returns:
        Whatever ``self.set_resultdata`` produces for this run.
    """
    # ########### CLOUDIFY UNDEPLOYMENT #############
    cfy.undeploy_manager()
    # ############## TENANT CLEANUP ################
    # NOTE(review): the misspelled attribute "ks_cresds" is kept as-is
    # in case it is read elsewhere in the class — confirm before renaming.
    self.ks_cresds = os_utils.get_credentials()
    self.logger.info("Removing %s tenant .."
                     % CFY_INPUTS['keystone_tenant_name'])
    keystone = os_utils.get_keystone_client()
    tenant_id = os_utils.get_tenant_id(
        keystone, CFY_INPUTS['keystone_tenant_name'])
    if tenant_id == '':
        self.logger.error("Error : Failed to get id of %s tenant"
                          % CFY_INPUTS['keystone_tenant_name'])
    else:
        # Fixed local typo: "resulut" -> "result"
        result = os_utils.delete_tenant(keystone, tenant_id)
        if not result:
            self.logger.error("Error : Failed to remove %s tenant"
                              % CFY_INPUTS['keystone_tenant_name'])
    self.logger.info("Removing %s user .."
                     % CFY_INPUTS['keystone_username'])
    user_id = os_utils.get_user_id(
        keystone, CFY_INPUTS['keystone_username'])
    if user_id == '':
        self.logger.error("Error : Failed to get id of %s user"
                          % CFY_INPUTS['keystone_username'])
    else:
        result = os_utils.delete_user(keystone, user_id)
        if not result:
            self.logger.error("Error : Failed to remove %s user"
                              % CFY_INPUTS['keystone_username'])
    return self.set_resultdata(self.testcase_start_time, "", "",
                               self.results)
def clean(self):
    """Clean created objects/functions.

    Removes deployment artefacts, destroys the Juju controller, then
    releases tenant-owned floating IPs and cleans created OS objects.
    Always returns True (best-effort cleanup).
    """
    try:
        if not self.orchestrator['requirements']['preserve_setup']:
            self.__logger.info("Removing deployment files...")
            testresult = os.path.join(self.case_dir, 'TestResults.json')
            if os.path.exists(testresult):
                os.remove(testresult)
            self.__logger.info("Removing %s file ", self.filename)
            if os.path.exists(self.filename):
                os.remove(self.filename)
            self.__logger.info("Destroying Orchestrator...")
            os.system('juju destroy-controller -y abot-controller '
                      '--destroy-all-models')
    except Exception:  # pylint: disable=broad-except
        # Best effort: continue with tenant cleanup even if the
        # orchestrator teardown failed.
        # Fixed: Logger.warn is deprecated in favour of Logger.warning.
        self.__logger.warning("Some issue during the undeployment ..")
        self.__logger.warning("Tenant clean continue ..")
    if not self.orchestrator['requirements']['preserve_setup']:
        self.__logger.info('Remove the Abot_epc OS object ..')
        for creator in reversed(self.created_object):
            try:
                creator.clean()
            except Exception as exc:  # pylint: disable=broad-except
                self.__logger.error('Unexpected error cleaning - %s', exc)
        self.__logger.info("Releasing all the floating IPs")
        floating_ips = os_utils.get_floating_ips(self.neutron_client)
        tenant_id = os_utils.get_tenant_id(self.keystone_client,
                                           self.tenant_name)
        self.__logger.info("TENANT ID : %s", tenant_id)
        # Only delete floating IPs owned by this test's tenant.
        for item in floating_ips:
            if item['tenant_id'] == tenant_id:
                os_utils.delete_floating_ip(self.neutron_client,
                                            item['id'])
        self.__logger.info("Cleaning Projects and Users")
        for creator in reversed(self.created_object):
            try:
                creator.clean()
            except Exception as exc:  # pylint: disable=broad-except
                self.__logger.error('Unexpected error cleaning - %s', exc)
    return True
def clean(self):
    """Clean deployment files, destroy the Juju controller and release
    OpenStack resources (floating IPs) before delegating to the parent
    class cleanup.  Always returns True (best-effort cleanup)."""
    try:
        if not self.orchestrator['requirements']['preserve_setup']:
            self.__logger.info("Removing deployment files...")
            os.system('rm -f -- {}'.format(
                self.case_dir + '/' + 'TestResults.json'))
            os.system("sed -i '/project-domain-name/Q' {}/abot_epc"
                      "_credential.yaml".format(self.case_dir))
            self.__logger.info("Destroying Orchestrator...")
            os.system('juju destroy-controller -y abot-controller '
                      '--destroy-all-models')
            self.__logger.info("Uninstalling dependency packages...")
    except Exception:  # pylint: disable=broad-except
        # Fixed: narrowed from a bare "except:" so KeyboardInterrupt /
        # SystemExit are no longer swallowed; Logger.warn -> warning.
        self.__logger.warning("Some issue during the undeployment ..")
        self.__logger.warning("Tenant clean continue ..")
    if not self.orchestrator['requirements']['preserve_setup']:
        self.__logger.info('Remove the Abot_epc OS object ..')
        for creator in reversed(self.created_object):
            try:
                creator.clean()
            except Exception as exc:  # pylint: disable=broad-except
                self.__logger.error('Unexpected error cleaning - %s', exc)
        self.__logger.info("Releasing all the floating IPs")
        user_id = os_utils.get_user_id(self.keystone_client,
                                       self.tenant_name)
        floating_ips = os_utils.get_floating_ips(self.neutron_client)
        tenant_id = os_utils.get_tenant_id(self.keystone_client,
                                           self.tenant_name)
        self.__logger.info("USER ID : %s", user_id)
        self.__logger.info("FLOATING IP : %s", floating_ips)
        self.__logger.info("TENANT ID : %s", tenant_id)
        # Only delete floating IPs owned by this test's tenant.
        for item in floating_ips:
            if item['tenant_id'] == tenant_id:
                os_utils.delete_floating_ip(self.neutron_client,
                                            item['id'])
        self.__logger.info("Cleaning Projects and Users")
        super(JujuEpc, self).clean()
    return True
def prepare_floating_ip(self):
    """Select/Create Floating IP if it doesn't exist yet.

    Reuses an existing floating IP belonging to this tenant when one
    is found, otherwise creates a new one on the configured external
    network.  The chosen snaps floating-IP object is stored in
    ``self.mano['details']['fip']``.

    NOTE(review): the inner ``is not None`` break assumes
    ``self.mano['details']['fip']`` starts out as None — confirm it is
    initialised that way elsewhere in the class.
    """
    self.logger.info("Retrieving floating IP for Open Baton NFVO")
    neutron_client = snaps_utils.neutron_utils.neutron_client(
        self.snaps_creds)
    # Finding Tenant ID to check to which tenant the Floating IP belongs
    tenant_id = os_utils.get_tenant_id(
        os_utils.get_keystone_client(self.creds), self.tenant_name)
    # Use os_utils to retrieve complete information of Floating IPs
    floating_ips = os_utils.get_floating_ips(neutron_client)
    my_floating_ips = []
    # Filter Floating IPs with tenant id
    for floating_ip in floating_ips:
        # self.logger.info("Floating IP: %s", floating_ip)
        if floating_ip.get('tenant_id') == tenant_id:
            my_floating_ips.append(floating_ip.get('floating_ip_address'))
    # Select if Floating IP exist else create new one
    if len(my_floating_ips) >= 1:
        # Get Floating IP object from snaps for clean up
        snaps_floating_ips = snaps_utils.neutron_utils.get_floating_ips(
            neutron_client)
        # Match the first tenant-owned address against the snaps
        # objects; stop as soon as one is selected.
        for my_floating_ip in my_floating_ips:
            for snaps_floating_ip in snaps_floating_ips:
                if snaps_floating_ip.ip == my_floating_ip:
                    self.mano['details']['fip'] = snaps_floating_ip
                    self.logger.info(
                        "Selected floating IP for Open Baton NFVO %s",
                        (self.mano['details']['fip'].ip))
                    break
            if self.mano['details']['fip'] is not None:
                break
    else:
        self.logger.info("Creating floating IP for Open Baton NFVO")
        self.mano['details']['fip'] = (
            snaps_utils.neutron_utils.
            create_floating_ip(
                neutron_client, self.mano['details']['external_net_name']))
        self.logger.info(
            "Created floating IP for Open Baton NFVO %s",
            (self.mano['details']['fip'].ip))
def clean(self):
    """Remove the tenant and user created for this test case.

    Lookup failures and deletion failures are logged but do not abort
    the cleanup of the remaining resources.
    """
    self.logger.info("test cleaning")
    # --- tenant removal ---
    self.logger.info("Removing %s tenant .." % self.tenant_name)
    tenant_id = os_utils.get_tenant_id(self.keystone_client,
                                       self.tenant_name)
    if tenant_id == '':
        self.logger.error("Error : Failed to get id of %s tenant"
                          % self.tenant_name)
    elif not os_utils.delete_tenant(self.keystone_client, tenant_id):
        self.logger.error("Error : Failed to remove %s tenant"
                          % self.tenant_name)
    # --- user removal (user name matches the tenant name) ---
    self.logger.info("Removing %s user .." % self.tenant_name)
    user_id = os_utils.get_user_id(self.keystone_client,
                                   self.tenant_name)
    if user_id == '':
        self.logger.error("Error : Failed to get id of %s user"
                          % self.tenant_name)
    elif not os_utils.delete_user(self.keystone_client, user_id):
        self.logger.error("Error : Failed to remove %s user"
                          % self.tenant_name)
def deploy_vnf(self):
    """Deploy the vIMS NSD through the Open Baton NFVO.

    Registers the OpenStack VIM with the NFVO, onboards the NSD from
    ``self.market_link`` and polls until the NSR status becomes ACTIVE
    or ERROR.  On success sleeps 60 s to let services settle.

    Returns:
        bool: True when the NSR reached ACTIVE, False otherwise.

    NOTE(review): broken string literals from the extraction were
    reassembled ("... -> Using OS_TENANT_NAME" and the final sleep
    message).  Also note the poll loop logs at i == 150 (~750 s) but
    does not break — it keeps polling until ACTIVE/ERROR; confirm that
    is intended.
    """
    self.logger.info("Starting vIMS Deployment...")
    self.main_agent = MainAgent(nfvo_ip=self.ob_ip,
                                nfvo_port=self.ob_port,
                                https=self.ob_https,
                                version=1,
                                username=self.ob_username,
                                password=self.ob_password)
    self.logger.info(
        "Check if openims Flavor is available, if not create one")
    flavor_exist, flavor_id = os_utils.get_or_create_flavor(
        "m1.small", "2048", '20', '1', public=True)
    self.logger.debug("Flavor id: %s", flavor_id)
    self.logger.info("Getting project 'default'...")
    project_agent = self.main_agent.get_agent("project", self.ob_projectid)
    # Resolve the NFVO-side id of the 'default' project.
    for p in json.loads(project_agent.find()):
        if p.get("name") == "default":
            self.ob_projectid = p.get("id")
            self.logger.info("Found project 'default': %s", p)
            break
    self.logger.debug("project id: %s", self.ob_projectid)
    if self.ob_projectid == "":
        self.logger.error("Default project id was not found!")
    creds = os_utils.get_credentials()
    self.logger.info("PoP creds: %s", creds)
    # Keystone v3 needs the project id, v2 the tenant name.
    if os_utils.is_keystone_v3():
        self.logger.info(
            "Using v3 API of OpenStack... -> Using OS_PROJECT_ID")
        project_id = os_utils.get_tenant_id(
            os_utils.get_keystone_client(),
            creds.get("project_name"))
    else:
        self.logger.info(
            "Using v2 API of OpenStack... -> Using OS_TENANT_NAME")
        project_id = creds.get("tenant_name")
    self.logger.debug("project id: %s", project_id)
    vim_json = {
        "name": "vim-instance",
        "authUrl": creds.get("auth_url"),
        "tenant": project_id,
        "username": creds.get("username"),
        "password": creds.get("password"),
        "securityGroups": ["default", "orchestra-sec-group"],
        "type": "openstack",
        "location": {
            "name": "opnfv",
            "latitude": "52.525876",
            "longitude": "13.314400"
        }
    }
    self.logger.debug("Registering VIM: %s", vim_json)
    self.main_agent.get_agent(
        "vim", project_id=self.ob_projectid).create(
            entity=json.dumps(vim_json))
    market_agent = self.main_agent.get_agent("market",
                                             project_id=self.ob_projectid)
    nsd = {}
    try:
        self.logger.info("sending: %s", self.market_link)
        nsd = market_agent.create(entity=self.market_link)
        self.logger.info("Onboarded NSD: " + nsd.get("name"))
    except NfvoException as e:
        self.logger.error(e.message)
    nsr_agent = self.main_agent.get_agent("nsr",
                                          project_id=self.ob_projectid)
    nsd_id = nsd.get('id')
    if nsd_id is None:
        self.logger.error("NSD not onboarded correctly")
    try:
        self.nsr = nsr_agent.create(nsd_id)
    except NfvoException as e:
        self.logger.error(e.message)
    if self.nsr.get('code') is not None:
        self.logger.error("vIMS cannot be deployed: %s -> %s",
                          self.nsr.get('code'), self.nsr.get('message'))
        self.logger.error("vIMS cannot be deployed")
    i = 0
    self.logger.info("Waiting for NSR to go to ACTIVE...")
    # Poll every 5 s until the NSR leaves the transient states.
    while self.nsr.get("status") != 'ACTIVE' and self.nsr.get(
            "status") != 'ERROR':
        i += 1
        if i == 150:
            self.logger.error("INACTIVE NSR after %s sec..", 5 * i)
        time.sleep(5)
        self.nsr = json.loads(nsr_agent.find(self.nsr.get('id')))
    if self.nsr.get("status") == 'ACTIVE':
        self.details["vnf"] = {'status': "PASS", 'result': self.nsr}
        self.logger.info("Deploy VNF: OK")
    else:
        self.details["vnf"] = {'status': "FAIL", 'result': self.nsr}
        self.logger.error(self.nsr)
        self.logger.error("Deploy VNF: ERROR")
        return False
    self.ob_nsr_id = self.nsr.get("id")
    self.logger.info(
        "Sleep for 60s to ensure that all services are up and running...")
    time.sleep(60)
    return True
def deploy_orchestrator(self, **kwargs):
    """Deploy the Cloudify orchestrator on the target OpenStack cloud.

    Uploads any missing OS images, extends the tenant security-group
    quota, then configures (flavor, image, external network, DNS) and
    deploys the Cloudify manager VM.

    Returns:
        dict: {'status': 'PASS', 'result': ''} on success; failures
        abort through ``self.step_failure``.
    """
    self.logger.info("Additional pre-configuration steps")
    self.neutron_client = os_utils.get_neutron_client(self.admin_creds)
    self.glance_client = os_utils.get_glance_client(self.admin_creds)
    self.keystone_client = os_utils.get_keystone_client(self.admin_creds)
    self.nova_client = os_utils.get_nova_client(self.admin_creds)
    # needs some images
    self.logger.info("Upload some OS images if it doesn't exist")
    temp_dir = os.path.join(self.data_dir, "tmp/")
    for image_name, image_url in self.images.iteritems():
        self.logger.info("image: %s, url: %s" % (image_name, image_url))
        # BUGFIX: image_id was unbound when get_image_id raised; start
        # from '' so the fallback upload path runs instead of a
        # NameError.  Also narrowed the bare "except:".
        image_id = ''
        try:
            image_id = os_utils.get_image_id(self.glance_client,
                                             image_name)
            self.logger.debug("image_id: %s" % image_id)
        except Exception:  # pylint: disable=broad-except
            self.logger.error("Unexpected error: %s" % sys.exc_info()[0])
        if image_id == '':
            self.logger.info(
                """%s image doesn't exist on glance repository. Try
                downloading this image and upload on glance !"""
                % image_name)
            image_id = download_and_add_image_on_glance(
                self.glance_client, image_name, image_url, temp_dir)
        if image_id == '':
            self.step_failure("Failed to find or upload required OS "
                              "image for this deployment")
    # Need to extend quota
    self.logger.info("Update security group quota for this tenant")
    tenant_id = os_utils.get_tenant_id(self.keystone_client,
                                       self.tenant_name)
    self.logger.debug("Tenant id found %s" % tenant_id)
    if not os_utils.update_sg_quota(self.neutron_client, tenant_id,
                                    50, 100):
        self.step_failure("Failed to update security group quota" +
                          " for tenant " + self.tenant_name)
    self.logger.debug("group quota extended")
    # start the deployment of cloudify
    public_auth_url = os_utils.get_endpoint('identity')
    self.logger.debug("CFY inputs: %s" % self.orchestrator['inputs'])
    cfy = Orchestrator(self.data_dir, self.orchestrator['inputs'])
    self.orchestrator['object'] = cfy
    self.logger.debug("Orchestrator object created")
    self.logger.debug("Tenant name: %s" % self.tenant_name)
    cfy.set_credentials(username=self.tenant_name,
                        password=self.tenant_name,
                        tenant_name=self.tenant_name,
                        auth_url=public_auth_url)
    self.logger.info("Credentials set in CFY")
    # orchestrator VM flavor
    self.logger.info("Check Flavor is available, if not create one")
    self.logger.debug("Flavor details %s " %
                      self.orchestrator['requirements']['ram_min'])
    flavor_exist, flavor_id = os_utils.get_or_create_flavor(
        "m1.large",
        self.orchestrator['requirements']['ram_min'],
        '1', '1', public=True)
    self.logger.debug("Flavor id: %s" % flavor_id)
    if not flavor_id:
        self.logger.info("Available flavors are: ")
        # BUGFIX: novaclient exposes "flavors", not "flavor".
        self.logger.info(self.nova_client.flavors.list())
        # BUGFIX: missing space between the two string fragments.
        self.step_failure("Failed to find required flavor "
                          "for this deployment")
    cfy.set_flavor_id(flavor_id)
    self.logger.debug("Flavor OK")
    # orchestrator VM image
    self.logger.debug("Orchestrator image")
    if 'os_image' in self.orchestrator['requirements'].keys():
        image_id = os_utils.get_image_id(
            self.glance_client,
            self.orchestrator['requirements']['os_image'])
        self.logger.debug("Orchestrator image id: %s" % image_id)
        if image_id == '':
            self.logger.error("CFY image not found")
            self.step_failure("Failed to find required OS image"
                              " for cloudify manager")
    else:
        self.step_failure("Failed to find required OS image"
                          " for cloudify manager")
    cfy.set_image_id(image_id)
    self.logger.debug("Orchestrator image set")
    self.logger.debug("Get External network")
    ext_net = os_utils.get_external_net(self.neutron_client)
    self.logger.debug("External network: %s" % ext_net)
    if not ext_net:
        self.step_failure("Failed to get external network")
    cfy.set_external_network_name(ext_net)
    self.logger.debug("CFY External network set")
    self.logger.debug("get resolvconf")
    ns = ft_utils.get_resolvconf_ns()
    if ns:
        cfy.set_nameservers(ns)
    self.logger.debug("Resolvconf set")
    self.logger.info("Prepare virtualenv for cloudify-cli")
    cmd = "chmod +x " + self.case_dir + "create_venv.sh"
    ft_utils.execute_command(cmd)
    time.sleep(3)
    cmd = self.case_dir + "create_venv.sh " + self.data_dir
    ft_utils.execute_command(cmd)
    cfy.download_manager_blueprint(
        self.orchestrator['blueprint']['url'],
        self.orchestrator['blueprint']['branch'])
    cfy.deploy_manager()
    return {'status': 'PASS', 'result': ''}
def execute(self):
    """Run all enabled sub-testcases and aggregate their results.

    Saves the current neutron/nova quotas, lifts them for the run,
    dynamically imports and executes each enabled testcase from the
    YAML config, then restores the quotas and (on fuel/apex) gathers
    installer logs.

    Returns:
        base.Feature.EX_OK when every enabled testcase passed,
        base.Feature.EX_RUN_ERROR otherwise.
    """
    nova_client = os_utils.get_nova_client()
    neutron_client = os_utils.get_neutron_client()
    tenant_id = os_utils.get_tenant_id(os_utils.get_keystone_client(),
                                       os.environ['OS_PROJECT_NAME'])
    # Remember current quotas so they can be restored afterwards.
    neutron_quota = test_utils.get_neutron_quota(neutron_client, tenant_id)
    (neutron_nw_quota, neutron_subnet_quota,
     neutron_port_quota) = (neutron_quota['network'],
                            neutron_quota['subnet'],
                            neutron_quota['port'])
    instances_quota = test_utils.get_nova_instances_quota(nova_client)
    self.__logger.info("Setting net/subnet/port quota to unlimited")
    test_utils.update_nw_subnet_port_quota(
        neutron_client,
        tenant_id,
        COMMON_CONFIG.neutron_nw_quota,
        COMMON_CONFIG.neutron_subnet_quota,
        COMMON_CONFIG.neutron_port_quota)
    # Workaround for
    # https://jira.opnfv.org/projects/SDNVPN/issues/SDNVPN-115
    self.__logger.info("Setting instances quota class to unlimited")
    test_utils.update_instance_quota_class(
        nova_client, COMMON_CONFIG.nova_instances_quota_class)
    with open(COMMON_CONFIG.config_file) as f:
        config_yaml = yaml.safe_load(f)
    testcases = config_yaml.get("testcases")
    overall_status = "PASS"
    for tc in testcases:
        if testcases[tc]['enabled']:
            test_name = tc
            test_descr = testcases[tc]['description']
            title = ("Running '%s - %s'" % (test_name, test_descr))
            self.__logger.info(title)
            self.__logger.info("%s\n" % ("=" * len(title)))
            # Each testcase is a module exposing main().
            t = importlib.import_module(test_name, package=None)
            try:
                result = t.main()
            except Exception as ex:
                # A crashing testcase is treated as a failure (-1).
                result = -1
                self.__logger.info("Caught Exception in %s: %s Trace: %s"
                                   % (test_name, ex,
                                      traceback.format_exc()))
            if result < 0:
                status = "FAIL"
                overall_status = "FAIL"
                self.__logger.info("Testcase %s failed" % test_name)
            else:
                # On success result is expected to be a dict with
                # 'status' and 'details' keys.
                status = result.get("status")
                self.details.update({
                    test_name: {
                        'status': status,
                        'details': result.get("details")
                    }
                })
                self.__logger.info(
                    "Results of test case '%s - %s':\n%s\n"
                    % (test_name, test_descr, result))
                if status == "FAIL":
                    overall_status = "FAIL"
    self.__logger.info("Resetting subnet/net/port quota")
    test_utils.update_nw_subnet_port_quota(neutron_client,
                                           tenant_id,
                                           neutron_nw_quota,
                                           neutron_subnet_quota,
                                           neutron_port_quota)
    self.__logger.info("Resetting instances quota class")
    test_utils.update_instance_quota_class(nova_client, instances_quota)
    try:
        installer_type = str(os.environ['INSTALLER_TYPE'].lower())
        if installer_type in ["fuel", "apex"]:
            gather_logs('overall')
        else:
            self.__logger.info("Skipping log gathering because installer"
                               "type %s is neither fuel nor apex" %
                               installer_type)
    except Exception as ex:
        # Log gathering is best-effort; never fail the run over it.
        self.__logger.error(
            ('Something went wrong in the Log gathering.'
             'Ex: %s, Trace: %s') % (ex, traceback.format_exc()))
    if overall_status == "PASS":
        self.result = 100
        return base.Feature.EX_OK
    return base.Feature.EX_RUN_ERROR
def main():
    """End-to-end vIMS test driver.

    Prepares the OpenStack platform (tenant, user, images, quotas),
    deploys the Cloudify manager, deploys the Clearwater vIMS VNF,
    tests it, then undeploys everything and (unless --noclean) removes
    the created tenant and user.

    NOTE(review): several string literals broken by the extraction
    were reassembled; the loop nesting around role lookup was inferred
    — confirm against the original file.
    """
    # ############### GENERAL INITIALISATION ################
    if not os.path.exists(VIMS_DATA_DIR):
        os.makedirs(VIMS_DATA_DIR)
    ks_creds = os_utils.get_credentials("keystone")
    nv_creds = os_utils.get_credentials("nova")
    nt_creds = os_utils.get_credentials("neutron")
    logger.info("Prepare OpenStack plateform (create tenant and user)")
    keystone = ksclient.Client(**ks_creds)
    user_id = os_utils.get_user_id(keystone, ks_creds['username'])
    if user_id == '':
        step_failure("init", "Error : Failed to get id of " +
                     ks_creds['username'])
    tenant_id = os_utils.create_tenant(
        keystone, TENANT_NAME, TENANT_DESCRIPTION)
    if tenant_id == '':
        step_failure("init", "Error : Failed to create " +
                     TENANT_NAME + " tenant")
    # Accept either capitalisation of the admin role.
    roles_name = ["admin", "Admin"]
    role_id = ''
    for role_name in roles_name:
        if role_id == '':
            role_id = os_utils.get_role_id(keystone, role_name)
            if role_id == '':
                logger.error("Error : Failed to get id for %s role"
                             % role_name)
    if not os_utils.add_role_user(keystone, user_id, role_id, tenant_id):
        logger.error("Error : Failed to add %s on tenant"
                     % ks_creds['username'])
    user_id = os_utils.create_user(
        keystone, TENANT_NAME, TENANT_NAME, None, tenant_id)
    if user_id == '':
        logger.error("Error : Failed to create %s user" % TENANT_NAME)
    logger.info("Update OpenStack creds informations")
    # From here on, operate as the newly created tenant/user.
    ks_creds.update({
        "username": TENANT_NAME,
        "password": TENANT_NAME,
        "tenant_name": TENANT_NAME,
    })
    nt_creds.update({
        "tenant_name": TENANT_NAME,
    })
    nv_creds.update({
        "project_id": TENANT_NAME,
    })
    logger.info("Upload some OS images if it doesn't exist")
    glance_endpoint = keystone.service_catalog.url_for(
        service_type='image', endpoint_type='publicURL')
    glance = glclient.Client(1, glance_endpoint, token=keystone.auth_token)
    for img in IMAGES.keys():
        image_name = IMAGES[img]['image_name']
        image_url = IMAGES[img]['image_url']
        image_id = os_utils.get_image_id(glance, image_name)
        if image_id == '':
            logger.info("""%s image doesn't exist on glance repository.
                        Try downloading this image and upload on glance !"""
                        % image_name)
            image_id = download_and_add_image_on_glance(
                glance, image_name, image_url)
        if image_id == '':
            step_failure(
                "init",
                "Error : Failed to find or upload required OS "
                "image for this deployment")
    nova = nvclient.Client("2", **nv_creds)
    logger.info("Update security group quota for this tenant")
    neutron = ntclient.Client(**nt_creds)
    if not os_utils.update_sg_quota(neutron, tenant_id, 50, 100):
        step_failure(
            "init",
            "Failed to update security group quota for tenant " +
            TENANT_NAME)
    logger.info("Update cinder quota for this tenant")
    from cinderclient import client as cinderclient
    creds_cinder = os_utils.get_credentials("cinder")
    cinder_client = cinderclient.Client('1', creds_cinder['username'],
                                        creds_cinder['api_key'],
                                        creds_cinder['project_id'],
                                        creds_cinder['auth_url'],
                                        service_type="volume")
    if not os_utils.update_cinder_quota(cinder_client, tenant_id,
                                        20, 10, 150):
        step_failure(
            "init", "Failed to update cinder quota for tenant " +
            TENANT_NAME)
    # ############### CLOUDIFY INITIALISATION ################
    cfy = orchestrator(VIMS_DATA_DIR, CFY_INPUTS, logger)
    cfy.set_credentials(username=ks_creds['username'],
                        password=ks_creds['password'],
                        tenant_name=ks_creds['tenant_name'],
                        auth_url=ks_creds['auth_url'])
    logger.info("Collect flavor id for cloudify manager server")
    nova = nvclient.Client("2", **nv_creds)
    flavor_name = "m1.medium"
    flavor_id = os_utils.get_flavor_id(nova, flavor_name)
    for requirement in CFY_MANAGER_REQUIERMENTS:
        if requirement == 'ram_min':
            flavor_id = os_utils.get_flavor_id_by_ram_range(
                nova, CFY_MANAGER_REQUIERMENTS['ram_min'], 8196)
    if flavor_id == '':
        logger.error(
            "Failed to find %s flavor. "
            "Try with ram range default requirement !" % flavor_name)
        flavor_id = os_utils.get_flavor_id_by_ram_range(nova, 4000, 8196)
    if flavor_id == '':
        step_failure("orchestrator",
                     "Failed to find required flavor for this deployment")
    cfy.set_flavor_id(flavor_id)
    image_name = "centos_7"
    image_id = os_utils.get_image_id(glance, image_name)
    for requirement in CFY_MANAGER_REQUIERMENTS:
        if requirement == 'os_image':
            image_id = os_utils.get_image_id(
                glance, CFY_MANAGER_REQUIERMENTS['os_image'])
    if image_id == '':
        step_failure(
            "orchestrator",
            "Error : Failed to find required OS image for cloudify manager")
    cfy.set_image_id(image_id)
    ext_net = os_utils.get_external_net(neutron)
    if not ext_net:
        step_failure("orchestrator", "Failed to get external network")
    cfy.set_external_network_name(ext_net)
    ns = functest_utils.get_resolvconf_ns()
    if ns:
        cfy.set_nameservers(ns)
    logger.info("Prepare virtualenv for cloudify-cli")
    cmd = "chmod +x " + VIMS_DIR + "create_venv.sh"
    functest_utils.execute_command(cmd, logger)
    time.sleep(3)
    cmd = VIMS_DIR + "create_venv.sh " + VIMS_DATA_DIR
    functest_utils.execute_command(cmd, logger)
    cfy.download_manager_blueprint(
        CFY_MANAGER_BLUEPRINT['url'], CFY_MANAGER_BLUEPRINT['branch'])
    # ############### CLOUDIFY DEPLOYMENT ################
    start_time_ts = time.time()
    end_time_ts = start_time_ts
    logger.info("Cloudify deployment Start Time:'%s'" % (
        datetime.datetime.fromtimestamp(start_time_ts).strftime(
            '%Y-%m-%d %H:%M:%S')))
    error = cfy.deploy_manager()
    if error:
        step_failure("orchestrator", error)
    end_time_ts = time.time()
    duration = round(end_time_ts - start_time_ts, 1)
    logger.info("Cloudify deployment duration:'%s'" % duration)
    set_result("orchestrator", duration, "")
    # ############### CLEARWATER INITIALISATION ################
    cw = clearwater(CW_INPUTS, cfy, logger)
    logger.info("Collect flavor id for all clearwater vm")
    nova = nvclient.Client("2", **nv_creds)
    flavor_name = "m1.small"
    flavor_id = os_utils.get_flavor_id(nova, flavor_name)
    for requirement in CW_REQUIERMENTS:
        if requirement == 'ram_min':
            flavor_id = os_utils.get_flavor_id_by_ram_range(
                nova, CW_REQUIERMENTS['ram_min'], 8196)
    if flavor_id == '':
        logger.error(
            "Failed to find %s flavor. Try with ram range "
            "default requirement !" % flavor_name)
        flavor_id = os_utils.get_flavor_id_by_ram_range(nova, 4000, 8196)
    if flavor_id == '':
        step_failure(
            "vIMS", "Failed to find required flavor for this deployment")
    cw.set_flavor_id(flavor_id)
    image_name = "ubuntu_14.04"
    image_id = os_utils.get_image_id(glance, image_name)
    for requirement in CW_REQUIERMENTS:
        if requirement == 'os_image':
            image_id = os_utils.get_image_id(
                glance, CW_REQUIERMENTS['os_image'])
    if image_id == '':
        step_failure(
            "vIMS",
            "Error : Failed to find required OS image for cloudify manager")
    cw.set_image_id(image_id)
    ext_net = os_utils.get_external_net(neutron)
    if not ext_net:
        step_failure("vIMS", "Failed to get external network")
    cw.set_external_network_name(ext_net)
    # ############### CLEARWATER DEPLOYMENT ################
    start_time_ts = time.time()
    end_time_ts = start_time_ts
    logger.info("vIMS VNF deployment Start Time:'%s'" % (
        datetime.datetime.fromtimestamp(start_time_ts).strftime(
            '%Y-%m-%d %H:%M:%S')))
    error = cw.deploy_vnf(CW_BLUEPRINT)
    if error:
        step_failure("vIMS", error)
    end_time_ts = time.time()
    duration = round(end_time_ts - start_time_ts, 1)
    logger.info("vIMS VNF deployment duration:'%s'" % duration)
    set_result("vIMS", duration, "")
    # ############### CLEARWATER TEST ################
    test_clearwater()
    # ########## CLEARWATER UNDEPLOYMENT ############
    cw.undeploy_vnf()
    # ########### CLOUDIFY UNDEPLOYMENT #############
    cfy.undeploy_manager()
    # ############## GENERAL CLEANUP ################
    if args.noclean:
        exit(0)
    # Re-authenticate as admin to remove the test tenant and user.
    ks_creds = os_utils.get_credentials("keystone")
    keystone = ksclient.Client(**ks_creds)
    logger.info("Removing %s tenant .."
                % CFY_INPUTS['keystone_tenant_name'])
    tenant_id = os_utils.get_tenant_id(
        keystone, CFY_INPUTS['keystone_tenant_name'])
    if tenant_id == '':
        logger.error("Error : Failed to get id of %s tenant"
                     % CFY_INPUTS['keystone_tenant_name'])
    else:
        if not os_utils.delete_tenant(keystone, tenant_id):
            logger.error("Error : Failed to remove %s tenant"
                         % CFY_INPUTS['keystone_tenant_name'])
    logger.info("Removing %s user .." % CFY_INPUTS['keystone_username'])
    user_id = os_utils.get_user_id(
        keystone, CFY_INPUTS['keystone_username'])
    if user_id == '':
        logger.error("Error : Failed to get id of %s user"
                     % CFY_INPUTS['keystone_username'])
    else:
        if not os_utils.delete_user(keystone, user_id):
            logger.error("Error : Failed to remove %s user"
                         % CFY_INPUTS['keystone_username'])