def __create_vm_inst(self, heat_keypair_option, stack_server):
    vm_inst_settings = settings_utils.create_vm_inst_config(
        self.__nova, self._keystone, self.__neutron, stack_server,
        self._os_creds.project_name)
    image_settings = settings_utils.determine_image_config(
        self.__glance, stack_server, self.image_settings)
    keypair_settings = settings_utils.determine_keypair_config(
        self.__heat_cli, self.__stack, stack_server,
        keypair_settings=self.keypair_settings,
        priv_key_key=heat_keypair_option)
    vm_inst_creator = OpenStackVmInstance(
        self._os_creds, vm_inst_settings, image_settings, keypair_settings)
    vm_inst_creator.initialize()
    return vm_inst_creator
def create_vm_instance(os_creds, instance_settings, image_settings,
                       keypair_creator=None, init_only=False):
    """
    Creates a VM instance
    :param os_creds: The OpenStack credentials
    :param instance_settings: Instance of VmInstanceConfig
    :param image_settings: The object containing image settings
    :param keypair_creator: The object responsible for creating the keypair
                            associated with this VM instance. (optional)
    :param init_only: Denotes whether or not this is being called for
                      initialization (T) or creation (F) (default False)
    :return: A reference to the VM instance object
    """
    kp_settings = None
    if keypair_creator:
        kp_settings = keypair_creator.keypair_settings
    vm_creator = OpenStackVmInstance(os_creds, instance_settings,
                                     image_settings, kp_settings)
    if init_only:
        vm_creator.initialize()
    else:
        vm_creator.create()
    return vm_creator
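# A minimal usage sketch of create_vm_instance(), assuming the credential and
# config objects (os_creds, image_settings, network_settings, flavor_settings)
# have already been built as in the surrounding snippets; the 'demo-*' names
# below are illustrative only, not part of the original code.

# port_settings = PortConfig(name='demo-port',
#                            network_name=network_settings.name)
# instance_settings = VmInstanceConfig(name='demo-vm',
#                                      flavor=flavor_settings.name,
#                                      port_settings=[port_settings])
# # init_only=True only initializes the creator; create() can be called later
# vm_creator = create_vm_instance(os_creds, instance_settings,
#                                 image_settings, init_only=True)
# vm_creator.create()
# vm_creator.clean()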
def test_vping_ssh(self, create_vm, path_exists, flavor_create, get_port_ip,
                   vm_active, ssh_active, ssh_client, scp_client,
                   trans_script, do_vping_ssh, ext_net_name):
    os_vm_inst = mock.MagicMock(name='get_console_output')
    os_vm_inst.get_console_output.return_value = 'vPing OK'
    ssh_client = mock.MagicMock(name='get_transport')
    ssh_client.get_transport.return_value = None
    scp_client = mock.MagicMock(name='put')
    scp_client.put.return_value = None

    with mock.patch('snaps.openstack.utils.deploy_utils.create_image',
                    return_value=OpenStackImage(self.os_creds, None)), \
            mock.patch('snaps.openstack.utils.deploy_utils.create_network',
                       return_value=OpenStackNetwork(
                           self.os_creds,
                           NetworkConfig(
                               name='foo',
                               subnet_settings=[
                                   SubnetConfig(
                                       name='bar',
                                       cidr='10.0.0.1/24')]))), \
            mock.patch('snaps.openstack.utils.deploy_utils.'
                       'create_vm_instance',
                       return_value=OpenStackVmInstance(
                           self.os_creds,
                           VmInstanceConfig(
                               name='foo', flavor='bar',
                               port_settings=[PortConfig(
                                   name='foo', network_name='bar')]),
                           None)), \
            mock.patch('snaps.openstack.utils.deploy_utils.create_keypair',
                       return_value=OpenStackKeypair(
                           self.os_creds, KeypairConfig(name='foo'))), \
            mock.patch('snaps.openstack.utils.deploy_utils.create_router',
                       return_value=OpenStackRouter(
                           self.os_creds, RouterConfig(name='foo'))), \
            mock.patch('snaps.openstack.utils.deploy_utils.'
                       'create_security_group',
                       return_value=OpenStackSecurityGroup(
                           self.os_creds,
                           SecurityGroupConfig(name='foo'))), \
            mock.patch('snaps.openstack.create_instance.'
                       'OpenStackVmInstance.get_vm_inst',
                       return_value=os_vm_inst), \
            mock.patch('snaps.openstack.create_instance.'
                       'OpenStackVmInstance.ssh_client',
                       return_value=ssh_client):
        self.assertEquals(TestCase.EX_OK, self.vping_ssh.run())
def test_vping_userdata(self, deploy_vm, path_exists, create_flavor,
                        get_port_ip, vm_active):
    with mock.patch('snaps.openstack.utils.deploy_utils.create_image',
                    return_value=OpenStackImage(self.os_creds, None)), \
            mock.patch('snaps.openstack.utils.deploy_utils.create_network',
                       return_value=OpenStackNetwork(
                           self.os_creds, NetworkConfig(name='foo'))), \
            mock.patch('snaps.openstack.utils.deploy_utils.'
                       'create_vm_instance',
                       return_value=OpenStackVmInstance(
                           self.os_creds,
                           VmInstanceConfig(
                               name='foo', flavor='bar',
                               port_settings=[PortConfig(
                                   name='foo', network_name='bar')]),
                           None)), \
            mock.patch('snaps.openstack.create_instance.'
                       'OpenStackVmInstance.get_console_output',
                       return_value='vPing OK'):
        self.assertEquals(TestCase.EX_OK, self.vping_userdata.run())
def test_vping_userdata(self, *args):
    # pylint: disable=unused-argument
    with mock.patch('snaps.openstack.utils.deploy_utils.create_image',
                    return_value=OpenStackImage(self.os_creds, None)), \
            mock.patch('snaps.openstack.utils.deploy_utils.create_network',
                       return_value=OpenStackNetwork(
                           self.os_creds, NetworkConfig(name='foo'))), \
            mock.patch('snaps.openstack.utils.deploy_utils.create_router',
                       return_value=OpenStackRouter(
                           self.os_creds, RouterConfig(name='foo'))), \
            mock.patch('snaps.openstack.utils.deploy_utils.'
                       'create_vm_instance',
                       return_value=OpenStackVmInstance(
                           self.os_creds,
                           VmInstanceConfig(
                               name='foo', flavor='bar',
                               port_settings=[PortConfig(
                                   name='foo', network_name='bar')]),
                           None)), \
            mock.patch('snaps.openstack.create_instance.'
                       'OpenStackVmInstance.get_console_output',
                       return_value='vPing OK'):
        self.assertEquals(TestCase.EX_OK, self.vping_userdata.run())
def test_vnf(self):
    """Run IXIA Stress test on clearwater ims instance."""
    start_time = time.time()
    cfy_client = self.orchestrator['object']
    outputs = cfy_client.deployments.outputs.get(
        self.vnf['descriptor'].get('name'))['outputs']
    dns_ip = outputs['dns_ip']
    ellis_ip = outputs['ellis_ip']

    self.__logger.info("Creating full IXIA network ...")
    subnet_settings = SubnetConfig(
        name='ixia_management_subnet',
        cidr='10.10.10.0/24',
        dns_nameservers=[env.get('NAMESERVER')])
    network_settings = NetworkConfig(
        name='ixia_management_network', subnet_settings=[subnet_settings])
    network_creator = OpenStackNetwork(self.snaps_creds, network_settings)
    network_creator.create()
    self.created_object.append(network_creator)

    ext_net_name = snaps_utils.get_ext_net_name(self.snaps_creds)
    router_creator = OpenStackRouter(
        self.snaps_creds,
        RouterConfig(name='ixia_management_router',
                     external_gateway=ext_net_name,
                     internal_subnets=[subnet_settings.name]))
    router_creator.create()
    self.created_object.append(router_creator)

    # security group creation
    self.__logger.info("Creating security groups for IXIA VMs")
    sg_rules = list()
    sg_rules.append(
        SecurityGroupRuleConfig(sec_grp_name="ixia_management",
                                direction=Direction.ingress,
                                protocol=Protocol.tcp,
                                port_range_min=1,
                                port_range_max=65535))
    sg_rules.append(
        SecurityGroupRuleConfig(sec_grp_name="ixia_management",
                                direction=Direction.ingress,
                                protocol=Protocol.udp,
                                port_range_min=1,
                                port_range_max=65535))
    sg_rules.append(
        SecurityGroupRuleConfig(sec_grp_name="ixia_management",
                                direction=Direction.ingress,
                                protocol=Protocol.icmp))
    ixia_managment_sg_settings = SecurityGroupConfig(
        name="ixia_management", rule_settings=sg_rules)
    securit_group_creator = OpenStackSecurityGroup(
        self.snaps_creds, ixia_managment_sg_settings)
    securit_group_creator.create()
    self.created_object.append(securit_group_creator)

    sg_rules = list()
    sg_rules.append(
        SecurityGroupRuleConfig(sec_grp_name="ixia_ssh_http",
                                direction=Direction.ingress,
                                protocol=Protocol.tcp,
                                port_range_min=1,
                                port_range_max=65535))
    ixia_ssh_http_sg_settings = SecurityGroupConfig(
        name="ixia_ssh_http", rule_settings=sg_rules)
    securit_group_creator = OpenStackSecurityGroup(
        self.snaps_creds, ixia_ssh_http_sg_settings)
    securit_group_creator.create()
    self.created_object.append(securit_group_creator)

    chassis_flavor_settings = FlavorConfig(
        name="ixia_vChassis", ram=4096, disk=40, vcpus=2)
    flavor_creator = OpenStackFlavor(self.snaps_creds,
                                     chassis_flavor_settings)
    flavor_creator.create()
    self.created_object.append(flavor_creator)

    card_flavor_settings = FlavorConfig(
        name="ixia_vCard", ram=4096, disk=4, vcpus=2)
    flavor_creator = OpenStackFlavor(self.snaps_creds, card_flavor_settings)
    flavor_creator.create()
    self.created_object.append(flavor_creator)

    load_flavor_settings = FlavorConfig(
        name="ixia_vLoad", ram=8192, disk=100, vcpus=4)
    flavor_creator = OpenStackFlavor(self.snaps_creds, load_flavor_settings)
    flavor_creator.create()
    self.created_object.append(flavor_creator)

    chassis_image_settings = ImageConfig(
        name=self.test['requirements']['chassis']['image'],
        image_user='******',
        exists=True)
    card_image_settings = ImageConfig(
        name=self.test['requirements']['card']['image'],
        image_user='******',
        exists=True)
    load_image_settings = ImageConfig(
        name=self.test['requirements']['load']['image'],
        image_user='******',
        exists=True)

    chassis_port_settings = PortConfig(
        name='ixia_chassis_port', network_name=network_settings.name)
    card1_port1_settings = PortConfig(
        name='ixia_card1_port1', network_name=network_settings.name)
    card2_port1_settings = PortConfig(
        name='ixia_card2_port1', network_name=network_settings.name)
    card1_port2_settings = PortConfig(
        name='ixia_card1_port2', network_name="cloudify_ims_network")
    card2_port2_settings = PortConfig(
        name='ixia_card2_port2', network_name="cloudify_ims_network")
    load_port_settings = PortConfig(
        name='ixia_load_port', network_name=network_settings.name)

    chassis_settings = VmInstanceConfig(
        name='ixia_vChassis',
        flavor=chassis_flavor_settings.name,
        port_settings=[chassis_port_settings],
        security_group_names=[
            ixia_ssh_http_sg_settings.name,
            ixia_managment_sg_settings.name
        ],
        floating_ip_settings=[
            FloatingIpConfig(
                name='ixia_vChassis_fip',
                port_name=chassis_port_settings.name,
                router_name=router_creator.router_settings.name)
        ])
    vm_creator = OpenStackVmInstance(self.snaps_creds, chassis_settings,
                                     chassis_image_settings)
    self.__logger.info("Creating Ixia vChassis VM")
    vm_creator.create()
    fip_chassis = vm_creator.get_floating_ip().ip
    self.created_object.append(vm_creator)

    card1_settings = VmInstanceConfig(
        name='ixia_vCard1',
        flavor=card_flavor_settings.name,
        port_settings=[card1_port1_settings, card1_port2_settings],
        security_group_names=[ixia_managment_sg_settings.name])
    vm_creator = OpenStackVmInstance(self.snaps_creds, card1_settings,
                                     card_image_settings)
    self.__logger.info("Creating Ixia vCard1 VM")
    vm_creator.create()
    vcard_ips = list()
    vcard_ips_p2 = list()
    vcard_ips.append(vm_creator.get_port_ip('ixia_card1_port1'))
    vcard_ips_p2.append(vm_creator.get_port_ip('ixia_card1_port2'))
    self.created_object.append(vm_creator)

    card2_settings = VmInstanceConfig(
        name='ixia_vCard2',
        flavor=card_flavor_settings.name,
        port_settings=[card2_port1_settings, card2_port2_settings],
        security_group_names=[ixia_managment_sg_settings.name])
    vm_creator = OpenStackVmInstance(self.snaps_creds, card2_settings,
                                     card_image_settings)
    self.__logger.info("Creating Ixia vCard2 VM")
    vm_creator.create()
    vcard_ips.append(vm_creator.get_port_ip('ixia_card2_port1'))
    vcard_ips_p2.append(vm_creator.get_port_ip('ixia_card2_port2'))
    self.created_object.append(vm_creator)

    load_settings = VmInstanceConfig(
        name='ixia_vLoad',
        flavor=load_flavor_settings.name,
        port_settings=[load_port_settings],
        security_group_names=[
            ixia_ssh_http_sg_settings.name,
            ixia_managment_sg_settings.name
        ],
        floating_ip_settings=[
            FloatingIpConfig(
                name='ixia_vLoad_fip',
                port_name=load_port_settings.name,
                router_name=router_creator.router_settings.name)
        ])
    vm_creator = OpenStackVmInstance(self.snaps_creds, load_settings,
                                     load_image_settings)
    self.__logger.info("Creating Ixia vLoad VM")
    vm_creator.create()
    fip_load = vm_creator.get_floating_ip().ip
    self.created_object.append(vm_creator)

    self.__logger.info("Chassis IP is: %s", fip_chassis)
    login_url = "https://" + str(fip_chassis) + "/api/v1/auth/session"
    cards_url = "https://" + str(fip_chassis) + "/api/v2/ixos/cards/"
    payload = json.dumps({
        "username": "******",
        "password": "******",
        "rememberMe": "false"
    })
    api_key = json.loads(
        (IxChassisUtils.ChassisRestAPI.postWithPayload(
            login_url, payload)))["apiKey"]

    self.__logger.info("Adding 2 cards back inside the ixia chassis...")
    for ip in vcard_ips:
        payload = {"ipAddress": str(ip)}
        response = json.loads(
            IxChassisUtils.ChassisRestAPI.postOperation(
                cards_url, api_key, payload))
        count = 0
        while (int(
                IxChassisUtils.ChassisRestAPI.getWithHeaders(
                    response['url'], api_key)['progress']) != 100):
            self.__logger.debug("Operation did not finish yet. "
                                "Waiting for 1 more second..")
            time.sleep(1)
            if count > 60:
                raise Exception("Adding card took more than 60 seconds")
            count += 1

    ssh = paramiko.SSHClient()
    ssh.set_missing_host_key_policy(paramiko.MissingHostKeyPolicy())
    ssh.connect(fip_chassis, username="******", password="******")
    cmd = "set license-check disable"
    run_blocking_ssh_command(ssh, cmd)
    cmd = "restart-service ixServer"
    run_blocking_ssh_command(ssh, cmd)

    self.config_ellis(ellis_ip)

    # Get IPs of P-CSCF
    resolver = dns.resolver.Resolver()
    resolver.nameservers = [dns_ip]
    result = resolver.query("bono.clearwater.local")
    iplistims = ''
    i = 0
    for rdata in result:
        i = i + 1
        print rdata.address
        iplistims += str(rdata.address)
        if i != len(result):
            iplistims += ';'

    kResourcesUrl = 'http://%s:%s/api/v0/resources' % (fip_load, 8080)
    kRxfPath = r"REG_CALL_OPNFV_v13.rxf"
    test_filname = self.test['inputs']['test_filname']
    kGatewaySharedFolder = '/mnt/ixload-share/'
    kRxfRelativeUploadPath = 'uploads/%s' % os.path.split(kRxfPath)[1]
    kRxfAbsoluteUploadPath = os.path.join(kGatewaySharedFolder,
                                          kRxfRelativeUploadPath)
    kChassisList = [str(fip_chassis)]
    dataFileNameList = [
        test_filname, 'Registration_only_LPS.tst', 'SIPCall.tst'
    ]
    kPortListPerCommunityCommunity = {
        "VoIP1@VM1": [(1, 1, 1)],
        "VoIP2@VM2": [(1, 2, 1)]
    }
    kStatsToDisplayDict = self.test['inputs']['stats']

    connection = IxRestUtils.getConnection(fip_load, 8080)
    self.__logger.info("Creating a new session...")
    sessionUrl = IxLoadUtils.createSession(connection, self.test['version'])
    license_server = self.test['inputs']['licenseServer']
    IxLoadUtils.configureLicenseServer(connection, sessionUrl,
                                       license_server)

    files_dir = os.path.join(self.case_dir, 'ixia/files')
    target_file = open(os.path.join(files_dir, test_filname), 'w')
    j2_env = Environment(loader=FileSystemLoader(files_dir),
                         trim_blocks=True)
    self.test['inputs'].update(
        dict(ipchassis=fip_chassis,
             ipcard1=vcard_ips_p2[0],
             ipcard2=vcard_ips_p2[1],
             iplistims=iplistims))
    target_file.write(
        j2_env.get_template(test_filname + '.template').render(
            self.test['inputs']))
    target_file.close()

    self.__logger.info('Uploading files %s...' % kRxfPath)
    for dataFile in dataFileNameList:
        localFilePath = os.path.join(files_dir, dataFile)
        remoteFilePath = os.path.join(kGatewaySharedFolder,
                                      'uploads/%s' % dataFile)
        IxLoadUtils.uploadFile(connection, kResourcesUrl, localFilePath,
                               remoteFilePath)
    self.__logger.info('Upload file finished.')

    self.__logger.info("Loading repository %s..." % kRxfAbsoluteUploadPath)
    IxLoadUtils.loadRepository(connection, sessionUrl,
                               kRxfAbsoluteUploadPath)
    self.__logger.info("Clearing chassis list...")
    IxLoadUtils.clearChassisList(connection, sessionUrl)
    self.__logger.info("Adding chassis %s..." % (kChassisList))
    IxLoadUtils.addChassisList(connection, sessionUrl, kChassisList)
    self.__logger.info("Assigning new ports...")
    IxLoadUtils.assignPorts(connection, sessionUrl,
                            kPortListPerCommunityCommunity)
    self.__logger.info("Starting the test...")
    IxLoadUtils.runTest(connection, sessionUrl)
    self.__logger.info("Polling values for stats %s..."
                       % (kStatsToDisplayDict))
    result = IxLoadUtils.pollStats(connection, sessionUrl,
                                   kStatsToDisplayDict)

    self.__logger.info("Test finished.")
    self.__logger.info("Checking test status...")
    testRunError = IxLoadUtils.getTestRunError(connection, sessionUrl)
    self.__logger.info(result)
    duration = time.time() - start_time
    self.details['test_vnf'].update(status='PASS', result=result,
                                    duration=duration)
    if testRunError:
        self.__logger.info("The test exited with following error: %s"
                           % (testRunError))
        self.details['test_vnf'].update(status='FAIL', duration=duration)
        return False
    else:
        self.__logger.info("The test completed successfully.")
        self.details['test_vnf'].update(status='PASS', duration=duration)
        self.result += 1 / 3 * 100
        return True
def deploy_orchestrator(self):
    self.logger.info("Deploying Open Baton...")
    self.logger.info("Details: %s", self.mano['details'])
    start_time = time.time()

    self.logger.info("Creating orchestra instance...")
    userdata = get_userdata(self.mano)
    self.logger.info("flavor: %s\n"
                     "image: %s\n"
                     "network_id: %s\n",
                     self.mano['details']['flavor']['name'],
                     self.mano['requirements']['image'],
                     self.mano['details']['network']['id'])
    self.logger.debug("userdata: %s\n", userdata)

    # setting up image
    image_settings = ImageSettings(
        name=self.mano['requirements']['image'],
        image_user='******',
        exists=True)

    # setting up port
    port_settings = PortSettings(
        name='%s_port' % self.case_name,
        network_name=self.mano['details']['network']['name'])

    # build configuration of vm
    orchestra_settings = VmInstanceSettings(
        name=self.case_name,
        flavor=self.mano['details']['flavor']['name'],
        port_settings=[port_settings],
        security_group_names=[self.mano['details']['sec_group']],
        userdata=userdata)
    orchestra_vm = OpenStackVmInstance(self.snaps_creds,
                                       orchestra_settings,
                                       image_settings)
    orchestra_vm.create()
    self.created_resources.append(orchestra_vm)
    self.mano['details']['id'] = orchestra_vm.get_vm_info()['id']
    self.logger.info("Created orchestra instance: %s",
                     self.mano['details']['id'])

    self.logger.info("Associating floating ip: '%s' to VM '%s' ",
                     self.mano['details']['fip'].ip,
                     self.case_name)
    nova_client = os_utils.get_nova_client()
    if not os_utils.add_floating_ip(nova_client,
                                    self.mano['details']['id'],
                                    self.mano['details']['fip'].ip):
        duration = time.time() - start_time
        self.details["orchestrator"].update(status='FAIL',
                                            duration=duration)
        self.logger.error("Cannot associate floating IP to VM.")
        return False

    self.logger.info("Waiting for Open Baton NFVO to be up and running...")
    timeout = 0
    while timeout < 200:
        if servertest(self.mano['details']['fip'].ip, "8080"):
            break
        else:
            self.logger.info("Open Baton NFVO is not started yet (%ss)",
                             (timeout * 5))
            time.sleep(5)
            timeout += 1

    if timeout >= 200:
        duration = time.time() - start_time
        self.details["orchestrator"].update(status='FAIL',
                                            duration=duration)
        self.logger.error("Open Baton is not started correctly")
        return False

    self.logger.info("Waiting for all components to be up and running...")
    time.sleep(60)
    duration = time.time() - start_time
    self.details["orchestrator"].update(status='PASS', duration=duration)
    self.logger.info("Deploy Open Baton NFVO: OK")
    return True
def deploy_orchestrator(self):
    """
    Deploy Cloudify Manager.
    network, security group, fip, VM creation
    """
    # network creation
    start_time = time.time()

    self.__logger.info("Creating keypair ...")
    kp_file = os.path.join(self.data_dir, "cloudify_vrouter.pem")
    keypair_settings = KeypairSettings(name='cloudify_vrouter_kp',
                                       private_filepath=kp_file)
    keypair_creator = OpenStackKeypair(self.snaps_creds, keypair_settings)
    keypair_creator.create()
    self.created_object.append(keypair_creator)

    self.__logger.info("Creating full network ...")
    subnet_settings = SubnetSettings(name='cloudify_vrouter_subnet',
                                     cidr='10.67.79.0/24')
    network_settings = NetworkSettings(name='cloudify_vrouter_network',
                                       subnet_settings=[subnet_settings])
    network_creator = OpenStackNetwork(self.snaps_creds, network_settings)
    network_creator.create()
    self.created_object.append(network_creator)

    ext_net_name = snaps_utils.get_ext_net_name(self.snaps_creds)
    router_creator = OpenStackRouter(
        self.snaps_creds,
        RouterSettings(name='cloudify_vrouter_router',
                       external_gateway=ext_net_name,
                       internal_subnets=[subnet_settings.name]))
    router_creator.create()
    self.created_object.append(router_creator)

    # security group creation
    self.__logger.info("Creating security group for cloudify manager vm")
    sg_rules = list()
    sg_rules.append(
        SecurityGroupRuleSettings(sec_grp_name="sg-cloudify-manager",
                                  direction=Direction.ingress,
                                  protocol=Protocol.tcp,
                                  port_range_min=1,
                                  port_range_max=65535))
    sg_rules.append(
        SecurityGroupRuleSettings(sec_grp_name="sg-cloudify-manager",
                                  direction=Direction.ingress,
                                  protocol=Protocol.udp,
                                  port_range_min=1,
                                  port_range_max=65535))
    security_group_creator = OpenStackSecurityGroup(
        self.snaps_creds,
        SecurityGroupSettings(name="sg-cloudify-manager",
                              rule_settings=sg_rules))
    security_group_creator.create()
    self.created_object.append(security_group_creator)

    # orchestrator VM flavor
    self.__logger.info("Get or create flavor for cloudify manager vm ...")
    flavor_settings = FlavorSettings(
        name=self.orchestrator['requirements']['flavor']['name'],
        ram=self.orchestrator['requirements']['flavor']['ram_min'],
        disk=50,
        vcpus=2)
    flavor_creator = OpenStackFlavor(self.snaps_creds, flavor_settings)
    flavor_creator.create()
    self.created_object.append(flavor_creator)

    image_settings = ImageSettings(
        name=self.orchestrator['requirements']['os_image'],
        image_user='******',
        exists=True)
    port_settings = PortSettings(name='cloudify_manager_port',
                                 network_name=network_settings.name)
    manager_settings = VmInstanceSettings(
        name='cloudify_manager',
        flavor=flavor_settings.name,
        port_settings=[port_settings],
        security_group_names=[
            security_group_creator.sec_grp_settings.name
        ],
        floating_ip_settings=[
            FloatingIpSettings(
                name='cloudify_manager_fip',
                port_name=port_settings.name,
                router_name=router_creator.router_settings.name)
        ])
    manager_creator = OpenStackVmInstance(self.snaps_creds,
                                          manager_settings,
                                          image_settings,
                                          keypair_settings)
    self.__logger.info("Creating cloudify manager VM")
    manager_creator.create()
    self.created_object.append(manager_creator)

    public_auth_url = os_utils.get_endpoint('identity')

    self.__logger.info("Set creds for cloudify manager")
    cfy_creds = dict(keystone_username=self.tenant_name,
                     keystone_password=self.tenant_name,
                     keystone_tenant_name=self.tenant_name,
                     keystone_url=public_auth_url)

    cfy_client = CloudifyClient(host=manager_creator.get_floating_ip().ip,
                                username='******',
                                password='******',
                                tenant='default_tenant')
    self.orchestrator['object'] = cfy_client
    self.cfy_manager_ip = manager_creator.get_floating_ip().ip
self.__logger.info("Attemps running status of the Manager") cfy_status = None retry = 10 while str(cfy_status) != 'running' and retry: try: cfy_status = cfy_client.manager.get_status()['status'] self.__logger.debug("The current manager status is %s", cfy_status) except Exception: # pylint: disable=broad-except self.__logger.warning("Cloudify Manager isn't " + "up and running. Retrying ...") retry = retry - 1 time.sleep(30) if str(cfy_status) == 'running': self.__logger.info("Cloudify Manager is up and running") else: raise Exception("Cloudify Manager isn't up and running") self.__logger.info("Put OpenStack creds in manager") secrets_list = cfy_client.secrets.list() for k, val in cfy_creds.iteritems(): if not any(d.get('key', None) == k for d in secrets_list): cfy_client.secrets.create(k, val) else: cfy_client.secrets.update(k, val) duration = time.time() - start_time self.__logger.info("Put private keypair in manager") if manager_creator.vm_ssh_active(block=True): ssh = manager_creator.ssh_client() scp = SCPClient(ssh.get_transport(), socket_timeout=15.0) scp.put(kp_file, '~/') cmd = "sudo cp ~/cloudify_vrouter.pem /etc/cloudify/" run_blocking_ssh_command(ssh, cmd) cmd = "sudo chmod 444 /etc/cloudify/cloudify_vrouter.pem" run_blocking_ssh_command(ssh, cmd) cmd = "sudo yum install -y gcc python-devel" run_blocking_ssh_command(ssh, cmd, "Unable to install packages on manager") self.details['orchestrator'].update(status='PASS', duration=duration) self.vnf['inputs'].update(dict(external_network_name=ext_net_name)) return True
def tearDown(self):
    """
    Cleans the stack and image
    """
    if self.stack:
        try:
            heat_utils.delete_stack(self.heat_client, self.stack)

            # Wait until stack deployment has completed
            end_time = (time.time() + stack_config.STACK_COMPLETE_TIMEOUT)
            is_deleted = False
            while time.time() < end_time:
                status = heat_utils.get_stack_status(
                    self.heat_client, self.stack.id)
                if status == stack_config.STATUS_DELETE_COMPLETE:
                    is_deleted = True
                    break
                elif status == stack_config.STATUS_DELETE_FAILED:
                    is_deleted = False
                    break
                time.sleep(3)

            if not is_deleted:
                nova = nova_utils.nova_client(
                    self.os_creds, self.os_session)
                keystone = keystone_utils.keystone_client(
                    self.os_creds, self.os_session)
                neutron = neutron_utils.neutron_client(
                    self.os_creds, self.os_session)
                glance = glance_utils.glance_client(
                    self.os_creds, self.os_session)

                servers = heat_utils.get_stack_servers(
                    self.heat_client, nova, neutron, keystone, self.stack,
                    self.os_creds.project_name)
                for server in servers:
                    vm_settings = settings_utils.create_vm_inst_config(
                        nova, keystone, neutron, server,
                        self.os_creds.project_name)
                    img_settings = settings_utils.determine_image_config(
                        glance, server, [
                            self.image_creator1.image_settings,
                            self.image_creator2.image_settings
                        ])
                    vm_creator = OpenStackVmInstance(
                        self.os_creds, vm_settings, img_settings)
                    vm_creator.initialize()
                    vm_creator.clean()
                    vm_creator.vm_deleted(block=True)

                heat_utils.delete_stack(self.heat_client, self.stack)
                time.sleep(20)
        except:
            raise

    if self.image_creator1:
        try:
            self.image_creator1.clean()
        except:
            pass

    if self.image_creator2:
        try:
            self.image_creator2.clean()
        except:
            pass

    if self.keypair1_settings:
        expanded_path = os.path.expanduser(
            self.keypair1_settings.private_filepath)
        os.chmod(expanded_path, 0o755)
        os.remove(expanded_path)

    if self.keypair2_settings:
        expanded_path = os.path.expanduser(
            self.keypair2_settings.private_filepath)
        os.chmod(expanded_path, 0o755)
        os.remove(expanded_path)

    super(self.__class__, self).__clean__()
    subnet_settings=[subnet_settings])
network = OpenStackNetwork(os_creds, network_settings)
network.create()

# Flavors
from snaps.config.flavor import FlavorConfig
from snaps.openstack.create_flavor import OpenStackFlavor

flavor_settings = FlavorConfig(name='test-flavor', ram=256, disk=10, vcpus=2)
flavor = OpenStackFlavor(os_creds, flavor_settings)
flavor.create()

# Instances
from snaps.config.network import PortConfig
from snaps.openstack.create_instance import OpenStackVmInstance

port_settings = PortConfig(name='test-port',
                           network_name=network_settings.name)
instance_settings = VmInstanceConfig(name='test-inst',
                                     flavor=flavor_settings.name,
                                     port_settings=[port_settings])
vm_inst = OpenStackVmInstance(os_creds, instance_settings, image_settings)
vm_inst.create(block=True)

# Cleanup
vm_inst.clean()
flavor.clean()
network.clean()
image.clean()
def deploy_orchestrator(self):
    # pylint: disable=too-many-locals,too-many-statements
    """
    Deploy Cloudify Manager.
    network, security group, fip, VM creation
    """
    start_time = time.time()

    # orchestrator VM flavor
    self.__logger.info("Get or create flavor for cloudify manager vm ...")
    flavor_settings = FlavorConfig(
        name="{}-{}".format(
            self.orchestrator['requirements']['flavor']['name'],
            self.uuid),
        ram=self.orchestrator['requirements']['flavor']['ram_min'],
        disk=50,
        vcpus=2)
    flavor_creator = OpenStackFlavor(self.snaps_creds, flavor_settings)
    flavor_creator.create()
    self.created_object.append(flavor_creator)

    self.__logger.info("Creating a second user to bypass issues ...")
    user_creator = OpenStackUser(
        self.snaps_creds,
        UserConfig(
            name='cloudify_network_bug-{}'.format(self.uuid),
            password=str(uuid.uuid4()),
            project_name=self.tenant_name,
            domain_name=self.snaps_creds.user_domain_name,
            roles={'_member_': self.tenant_name}))
    user_creator.create()
    self.created_object.append(user_creator)
    snaps_creds = user_creator.get_os_creds(self.snaps_creds.project_name)
    self.__logger.debug("snaps creds: %s", snaps_creds)

    self.__logger.info("Creating keypair ...")
    kp_file = os.path.join(self.data_dir, "cloudify_ims.pem")
    keypair_settings = KeypairConfig(
        name='cloudify_ims_kp-{}'.format(self.uuid),
        private_filepath=kp_file)
    keypair_creator = OpenStackKeypair(snaps_creds, keypair_settings)
    keypair_creator.create()
    self.created_object.append(keypair_creator)

    # needs some images
    self.__logger.info("Upload some OS images if they don't exist")
    for image_name, image_file in self.images.iteritems():
        self.__logger.info("image: %s, file: %s", image_name, image_file)
        if image_file and image_name:
            image_creator = OpenStackImage(
                snaps_creds,
                ImageConfig(
                    name=image_name,
                    image_user='******',
                    img_format='qcow2',
                    image_file=image_file))
            image_creator.create()
            self.created_object.append(image_creator)

    # network creation
    self.__logger.info("Creating full network ...")
    subnet_settings = SubnetConfig(
        name='cloudify_ims_subnet-{}'.format(self.uuid),
        cidr='10.67.79.0/24',
        dns_nameservers=[env.get('NAMESERVER')])
    network_settings = NetworkConfig(
        name='cloudify_ims_network-{}'.format(self.uuid),
        subnet_settings=[subnet_settings])
    network_creator = OpenStackNetwork(snaps_creds, network_settings)
    network_creator.create()
    self.created_object.append(network_creator)

    ext_net_name = snaps_utils.get_ext_net_name(snaps_creds)
    router_creator = OpenStackRouter(
        snaps_creds,
        RouterConfig(
            name='cloudify_ims_router-{}'.format(self.uuid),
            external_gateway=ext_net_name,
            internal_subnets=[subnet_settings.name]))
    router_creator.create()
    self.created_object.append(router_creator)

    # security group creation
    self.__logger.info("Creating security group for cloudify manager vm")
    sg_rules = list()
    sg_rules.append(
        SecurityGroupRuleConfig(
            sec_grp_name="sg-cloudify-manager-{}".format(self.uuid),
            direction=Direction.ingress,
            protocol=Protocol.tcp,
            port_range_min=1,
            port_range_max=65535))
    sg_rules.append(
        SecurityGroupRuleConfig(
            sec_grp_name="sg-cloudify-manager-{}".format(self.uuid),
            direction=Direction.ingress,
            protocol=Protocol.udp,
            port_range_min=1,
            port_range_max=65535))
    security_group_creator = OpenStackSecurityGroup(
        snaps_creds,
        SecurityGroupConfig(
            name="sg-cloudify-manager-{}".format(self.uuid),
            rule_settings=sg_rules))
    security_group_creator.create()
    self.created_object.append(security_group_creator)

    image_settings = ImageConfig(
        name=self.orchestrator['requirements']['os_image'],
        image_user='******',
        exists=True)
    port_settings = PortConfig(
        name='cloudify_manager_port-{}'.format(self.uuid),
        network_name=network_settings.name)
    manager_settings = VmInstanceConfig(
        name='cloudify_manager-{}'.format(self.uuid),
        flavor=flavor_settings.name,
        port_settings=[port_settings],
        security_group_names=[
            security_group_creator.sec_grp_settings.name],
        floating_ip_settings=[FloatingIpConfig(
            name='cloudify_manager_fip-{}'.format(self.uuid),
            port_name=port_settings.name,
            router_name=router_creator.router_settings.name)])
    manager_creator = OpenStackVmInstance(
        snaps_creds, manager_settings, image_settings, keypair_settings)
    self.__logger.info("Creating cloudify manager VM")
    manager_creator.create()
    self.created_object.append(manager_creator)

    public_auth_url = keystone_utils.get_endpoint(snaps_creds, 'identity')

    cfy_creds = dict(
        keystone_username=snaps_creds.username,
        keystone_password=snaps_creds.password,
        keystone_tenant_name=snaps_creds.project_name,
        keystone_url=public_auth_url,
        region=snaps_creds.region_name,
        user_domain_name=snaps_creds.user_domain_name,
        project_domain_name=snaps_creds.project_domain_name)
    self.__logger.info("Set creds for cloudify manager %s", cfy_creds)

    cfy_client = CloudifyClient(
        host=manager_creator.get_floating_ip().ip,
        username='******',
        password='******',
        tenant='default_tenant')
    self.orchestrator['object'] = cfy_client

    self.__logger.info("Attempting to get the status of the Manager")
    for loop in range(10):
        try:
            self.__logger.debug(
                "status %s", cfy_client.manager.get_status())
            cfy_status = cfy_client.manager.get_status()['status']
            self.__logger.info(
                "The current manager status is %s", cfy_status)
            if str(cfy_status) != 'running':
                raise Exception("Cloudify Manager isn't up and running")
            self.__logger.info("Put OpenStack creds in manager")
            secrets_list = cfy_client.secrets.list()
            for k, val in cfy_creds.iteritems():
                if not any(d.get('key', None) == k for d in secrets_list):
                    cfy_client.secrets.create(k, val)
                else:
                    cfy_client.secrets.update(k, val)
            break
        except Exception:  # pylint: disable=broad-except
            self.logger.info(
                "try %s: Cloudify Manager isn't up and running", loop + 1)
            time.sleep(30)
    else:
        self.logger.error("Cloudify Manager isn't up and running")
        return False

    duration = time.time() - start_time

    if manager_creator.vm_ssh_active(block=True):
        self.__logger.info("Put private keypair in manager")
        ssh = manager_creator.ssh_client()
        scp = SCPClient(ssh.get_transport(), socket_timeout=15.0)
        scp.put(kp_file, '~/')
        cmd = "sudo cp ~/cloudify_ims.pem /etc/cloudify/"
        self.run_blocking_ssh_command(ssh, cmd)
        cmd = "sudo chmod 444 /etc/cloudify/cloudify_ims.pem"
        self.run_blocking_ssh_command(ssh, cmd)
        # cmd2 is badly unpinned by Cloudify
        cmd = "sudo yum install -y gcc python-devel python-cmd2"
        self.run_blocking_ssh_command(
            ssh, cmd, "Unable to install packages on manager")
        self.run_blocking_ssh_command(ssh, 'cfy status')
    else:
        self.__logger.error("Cannot connect to manager")
        return False

    self.details['orchestrator'].update(status='PASS', duration=duration)

    self.vnf['inputs'].update(dict(
        external_network_name=ext_net_name,
        network_name=network_settings.name,
        key_pair_name=keypair_settings.name
    ))
    self.result = 1/3 * 100
    return True
class NovaUtilsInstanceVolumeTests(OSComponentTestCase):
    """
    Tests the creation of VM instances via nova_utils.py
    """

    def setUp(self):
        """
        Setup objects required by VM instances
        :return:
        """
        guid = self.__class__.__name__ + '-' + str(uuid.uuid4())
        self.nova = nova_utils.nova_client(self.os_creds, self.os_session)
        self.cinder = cinder_utils.cinder_client(
            self.os_creds, self.os_session)

        self.image_creator = None
        self.network_creator = None
        self.flavor_creator = None
        self.volume_creator = None
        self.instance_creator = None

        try:
            image_settings = openstack_tests.cirros_image_settings(
                name=guid + '-image', image_metadata=self.image_metadata)
            self.image_creator = OpenStackImage(
                self.os_creds, image_settings=image_settings)
            self.image_creator.create()

            network_settings = openstack_tests.get_priv_net_config(
                project_name=self.os_creds.project_name,
                net_name="{}-{}".format(guid, 'net'),
                subnet_name="{}-{}".format(guid, 'subnet')).network_settings
            self.network_creator = OpenStackNetwork(
                self.os_creds, network_settings)
            self.network_creator.create()

            flavor_settings = openstack_tests.get_flavor_config(
                name=guid + '-flavor', ram=256, disk=10, vcpus=1,
                metadata=self.flavor_metadata)
            self.flavor_creator = OpenStackFlavor(
                self.os_creds, flavor_settings)
            self.flavor_creator.create()

            # Create Volume
            volume_settings = VolumeConfig(
                name=self.__class__.__name__ + '-' + str(guid))
            self.volume_creator = OpenStackVolume(
                self.os_creds, volume_settings)
            self.volume_creator.create(block=True)

            port_settings = PortConfig(
                name=guid + '-port', network_name=network_settings.name)
            instance_settings = VmInstanceConfig(
                name=guid + '-vm_inst',
                flavor=self.flavor_creator.flavor_settings.name,
                port_settings=[port_settings])
            self.instance_creator = OpenStackVmInstance(
                self.os_creds, instance_settings, image_settings)
            self.instance_creator.create(block=True)
        except:
            self.tearDown()
            raise

    def tearDown(self):
        """
        Cleanup deployed resources
        :return:
        """
        if self.instance_creator:
            try:
                self.instance_creator.clean()
            except:
                pass

        if self.volume_creator:
            try:
                self.volume_creator.clean()
            except:
                pass

        if self.flavor_creator:
            try:
                self.flavor_creator.clean()
            except:
                pass

        if self.network_creator:
            try:
                self.network_creator.clean()
            except:
                pass

        if self.image_creator:
            try:
                self.image_creator.clean()
            except:
                pass

        super(self.__class__, self).__clean__()

    def test_add_remove_volume(self):
        """
        Tests the nova_utils.attach_volume() and detach_volume functions with
        a timeout value
        :return:
        """
        self.assertIsNotNone(self.volume_creator.get_volume())
        self.assertEqual(0, len(self.volume_creator.get_volume().attachments))

        # Attach volume to VM
        neutron = neutron_utils.neutron_client(
            self.os_creds, self.os_session)
        keystone = keystone_utils.keystone_client(
            self.os_creds, self.os_session)
        self.assertIsNotNone(nova_utils.attach_volume(
            self.nova, neutron, keystone,
            self.instance_creator.get_vm_inst(),
            self.volume_creator.get_volume(), self.os_creds.project_name))

        vol_attach = None
        vol_detach = None
        attached = False
        start_time = time.time()
        while time.time() < start_time + 120:
            vol_attach = cinder_utils.get_volume_by_id(
                self.cinder, self.volume_creator.get_volume().id)
            if len(vol_attach.attachments) > 0:
                attached = True
                break
            time.sleep(3)
        self.assertTrue(attached)
        self.assertIsNotNone(vol_attach)

        keystone = keystone_utils.keystone_client(
            self.os_creds, self.os_session)
        vm_attach = nova_utils.get_server_object_by_id(
            self.nova, neutron, keystone,
            self.instance_creator.get_vm_inst().id,
            self.os_creds.project_name)

        # Validate Attachment
        self.assertIsNotNone(vol_attach)
        self.assertEqual(self.volume_creator.get_volume().id, vol_attach.id)
        self.assertEqual(1, len(vol_attach.attachments))
        self.assertEqual(vm_attach.volume_ids[0]['id'],
                         vol_attach.attachments[0]['volume_id'])

        # Detach volume from VM
        self.assertIsNotNone(nova_utils.detach_volume(
            self.nova, neutron, keystone,
            self.instance_creator.get_vm_inst(),
            self.volume_creator.get_volume(), self.os_creds.project_name))

        start_time = time.time()
        while time.time() < start_time + 120:
            vol_detach = cinder_utils.get_volume_by_id(
                self.cinder, self.volume_creator.get_volume().id)
            if len(vol_detach.attachments) == 0:
                attached = False
                break
            time.sleep(3)
        self.assertFalse(attached)
        self.assertIsNotNone(vol_detach)

        vm_detach = nova_utils.get_server_object_by_id(
            self.nova, neutron, keystone,
            self.instance_creator.get_vm_inst().id,
            self.os_creds.project_name)

        # Validate Detachment
        self.assertIsNotNone(vol_detach)
        self.assertEqual(self.volume_creator.get_volume().id, vol_detach.id)
        self.assertEqual(0, len(vol_detach.attachments))
        self.assertEqual(0, len(vm_detach.volume_ids))

    def test_attach_volume_nowait(self):
        """
        Tests the nova_utils.attach_volume() with a timeout value that is too
        small to have the volume attachment data to be included on the VmInst
        object that was supposed to be returned
        """
        self.assertIsNotNone(self.volume_creator.get_volume())
        self.assertEqual(0, len(self.volume_creator.get_volume().attachments))

        # Attach volume to VM
        neutron = neutron_utils.neutron_client(self.os_creds, self.os_session)
        keystone = keystone_utils.keystone_client(
            self.os_creds, self.os_session)
        with self.assertRaises(NovaException):
            nova_utils.attach_volume(
                self.nova, neutron, keystone,
                self.instance_creator.get_vm_inst(),
                self.volume_creator.get_volume(),
                self.os_creds.project_name, 0)

    def test_detach_volume_nowait(self):
        """
        Tests the nova_utils.detach_volume() with a timeout value that is too
        small to have the volume attachment data to be included on the VmInst
        object that was supposed to be returned
        """
        self.assertIsNotNone(self.volume_creator.get_volume())
        self.assertEqual(0, len(self.volume_creator.get_volume().attachments))

        # Attach volume to VM
        neutron = neutron_utils.neutron_client(self.os_creds, self.os_session)
        keystone = keystone_utils.keystone_client(
            self.os_creds, self.os_session)
        nova_utils.attach_volume(
            self.nova, neutron, keystone,
            self.instance_creator.get_vm_inst(),
            self.volume_creator.get_volume(), self.os_creds.project_name)

        # Check VmInst for attachment
        keystone = keystone_utils.keystone_client(
            self.os_creds, self.os_session)
        latest_vm = nova_utils.get_server_object_by_id(
            self.nova, neutron, keystone,
            self.instance_creator.get_vm_inst().id,
            self.os_creds.project_name)
        self.assertEqual(1, len(latest_vm.volume_ids))

        # Check Volume for attachment
        vol_attach = None
        attached = False
        start_time = time.time()
        while time.time() < start_time + 120:
            vol_attach = cinder_utils.get_volume_by_id(
                self.cinder, self.volume_creator.get_volume().id)
            if len(vol_attach.attachments) > 0:
                attached = True
                break
            time.sleep(3)
        self.assertTrue(attached)
        self.assertIsNotNone(vol_attach)

        # Detach volume
        with self.assertRaises(NovaException):
            nova_utils.detach_volume(
                self.nova, neutron, keystone,
                self.instance_creator.get_vm_inst(),
                self.volume_creator.get_volume(),
                self.os_creds.project_name, 0)