def plug_vifs(self, instance, network_info):
    """Plug VIFs into networks."""
    self._log_operation('plug_vifs', instance)

    # Assemble a linear flow: first look up the LPAR wrapper, then
    # attach the virtual interfaces to it.
    vif_flow = tf_lf.Flow("plug_vifs")
    vif_flow.add(tf_vm.Get(self.adapter, instance))
    vif_flow.add(tf_net.PlugVifs(self.virtapi, self.adapter, instance,
                                 network_info))

    # Execute the flow, translating failures into the exception type
    # compute expects from a VIF plug.
    try:
        tf_base.run(vif_flow, instance=instance)
    except exc.InstanceNotFound:
        raise exc.VirtualInterfacePlugException(
            _("Plug vif failed because instance %s was not found.")
            % instance.name)
    except Exception:
        LOG.exception("PowerVM error plugging vifs.", instance=instance)
        raise exc.VirtualInterfacePlugException(
            _("Plug vif failed because of an unexpected error."))
def test_plug_vifs_rmc(self, mock_cna_get, mock_plug):
    """Tests that a crt vif can be done with secure RMC."""
    inst = powervm.TEST_INSTANCE

    # Mock up the CNA response.  One should already exist, the other
    # should not.
    pre_cnas = [cna('AABBCCDDEEFF'), cna('AABBCCDDEE11')]
    mock_cna_get.return_value = copy.deepcopy(pre_cnas)

    # Mock up the network info.  This also validates that they will be
    # sanitized to upper case (the existing CNA MACs above are upper case,
    # while these addresses are lower case).
    net_info = [
        {'address': 'aa:bb:cc:dd:ee:ff', 'vnic_type': 'normal'},
        {'address': 'aa:bb:cc:dd:ee:22', 'vnic_type': 'normal'},
    ]

    # First run the CNA update, then the CNA create.
    mock_new_cna = mock.Mock(spec=pvm_net.CNA)
    mock_plug.side_effect = ['upd_cna', mock_new_cna]

    # Run method
    p_vifs = tf_net.PlugVifs(mock.MagicMock(), self.apt, inst, net_info)
    all_cnas = p_vifs.execute(self.mock_lpar_wrap)

    # plug should be invoked twice: once as an update of the pre-existing
    # vif (new_vif=False) and once as a fresh create (new_vif=True).
    mock_plug.assert_any_call(self.apt, inst, net_info[0], new_vif=False)
    mock_plug.assert_any_call(self.apt, inst, net_info[1], new_vif=True)

    # The Task provides the list of original CNAs plus only CNAs that were
    # created.
    self.assertEqual(pre_cnas + [mock_new_cna], all_cnas)
def test_plug_vifs_revert(self, mock_vm_get, mock_plug, mock_unplug):
    """Tests that the revert flow works properly."""
    inst = powervm.TEST_INSTANCE

    # Fake CNA list.  The one pre-existing VIF should *not* get reverted.
    cna_list = [cna('AABBCCDDEEFF'), cna('FFEEDDCCBBAA')]
    mock_vm_get.return_value = cna_list

    # Mock up the network info.  Three roll backs.  The first address
    # matches an existing CNA, so only the last two are "new" vifs.
    net_info = [
        {'address': 'aa:bb:cc:dd:ee:ff', 'vnic_type': 'normal'},
        {'address': 'aa:bb:cc:dd:ee:22', 'vnic_type': 'normal'},
        {'address': 'aa:bb:cc:dd:ee:33', 'vnic_type': 'normal'}
    ]

    # Make sure we test raising an exception: the rollback must continue
    # past a failed unplug instead of aborting.
    mock_unplug.side_effect = [exception.NovaException(), None]

    # Run method.  execute first so the task records which vifs it
    # created; then revert to roll them back.
    p_vifs = tf_net.PlugVifs(mock.MagicMock(), self.apt, inst, net_info)
    p_vifs.execute(self.mock_lpar_wrap)
    p_vifs.revert(self.mock_lpar_wrap, mock.Mock(), mock.Mock())

    # The unplug should be called twice.  The exception shouldn't stop the
    # second call.
    self.assertEqual(2, mock_unplug.call_count)

    # Make sure each call is invoked correctly.  The first plug was not a
    # new vif, so it should not be reverted.
    c2 = mock.call(self.apt, inst, net_info[1], cna_w_list=cna_list)
    c3 = mock.call(self.apt, inst, net_info[2], cna_w_list=cna_list)
    mock_unplug.assert_has_calls([c2, c3])
def test_get_vif_events(self):
    """Only network infos reporting active produce VIF events."""
    inst = powervm.TEST_INSTANCE

    # Two mocked network infos; get() (the "active" lookup) returns
    # False for the first and True for the second.
    net_info = [mock.MagicMock(), mock.MagicMock()]
    net_info[0]['id'] = 'a'
    net_info[0].get.return_value = False
    net_info[1]['id'] = 'b'
    net_info[1].get.return_value = True

    # Build the task and treat every network info as newly created.
    p_vifs = tf_net.PlugVifs(mock.MagicMock(), self.apt, inst, net_info)
    p_vifs.crt_network_infos = net_info

    events = p_vifs._get_vif_events()

    # Only one should be returned since only one was active.
    self.assertEqual(1, len(events))
def test_plug_vifs_invalid_state(self, mock_vm_get, mock_plug):
    """Tests that a crt_vif fails when the LPAR state is bad."""
    inst = powervm.TEST_INSTANCE

    # No pre-existing CNAs; a single normal vif keeps this simple.
    mock_vm_get.return_value = []
    net_info = [{'address': 'aa:bb:cc:dd:ee:ff', 'vnic_type': 'normal'}]

    # Force the LPAR into a state that cannot accept I/O changes.
    self.mock_lpar_wrap.can_modify_io.return_value = False, 'bad'

    # The execute should surface a VirtualInterfaceCreateException.
    p_vifs = tf_net.PlugVifs(mock.MagicMock(), self.apt, inst, net_info)
    self.assertRaises(exception.VirtualInterfaceCreateException,
                      p_vifs.execute, self.mock_lpar_wrap)

    # No plug attempt should have been made.
    self.assertEqual(0, mock_plug.call_count)
def test_plug_vifs_timeout(self, mock_vm_get, mock_plug):
    """Tests that crt vif failure via loss of neutron callback."""
    inst = powervm.TEST_INSTANCE

    # One CNA already exists on the LPAR.
    mock_vm_get.return_value = [cna('AABBCCDDEE11')]

    # A single vif whose MAC does not match the existing CNA.
    net_info = [{'address': 'aa:bb:cc:dd:ee:ff', 'vnic_type': 'normal'}]

    # Simulate neutron never calling back by timing out the plug.
    mock_plug.side_effect = eventlet.timeout.Timeout()

    # The timeout must be translated into a create exception.
    p_vifs = tf_net.PlugVifs(mock.MagicMock(), self.apt, inst, net_info)
    self.assertRaises(exception.VirtualInterfaceCreateException,
                      p_vifs.execute, self.mock_lpar_wrap)

    # Exactly one plug attempt was made before failure surfaced.
    self.assertEqual(1, mock_plug.call_count)
def test_plug_vifs_rmc_no_create(self, mock_vm_get, mock_plug):
    """Verifies if no creates are needed, none are done."""
    inst = powervm.TEST_INSTANCE

    # Both CNAs are already present on the LPAR.
    mock_vm_get.return_value = [cna('AABBCCDDEEFF'), cna('AABBCCDDEE11')]

    # Network info whose lower-case MACs match the existing (upper-case)
    # CNAs once sanitized.  Nothing here is vnic_type 'direct', which
    # also verifies that get_vnics is not consulted in that case.
    net_info = [
        {'address': 'aa:bb:cc:dd:ee:ff', 'vnic_type': 'normal'},
        {'address': 'aa:bb:cc:dd:ee:11', 'vnic_type': 'normal'}
    ]

    # Run method
    p_vifs = tf_net.PlugVifs(mock.MagicMock(), self.apt, inst, net_info)
    p_vifs.execute(self.mock_lpar_wrap)

    # Every plug is an update of an existing vif (new_vif=False); no
    # creates occur.
    mock_plug.assert_any_call(self.apt, inst, net_info[0], new_vif=False)
    mock_plug.assert_any_call(self.apt, inst, net_info[1], new_vif=False)
def spawn(self, context, instance, image_meta, injected_files,
          admin_password, allocations, network_info=None,
          block_device_info=None):
    """Create a new instance/VM/domain on the virtualization platform.

    Once this successfully completes, the instance should be
    running (power_state.RUNNING).

    If this fails, any partial instance should be completely
    cleaned up, and the virtualization platform should be in the state
    that it was before this call began.

    :param context: security context
    :param instance: nova.objects.instance.Instance
                     This function should use the data there to guide
                     the creation of the new instance.
    :param nova.objects.ImageMeta image_meta:
        The metadata of the image of the instance.
    :param injected_files: User files to inject into instance.
    :param admin_password: Administrator password to set in instance.
    :param allocations: Information about resources allocated to the
                        instance via placement, of the form returned by
                        SchedulerReportClient.get_allocations_for_consumer.
    :param network_info: instance network information
    :param block_device_info: Information about block devices to be
                              attached to the instance.
    """
    self._log_operation('spawn', instance)

    # Define the flow.  Tasks are added in dependency order and run
    # sequentially unless noted otherwise.
    flow_spawn = tf_lf.Flow("spawn")

    # This FeedTask accumulates VIOS storage connection operations to be
    # run in parallel.  Include both SCSI and fibre channel mappings for
    # the scrubber.
    stg_ftsk = pvm_par.build_active_vio_feed_task(
        self.adapter, xag={pvm_const.XAG.VIO_SMAP, pvm_const.XAG.VIO_FMAP})

    # Create the LPAR itself first; later tasks attach to it.
    flow_spawn.add(tf_vm.Create(self.adapter, self.host_wrapper, instance,
                                stg_ftsk))

    # Create a flow for the IO
    flow_spawn.add(tf_net.PlugVifs(self.virtapi, self.adapter, instance,
                                   network_info))
    flow_spawn.add(tf_net.PlugMgmtVif(self.adapter, instance))

    # Create the boot image.
    flow_spawn.add(tf_stg.CreateDiskForImg(
        self.disk_dvr, context, instance, image_meta))

    # Connects up the disk to the LPAR
    flow_spawn.add(tf_stg.AttachDisk(
        self.disk_dvr, instance, stg_ftsk=stg_ftsk))

    # If the config drive is needed, add those steps.  Should be done
    # after all the other I/O.
    if configdrive.required_by(instance):
        flow_spawn.add(tf_stg.CreateAndConnectCfgDrive(
            self.adapter, instance, injected_files, network_info,
            stg_ftsk, admin_pass=admin_password))

    # Add the transaction manager flow at the end of the 'I/O
    # connection' tasks.  This will run all the connections in parallel.
    flow_spawn.add(stg_ftsk)

    # Last step is to power on the system.
    flow_spawn.add(tf_vm.PowerOn(self.adapter, instance))

    # Run the flow.
    tf_base.run(flow_spawn, instance=instance)