# Example #1
    def test_plug_mgmt_vif(self, mock_vm_get, mock_plug, mock_vs_search,
                           mock_crt_cna):
        """Verify PlugMgmtVif only creates the mgmt CNA when it is absent."""
        inst = powervm.TEST_INSTANCE

        # Fake the RMC vswitch that the search returns.
        mgmt_vswitch = mock.MagicMock()
        mgmt_vswitch.href = 'fake_mgmt_uri'
        mock_vs_search.return_value = [mgmt_vswitch]

        task = tf_net.PlugMgmtVif(self.apt, inst)

        # No CNAs supplied, so the task performs its own CNA lookup.
        task.execute(None)

        # The default get_cnas mock returns a plain Mock(), which is truthy;
        # the task therefore thinks a mgmt CNA already exists and skips plug.
        mock_crt_cna.assert_not_called()
        mock_vm_get.assert_called_once_with(
            self.apt, inst, vswitch_uri='fake_mgmt_uri')

        # Make the lookup come back empty this time.
        mock_vm_get.reset_mock()
        mock_vm_get.return_value = []
        task.execute(None)

        # The lookup ran, found no mgmt CNA, so plug was invoked.
        self.assertEqual(1, mock_crt_cna.call_count)
        mock_vm_get.assert_called_once_with(
            self.apt, inst, vswitch_uri='fake_mgmt_uri')

        # Supply CNAs (as PlugVifs would) that do not include the mgmt vif.
        cnas = [mock.Mock(vswitch_uri='uri1'), mock.Mock(vswitch_uri='uri2')]
        mock_crt_cna.reset_mock()
        mock_vm_get.reset_mock()
        task.execute(cnas)

        # No lookup needed (CNAs were handed in), but the mgmt vif was
        # missing from them, so plug ran.
        mock_vm_get.assert_not_called()
        mock_crt_cna.assert_called()

        # Same list, this time with the mgmt vif present.
        cnas.append(mock.Mock(vswitch_uri='fake_mgmt_uri'))
        mock_crt_cna.reset_mock()
        task.execute(cnas)

        # Nothing to look up, nothing to plug.
        mock_vm_get.assert_not_called()
        mock_crt_cna.assert_not_called()

        # Confirm the taskflow Task wiring (name/provides/requires).
        with mock.patch('taskflow.task.Task.__init__') as tf:
            tf_net.PlugMgmtVif(self.apt, inst)
        tf.assert_called_once_with(
            name='plug_mgmt_vif', provides='mgmt_cna', requires=['vm_cnas'])
# Example #2
    def spawn(self,
              context,
              instance,
              image_meta,
              injected_files,
              admin_password,
              allocations,
              network_info=None,
              block_device_info=None):
        """Create a new instance/VM/domain on the virtualization platform.

        Once this successfully completes, the instance should be
        running (power_state.RUNNING).

        If this fails, any partial instance should be completely
        cleaned up, and the virtualization platform should be in the state
        that it was before this call began.

        :param context: security context
        :param instance: nova.objects.instance.Instance
                         This function should use the data there to guide
                         the creation of the new instance.
        :param nova.objects.ImageMeta image_meta:
            The metadata of the image of the instance.
        :param injected_files: User files to inject into instance.
        :param admin_password: Administrator password to set in instance.
        :param allocations: Information about resources allocated to the
                            instance via placement, of the form returned by
                            SchedulerReportClient.get_allocations_for_consumer.
        :param network_info: instance network information
        :param block_device_info: Information about block devices to be
                                  attached to the instance.
        """
        self._log_operation('spawn', instance)

        # VIOS storage connection operations are queued on this FeedTask so
        # they can later run in parallel. Both SCSI and fibre channel
        # mappings are included for the scrubber.
        xags = {pvm_const.XAG.VIO_SMAP, pvm_const.XAG.VIO_FMAP}
        vio_ftsk = pvm_par.build_active_vio_feed_task(self.adapter, xag=xags)

        # Assemble the spawn flow; tasks run in the order added.
        spawn_flow = tf_lf.Flow("spawn")

        # Create the VM itself.
        spawn_flow.add(
            tf_vm.Create(self.adapter, self.host_wrapper, instance,
                         vio_ftsk))

        # Network I/O: the tenant vifs, then the management vif.
        spawn_flow.add(
            tf_net.PlugVifs(self.virtapi, self.adapter, instance,
                            network_info))
        spawn_flow.add(tf_net.PlugMgmtVif(self.adapter, instance))

        # Boot disk: build it from the image, then attach it to the LPAR.
        spawn_flow.add(
            tf_stg.CreateDiskForImg(self.disk_dvr, context, instance,
                                    image_meta))
        spawn_flow.add(
            tf_stg.AttachDisk(self.disk_dvr, instance, stg_ftsk=vio_ftsk))

        # Config drive, if required — added after all the other I/O steps.
        if configdrive.required_by(instance):
            spawn_flow.add(
                tf_stg.CreateAndConnectCfgDrive(self.adapter,
                                                instance,
                                                injected_files,
                                                network_info,
                                                vio_ftsk,
                                                admin_pass=admin_password))

        # Flush the accumulated VIOS storage connections; the FeedTask runs
        # them all in parallel at this point in the flow.
        spawn_flow.add(vio_ftsk)

        # Finally, boot the VM.
        spawn_flow.add(tf_vm.PowerOn(self.adapter, instance))

        # Execute the assembled flow.
        tf_base.run(spawn_flow, instance=instance)