def run(self, result):
        """Create a VM instance and wait for it to become ACTIVE.

        Resolves the configured image/flavor names to IDs, boots the
        instance, and records ``instance_create`` (1 on success, 0 on
        failure) in *result*.  On success the new instance id is pushed
        to the scenario outputs.
        """

        if not self.setup_done:
            self.setup()

        if self.image_name is not None:
            self.openstack['image'] = op_utils.get_image_id(
                self.glance_client, self.image_name)
        if self.flavor_name is not None:
            self.openstack['flavor'] = op_utils.get_flavor_id(
                self.nova_client, self.flavor_name)

        vm = op_utils.create_instance_and_wait_for_active(self.openstack)

        if vm:
            result.update({"instance_create": 1})
            LOG.info("Create instance successful!")
            # Bug fix: only reference vm.id when creation succeeded --
            # previously this raised AttributeError on the failure path.
            # Also: dict.get() never raises KeyError, so the old
            # try/except was dead code.
            keys = self.scenario_cfg.get('output', '').split()
            return self._push_to_outputs(keys, [vm.id])

        result.update({"instance_create": 0})
        LOG.error("Create instance failed!")
    def run(self, result):
        """Create a Cinder volume and wait for it to finish provisioning.

        Polls the volume every 5 seconds while it is still in the
        'creating' or 'downloading' state, then pushes the volume id to
        the scenario outputs.
        """

        if not self.setup_done:
            self.setup()

        if self.image_name:
            self.image_id = op_utils.get_image_id(self.glance_client,
                                                  self.image_name)

        volume = op_utils.create_volume1(self.cinder_client, self.volume_name,
                                         self.volume_size, self.volume_type,
                                         self.image_id)

        # Wait until the volume leaves the transitional states.
        while volume.status in ('creating', 'downloading'):
            LOG.info("Volume status is: %s" % volume.status)
            time.sleep(5)
            volume = op_utils.get_volume_by_name(self.volume_name)

        LOG.info("Create volume successful!")

        keys = self.scenario_cfg.get('output', '').split()
        return self._push_to_outputs(keys, [volume.id])
# Example #3
    def run(self, result):
        """Boot a pinned server expected to exhaust compute resources.

        Creates 'test_server' with the pinned flavor, checks whether it
        lands in ERROR state (resources ran out), and deletes it again.
        """

        if not self.setup_done:
            self.setup()

        net_id = op_utils.get_network_id(self.neutron_client,
                                         self.external_network)
        img_id = op_utils.get_image_id(self.glance_client, self.image)
        flv_id = op_utils.get_flavor_id(self.nova_client,
                                        "yardstick-pinned-flavor")

        # Create multiple VMs to test CPU ran out
        LOG.debug("Creating server instance: test_server")
        server_spec = {
            'name': "test_server",
            'image': img_id,
            'flavor': flv_id,
            'nics': [{"net-id": net_id}],
        }
        self.instance = op_utils.create_instance(server_spec)

        errored = op_utils.check_status("ERROR", "test_server", 10, 5)

        if errored:
            LOG.info("Create test_server failed: lack of resources.")
        else:
            LOG.info("Create test_server successful.")

        op_utils.delete_instance(self.nova_client, self.instance.id)
# Example #4
    def run(self, result):
        """Verify NUMA memory pinning of a pinned-flavor VM.

        Boots one pinned instance and reads its libvirt XML for
        ``<memnode>`` pinning entries, then boots a second pinned
        instance that is expected to fail (ERROR state) for lack of
        resources.  The test passes when exactly one NUMA node is pinned
        and the second boot fails.

        Raises RuntimeError when `virsh dumpxml` fails on the host.
        """

        network_id = op_utils.get_network_id(self.neutron_client,
                                             self.external_network)
        image_id = op_utils.get_image_id(self.glance_client, self.image)

        LOG.debug("Creating NUMA-pinned-instance-1...")
        self.instance = op_utils.create_instance_and_wait_for_active(
            "yardstick-pinned-flavor",
            image_id,
            network_id,
            instance_name="NUMA-pinned-instance-1")

        cmd = "virsh dumpxml %s" % self.instance.id
        LOG.debug("Dumping VM configurations: %s", cmd)
        status, stdout, stderr = self.client.execute(cmd)
        if status:
            raise RuntimeError(stderr)

        # Collect the <memnode> pinning attributes from the domain XML.
        root = ET.fromstring(stdout)
        pinning = [memnode.attrib for memnode in root.iter('memnode')]

        result.update({"pinning": pinning})

        # Create multiple VMs to test CPU ran out
        LOG.debug("Creating NUMA-pinned-instance-2...")
        self.instance_2 = op_utils.create_instance(
            "yardstick-pinned-flavor",
            image_id,
            network_id,
            instance_name="NUMA-pinned-instance-2")

        status = op_utils.check_status("ERROR", "NUMA-pinned-instance-2", 10,
                                       5)

        # Consistency fix: report through LOG like the sibling scenarios
        # instead of bare print().
        if status:
            LOG.info("Create NUMA-pinned-instance-2 failed: "
                     "lack of resources.")

        if len(pinning) == 1 and status:
            result.update({"Test": 1})
            LOG.info("Test passed!")
        else:
            result.update({"Test": 0})
            LOG.error("Test failed!")

        op_utils.delete_instance(self.nova_client, self.instance.id)
        op_utils.delete_instance(self.nova_client, self.instance_2.id)
    def run(self, result):
        """Delete a Glance image by name and push the outcome to outputs.

        Pushes the delete status on success, or an empty value list on
        failure.
        """

        if not self.setup_done:
            self.setup()

        self.image_id = op_utils.get_image_id(self.glance_client,
                                              self.image_name)
        LOG.info("Deleting image: %s", self.image_name)
        status = op_utils.delete_image(self.glance_client, self.image_id)

        if status:
            LOG.info("Delete image successful!")
            values = [status]
        else:
            # Consistency fix: failures are reported at ERROR level like
            # the sibling scenarios, not at INFO.
            LOG.error("Delete image failed!")
            values = []

        keys = self.scenario_cfg.get('output', '').split()
        return self._push_to_outputs(keys, values)
# Example #6
    def run(self, result):
        """Verify vCPU pinning of a VM against the allowed CPU set.

        Boots a pinned instance, parses its libvirt XML for ``<vcpupin>``
        entries, and checks every pinned cpuset is within
        ``self.cpu_set``.  Records pass/fail and the pinning attributes
        in *result*, then deletes the instance.

        Raises RuntimeError when `virsh dumpxml` fails on the host.
        """

        network_id = op_utils.get_network_id(self.neutron_client,
                                             self.external_network)
        image_id = op_utils.get_image_id(self.glance_client, self.image)

        LOG.debug("Creating Virtual machine: cpu-pinned-instance")
        self.instance = op_utils.create_instance_and_wait_for_active(
            "yardstick-pinned-flavor",
            image_id,
            network_id,
            instance_name="cpu-pinned-instance")

        cmd = "virsh dumpxml %s" % self.instance.id
        LOG.debug("Dumping VM configurations: %s", cmd)
        status, stdout, stderr = self.client.execute(cmd)
        if status:
            raise RuntimeError(stderr)

        # Collect the <vcpupin> attributes from the domain XML.
        root = ET.fromstring(stdout)
        pinning = [vcpupin.attrib for vcpupin in root.iter('vcpupin')]

        test_status = 1
        for item in pinning:
            if str(item["cpuset"]) not in self.cpu_set:
                test_status = 0
                LOG.error("Test failed: VM CPU not pinned correctly!")
                break
        else:
            # Bug fix: only report success when the loop completed
            # without a failing entry -- previously "Test passed" was
            # printed unconditionally, even after a failure.
            LOG.info("Test passed: VM CPU pinned correctly!")

        result.update({"Test": test_status})
        result.update({"pinning": pinning})

        op_utils.delete_instance(self.nova_client, self.instance.id)
# Example #7
    def run(self, result):
        """Update a Glance image with the configured options.

        Builds the update payload from whichever supported options the
        user set (plus any custom properties) and pushes a return code
        (0 success, 1 failure) to the scenario outputs.
        """

        if not self.setup_done:
            self.setup()

        self.image_id = op_utils.get_image_id(self.glance_client,
                                              self.image_name)

        supported_args = (
            "name", "min-disk", "min-ram", "tags", "kernal-id", "architecture",
            "container-format", "disk-format", "protected", "public",
            "activate",
        )
        # Keep only the options the scenario actually configured.
        kwargs = {arg: self.options[arg]
                  for arg in supported_args if arg in self.options}

        if self.custom_property:
            kwargs = dict(kwargs, **self.custom_property)

        status = op_utils.update_image(self.glance_client, self.image_id,
                                       **kwargs)

        if status:
            LOG.info("Update image successful!")
            rc = 0
        else:
            LOG.info("Update image failed!")
            rc = 1

        keys = self.scenario_cfg.get('output', '').split()
        return self._push_to_outputs(keys, [rc])
# Example #8
    def run(self, result):
        """Benchmark hugepage consumption of VMs on compute nodes.

        For each of two flavors (2M- and 1G-hugepage backed), compares a
        compute node's free-hugepage count before and after booting a VM
        and records pass/fail plus the raw before/after counts in
        *result*.  The second phase reconfigures the node for 1G pages
        and reboots it.
        """

        # flavor1
        network_id = op_utils.get_network_id(self.neutron_client,
                                             self.external_network)
        image_id = op_utils.get_image_id(self.glance_client, self.image)
        free_mem_before = self._check_compute_node_free_hugepage(
            self.compute_node_name[0])
        self.instance = op_utils.create_instance_and_wait_for_active(
            self.flavor1,
            image_id,
            network_id,
            instance_name="hugepages-2M-VM")

        free_mem_after = self._check_compute_node_free_hugepage(
            self.compute_node_name[0])

        LOG.debug("free_mem_before: %s, after: %s", free_mem_before,
                  free_mem_after)
        op_utils.delete_instance(self.nova_client, self.instance.id)
        # NOTE(review): assumes the 2M-hugepage flavor consumes exactly
        # 512 pages (1 GiB of RAM) -- confirm against the flavor definition.
        result.update({
            "hugepagesz-2M":
            self._pof(free_mem_before == free_mem_after + 512)
        })
        result.update({"2M-free-mem_before": free_mem_before})
        result.update({"2M-free-mem_after": free_mem_after})
        # flavor2
        network_id = op_utils.get_network_id(self.neutron_client,
                                             self.external_network)
        LOG.debug("self.external_network: %s, self.image: %s, flavor:%s",
                  self.external_network, self.image, self.flavor1)
        image_id = op_utils.get_image_id(self.glance_client, self.image)
        free_mem_before = self._check_compute_node_free_hugepage(
            self.compute_node_name[1])
        # config hugepages to be 1G and reboot
        # NOTE(review): the script's exit status/stderr are captured but
        # never checked -- a failed reconfiguration would go unnoticed.
        status, stdout, stderr = self.client.execute(
            "sudo bash hugepages_config.sh")
        # wait for the node to reboot (fixed 120s delay)
        LOG.info("node restarting... wait 120s")
        time.sleep(120)
        LOG.info("node restarting... wait ends")

        # Re-establish the SSH session after the node reboot.
        self._ssh_host(self.compute_node_name[1])
        self.instance = op_utils.create_instance_and_wait_for_active(
            self.flavor2,
            image_id,
            network_id,
            instance_name="hugepages-1G-VM")
        free_mem_after = self._check_compute_node_free_hugepage(
            self.compute_node_name[1])

        LOG.debug("free_mem_before: %s, after: %s", free_mem_before,
                  free_mem_after)
        op_utils.delete_instance(self.nova_client, self.instance.id)
        # Expect exactly one 1G hugepage consumed by the 1G-flavor VM.
        result.update({
            "hugepagesz-1G":
            self._pof(free_mem_before == free_mem_after + 1)
        })
        result.update({"1G-free-mem_before": free_mem_before})
        result.update({"1G-free-mem_after": free_mem_after})