Example #1
    def setup(self):
        """
        Setting up test parameters
        """
        log.info("Starting the test setup")
        super(TestPvcMultiClonePerformance, self).setup()
        self.benchmark_name = "pvc_multi_clone_performance"

        # Run the test in its own project (namespace)
        self.create_test_project()

        self.num_of_clones = 512

        # Getting the total Storage capacity
        self.ceph_capacity = int(self.ceph_cluster.get_ceph_capacity())
        # Use 70% of the storage capacity in the test
        self.capacity_to_use = int(self.ceph_capacity * 0.7)

        # Since we do not want to use more than 65%, we add 35% to the needed
        # capacity; the minimum PVC size is 1 GiB
        self.need_capacity = int((self.num_of_clones + 2) * 1.35)
        # The test will run only on a system with enough capacity
        if self.capacity_to_use < self.need_capacity:
            err_msg = (f"The system has only {self.ceph_capacity} GiB, "
                       f"we want to use only {self.capacity_to_use} GiB, "
                       f"and we need {self.need_capacity} GiB to run the test")
            log.error(err_msg)
            raise exceptions.StorageNotSufficientException(err_msg)

        # Calculating the PVC size in GiB
        self.pvc_size = int(self.capacity_to_use / (self.num_of_clones + 2))

        if self.dev_mode:
            self.num_of_clones = 10
            self.pvc_size = 3

        # Calculating the file size as 70% of the PVC size - in MB
        self.filesize = int(self.pvc_size * 0.70 * constants.GB2MB)
        # Change the file size to MB for the FIO function
        self.file_size = f"{self.filesize}M"
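
The sizing arithmetic above (70% of the raw Ceph capacity, a 35% margin on the required space, and an even split across the clones) can be reproduced in isolation. The following is a minimal, self-contained sketch of that calculation; calc_pvc_size() and the sample capacity value are illustrative and not part of ocs-ci.

# Minimal sketch of the capacity check and PVC sizing performed in setup().
# calc_pvc_size() is a hypothetical helper used here for illustration only.
def calc_pvc_size(ceph_capacity_gib: int, num_of_clones: int) -> int:
    """Return the PVC size (in GiB) for the given cluster capacity, or raise."""
    capacity_to_use = int(ceph_capacity_gib * 0.7)    # use at most 70% of the cluster
    need_capacity = int((num_of_clones + 2) * 1.35)   # required space plus a 35% margin
    if capacity_to_use < need_capacity:
        raise ValueError(
            f"Need {need_capacity} GiB but only {capacity_to_use} GiB may be used")
    # Split the usable capacity between the original PVC, the clones and one spare
    return int(capacity_to_use / (num_of_clones + 2))

# Example: a 1 TiB (1024 GiB) cluster with 512 clones yields a 1 GiB PVC
print(calc_pvc_size(1024, 512))  # -> 1
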
Example #2
    def test_pvc_multiple_snapshot_performance(
        self,
        interface_iterate,
        teardown_factory,
        storageclass_factory,
        pvc_factory,
        pod_factory,
    ):
        """
        1. Create a PVC
           The size depends on the storage capacity, but is not less than 1 GiB;
           it will use ~75% of the storage capacity (minimum storage capacity 1 TiB)
        2. Fill the PVC with 80% of data
        3. Take a snapshot of the PVC and measure the creation time.
        4. Re-write the data on the PVC
        5. Take a snapshot of the PVC and measure the creation time.
        6. Repeat steps 4-5 for the number of snapshots we want to take: 512.
           This is run by an outside script to keep memory consumption low.
        7. Print all information.

        Raises:
            StorageNotSufficientException: in case of not enough capacity

        """
        # Number of snapshots: 100 for CephFS and 512 for RBD
        num_of_snaps = 100
        if interface_iterate == constants.CEPHBLOCKPOOL:
            num_of_snaps = 512

        # Getting the total Storage capacity
        ceph_cluster = CephCluster()
        ceph_capacity = int(ceph_cluster.get_ceph_capacity())

        # Use 70% of the storage capacity in the test
        capacity_to_use = int(ceph_capacity * 0.7)

        # Since we do not want to use more than 65%, we add 35% to the needed
        # capacity; the minimum PVC size is 1 GiB
        need_capacity = int((num_of_snaps + 2) * 1.35)
        # The test will run only on a system with enough capacity
        if capacity_to_use < need_capacity:
            err_msg = (f"The system has only {ceph_capacity} GiB, "
                       f"we want to use only {capacity_to_use} GiB, "
                       f"and we need {need_capacity} GiB to run the test")
            log.error(err_msg)
            raise exceptions.StorageNotSufficientException(err_msg)

        # Calculating the PVC size in GiB
        pvc_size = int(capacity_to_use / (num_of_snaps + 2))

        self.interface = interface_iterate
        self.sc_obj = storageclass_factory(self.interface)

        self.pvc_obj = pvc_factory(interface=self.interface,
                                   size=pvc_size,
                                   status=constants.STATUS_BOUND)

        self.pod_obj = pod_factory(interface=self.interface,
                                   pvc=self.pvc_obj,
                                   status=constants.STATUS_RUNNING)

        # Calculating the file size as 80% of the PVC size
        filesize = self.pvc_obj.size * 0.80
        # Change the file size to MB for the FIO function
        file_size = f"{int(filesize * constants.GB2MB)}M"
        file_name = self.pod_obj.name

        log.info(f"Total capacity size is : {ceph_capacity} GiB, "
                 f"Going to use {need_capacity} GiB, "
                 f"With {num_of_snaps} Snapshots to {pvc_size} GiB PVC. "
                 f"File size to be written is : {file_size} "
                 f"with the name of {file_name}")

        os.environ["SNAPNUM"] = f"{num_of_snaps}"
        os.environ["LOGPATH"] = f"{ocsci_log_path()}"
        os.environ["FILESIZE"] = file_size
        os.environ["NSPACE"] = self.pvc_obj.namespace
        os.environ["PODNAME"] = self.pod_obj.name
        os.environ["PVCNAME"] = self.pvc_obj.name
        os.environ["INTERFACE"] = self.interface

        main_script = "tests/e2e/performance/test_multi_snapshots.py"
        result = subprocess.run([main_script], stdout=subprocess.PIPE)
        log.info(f"Results from main script : {result.stdout.decode('utf-8')}")

        if "All results are" not in result.stdout.decode("utf-8"):
            log.error("Test did not completed")
            raise Exception("Test did not completed")
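
The snapshot test above hands its parameters to tests/e2e/performance/test_multi_snapshots.py through environment variables. A hypothetical receiving side could look like the sketch below; the variable names match the ones exported above, but the parsing code and the fallback values are assumptions, not the actual script.

# Hypothetical sketch of how the external snapshot script might read the
# parameters exported above; this is not the actual test_multi_snapshots.py.
import os

snap_num = int(os.environ.get("SNAPNUM", "512"))    # number of snapshots to create
log_path = os.environ.get("LOGPATH", "/tmp/logs")   # where result logs are written
file_size = os.environ.get("FILESIZE", "716M")      # data re-written by FIO each round
namespace = os.environ.get("NSPACE", "namespace")   # project of the PVC and pod
pod_name = os.environ.get("PODNAME", "pod-test")
pvc_name = os.environ.get("PVCNAME", "pvc-test")
interface = os.environ.get("INTERFACE", "CephBlockPool")

print(f"Creating {snap_num} snapshots of {pvc_name} ({interface}) in {namespace}, "
      f"re-writing {file_size} via pod {pod_name}; logs go to {log_path}")
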
    def test_pvc_multiple_clone_performance(
        self,
        interface_iterate,
        teardown_factory,
        storageclass_factory,
        pvc_factory,
        pod_factory,
    ):
        """
        1. Create a PVC
           The PVC size is calculated in the test and depends on the storage
           capacity, but is not less than 1 GiB; it will use ~75% of the storage
           capacity (minimum storage capacity 1 TiB)
        2. Fill the PVC with 70% of data
        3. Take a clone of the PVC and measure the time and speed of creation by
           reading the start and end creation times from the relevant logs
        4. Repeat the previous step a number of times (maximum num_of_clones is 512)
        5. Print all measured statistics for all the clones.

        Raises:
            StorageNotSufficientException: in case of not enough capacity on the cluster

        """
        num_of_clones = 512

        # Getting the total Storage capacity
        ceph_cluster = CephCluster()
        ceph_capacity = int(ceph_cluster.get_ceph_capacity())

        # Use 70% of the storage capacity in the test
        capacity_to_use = int(ceph_capacity * 0.7)

        # Since we do not want to use more than 65%, we add 35% to the needed
        # capacity; the minimum PVC size is 1 GiB
        need_capacity = int((num_of_clones + 2) * 1.35)
        # The test will run only on a system with enough capacity
        if capacity_to_use < need_capacity:
            err_msg = (f"The system has only {ceph_capacity} GiB, "
                       f"we want to use only {capacity_to_use} GiB, "
                       f"and we need {need_capacity} GiB to run the test")
            log.error(err_msg)
            raise exceptions.StorageNotSufficientException(err_msg)

        # Calculating the PVC size in GiB
        pvc_size = int(capacity_to_use / (num_of_clones + 2))

        self.interface = interface_iterate
        self.sc_obj = storageclass_factory(self.interface)

        self.pvc_obj = pvc_factory(interface=self.interface,
                                   size=pvc_size,
                                   status=constants.STATUS_BOUND)

        self.pod_obj = pod_factory(interface=self.interface,
                                   pvc=self.pvc_obj,
                                   status=constants.STATUS_RUNNING)

        # Calculating the file size as 70% of the PVC size
        filesize = self.pvc_obj.size * 0.70
        # Change the file size to MB for the FIO function
        file_size = f"{int(filesize * constants.GB2MB)}M"
        file_name = self.pod_obj.name

        log.info(f"Total capacity size is : {ceph_capacity} GiB, "
                 f"Going to use {need_capacity} GiB, "
                 f"With {num_of_clones} clones to {pvc_size} GiB PVC. "
                 f"File size to be written is : {file_size} "
                 f"with the name of {file_name}")
        self.params = {}
        self.params["clonenum"] = f"{num_of_clones}"
        self.params["filesize"] = file_size
        self.params["ERRMSG"] = "Error in command"

        clone_yaml = self.build_params()
        performance_lib.write_fio_on_pod(self.pod_obj, file_size)

        # Running the test
        results = []
        for test_num in range(1, int(self.params["clonenum"]) + 1):
            log.info(f"Starting test number {test_num}")
            ct = self.create_clone(test_num, clone_yaml)
            speed = self.params["datasize"] / ct
            results.append({"Clone Num": test_num, "time": ct, "speed": speed})
            log.info(
                f"Results for clone number {test_num} are : "
                f"Creation time is {ct} secs, Creation speed {speed} MB/sec")

        for r in results:
            log.info(
                f"Clone number {r['Clone Num']} creation time is {r['time']} secs."
            )
            log.info(
                f"Clone number {r['Clone Num']} creation speed is {r['speed']} MB/sec."
            )
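
The clone tests above derive the creation time of every clone from start and end timestamps taken from the relevant logs, and the creation speed from that time and the amount of written data. The sketch below shows the shape of such a measurement; the timestamp format and the measure_clone() helper are assumptions, not ocs-ci code.

# Hypothetical sketch of turning two log timestamps into a creation time and a
# creation speed; the timestamp format is an assumption for illustration.
from datetime import datetime

def measure_clone(start_ts: str, end_ts: str, data_size_mb: float):
    fmt = "%Y-%m-%d %H:%M:%S.%f"                      # assumed log timestamp format
    elapsed = (datetime.strptime(end_ts, fmt)
               - datetime.strptime(start_ts, fmt)).total_seconds()
    return elapsed, data_size_mb / elapsed            # seconds, MB/sec

ct, speed = measure_clone("2021-06-01 10:00:00.000", "2021-06-01 10:00:12.500", 716.0)
print(f"Creation time {ct} secs, creation speed {speed:.1f} MB/sec")  # 12.5 secs, ~57.3 MB/sec
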
    def test_pvc_multiple_clone_performance(
        self,
        interface_iterate,
        teardown_factory,
        storageclass_factory,
        pvc_factory,
        pod_factory,
    ):
        """
        1. Create a PVC
           The PVC size is calculated in the test and depends on the storage
           capacity, but is not less than 1 GiB; it will use ~75% of the storage
           capacity (minimum storage capacity 1 TiB)
        2. Fill the PVC with 70% of data
        3. Take a clone of the PVC and measure the time and speed of creation by
           reading the start and end creation times from the relevant logs
        4. Repeat the previous step a number of times (maximum num_of_clones is 512)
        5. Print all measured statistics for all the clones.

        Raises:
            StorageNotSufficientException: in case of not enough capacity on the cluster

        """
        num_of_clones = 512

        # Getting the total Storage capacity
        ceph_cluster = CephCluster()
        ceph_capacity = int(ceph_cluster.get_ceph_capacity())

        # Use 70% of the storage capacity in the test
        capacity_to_use = int(ceph_capacity * 0.7)

        # Since we do not want to use more than 65%, we add 35% to the needed
        # capacity; the minimum PVC size is 1 GiB
        need_capacity = int((num_of_clones + 2) * 1.35)
        # The test will run only on a system with enough capacity
        if capacity_to_use < need_capacity:
            err_msg = (f"The system has only {ceph_capacity} GiB, "
                       f"we want to use only {capacity_to_use} GiB, "
                       f"and we need {need_capacity} GiB to run the test")
            log.error(err_msg)
            raise exceptions.StorageNotSufficientException(err_msg)

        # Calculating the PVC size in GiB
        pvc_size = int(capacity_to_use / (num_of_clones + 2))

        self.interface = interface_iterate
        self.sc_obj = storageclass_factory(self.interface)

        if self.interface == constants.CEPHFILESYSTEM:
            sc = "CephFS"
        elif self.interface == constants.CEPHBLOCKPOOL:
            sc = "RBD"

        self.full_log_path = get_full_test_logs_path(cname=self)
        self.full_log_path += f"-{sc}"

        self.pvc_obj = pvc_factory(interface=self.interface,
                                   size=pvc_size,
                                   status=constants.STATUS_BOUND)

        self.pod_obj = pod_factory(interface=self.interface,
                                   pvc=self.pvc_obj,
                                   status=constants.STATUS_RUNNING)

        # Calculating the file size as 70% of the PVC size
        filesize = self.pvc_obj.size * 0.70
        # Change the file size to MB for the FIO function
        file_size = f"{int(filesize * constants.GB2MB)}M"
        file_name = self.pod_obj.name

        log.info(f"Total capacity size is : {ceph_capacity} GiB, "
                 f"Going to use {need_capacity} GiB, "
                 f"With {num_of_clones} clones to {pvc_size} GiB PVC. "
                 f"File size to be written is : {file_size} "
                 f"with the name of {file_name}")
        self.params = {}
        self.params["clonenum"] = f"{num_of_clones}"
        self.params["filesize"] = file_size
        self.params["ERRMSG"] = "Error in command"

        clone_yaml = self.build_params()
        performance_lib.write_fio_on_pod(self.pod_obj, file_size)

        # Running the test
        results = []
        for test_num in range(1, int(self.params["clonenum"]) + 1):
            log.info(f"Starting test number {test_num}")
            ct = self.create_clone(test_num, clone_yaml)
            speed = self.params["datasize"] / ct
            results.append({"Clone Num": test_num, "time": ct, "speed": speed})
            log.info(
                f"Results for clone number {test_num} are : "
                f"Creation time is {ct} secs, Creation speed {speed} MB/sec")

        for r in results:
            log.info(
                f"Clone number {r['Clone Num']} creation time is {r['time']} secs."
            )
            log.info(
                f"Clone number {r['Clone Num']} creation speed is {r['speed']} MB/sec."
            )

        creation_time_list = [r["time"] for r in results]
        average_creation_time = statistics.mean(creation_time_list)
        log.info(f"Average creation time is  {average_creation_time} secs.")

        creation_speed_list = [r["speed"] for r in results]
        average_creation_speed = statistics.mean(creation_speed_list)
        log.info(f"Average creation speed is  {average_creation_time} MB/sec.")

        self.results_path = get_full_test_logs_path(cname=self)
        # Produce ES report
        # Collecting environment information
        self.get_env_info()

        # Initialize the results doc file.
        full_results = self.init_full_results(
            ResultsAnalyse(
                self.uuid,
                self.crd_data,
                self.full_log_path,
                "pvc_multiple_clone_measurement",
            ))

        full_results.add_key("interface", self.interface)
        full_results.add_key("clones_num", num_of_clones)
        full_results.add_key("clone_size", pvc_size)
        full_results.add_key("multi_clone_creation_time", creation_time_list)
        full_results.add_key("multi_clone_creation_time_average",
                             average_creation_time)
        full_results.add_key("multi_clone_creation_speed", creation_speed_list)
        full_results.add_key("multi_clone_creation_speed_average",
                             average_creation_speed)

        # Write the test results into the ES server
        if full_results.es_write():
            res_link = full_results.results_link()
            log.info(f"The Result can be found at : {res_link}")

            # Create text file with results of all subtests (4 - according to the parameters)
            self.write_result_to_file(res_link)
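
The second clone test additionally aggregates the per-clone measurements into averages and pushes them to Elasticsearch under fixed keys. The sketch below reproduces only the aggregation step with the standard library; the plain dict stands in for the ResultsAnalyse object and is illustrative only.

# Minimal sketch of the aggregation step: averaging per-clone measurements and
# collecting them under the same keys that are written to Elasticsearch above.
import statistics

# A few synthetic sample measurements, just to make the sketch runnable
results = [{"Clone Num": i, "time": 10.0 + i, "speed": 70.0 - i} for i in range(1, 4)]

summary = {
    "multi_clone_creation_time": [r["time"] for r in results],
    "multi_clone_creation_speed": [r["speed"] for r in results],
}
summary["multi_clone_creation_time_average"] = statistics.mean(
    summary["multi_clone_creation_time"])
summary["multi_clone_creation_speed_average"] = statistics.mean(
    summary["multi_clone_creation_speed"])

print(summary)
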
Example #5
    def test_pvc_multiple_snapshot_performance(
        self,
        pvc_factory,
        pod_factory,
        secret_factory,
        interface_type,
        snap_number,
    ):
        """
        1. Create a PVC
           The size depends on the storage capacity, but is not less than 1 GiB;
           it will use ~75% of the storage capacity (minimum storage capacity 1 TiB)
        2. Fill the PVC with 80% of data
        3. Take a snapshot of the PVC and measure the total and CSI creation times.
        4. Re-write the data on the PVC
        5. Take a snapshot of the PVC and measure the total and CSI creation times.
        6. Repeat steps 4-5 for the number of snapshots we want to take: 512.
           This is run by an outside script to keep memory consumption low.
        7. Print all information.

        Raises:
            StorageNotSufficientException: in case of not enough capacity

        """

        # Getting the full path for the test logs
        self.results_path = get_full_test_logs_path(cname=self)
        self.full_log_path = f"{self.results_path}-{interface_type}-{snap_number}"
        log.info(f"Logs file path name is : {self.full_log_path}")
        log.info(f"Reslut path is : {self.results_path}")

        self.full_teardown = True
        self.num_of_snaps = snap_number
        if self.dev_mode:
            self.num_of_snaps = 2

        log.info(
            f"Going to create {self.num_of_snaps} {interface_type} snapshots")

        # Since we do not want to use more than 65%, we add 35% to the needed
        # capacity; the minimum PVC size is 1 GiB
        self.need_capacity = int((self.num_of_snaps + 2) * 1.35)

        # The test will run only on a system with enough capacity
        if self.capacity_to_use < self.need_capacity:
            err_msg = (f"The system has only {self.ceph_capacity} GiB, "
                       f"we want to use only {self.capacity_to_use} GiB, "
                       f"and we need {self.need_capacity} GiB to run the test")
            log.error(err_msg)
            raise exceptions.StorageNotSufficientException(err_msg)

        # Calculating the PVC size in GiB
        self.pvc_size = int(self.capacity_to_use / (self.num_of_snaps + 2))
        if self.dev_mode:
            self.pvc_size = 5

        self.interface = interface_type
        self.sc_name = "pas-testing-rbd"
        pool_name = self.sc_name
        if self.interface == constants.CEPHFILESYSTEM:
            self.sc_name = "pas-testing-cephfs"
            pool_name = f"{self.sc_name}-data0"

        # Creating new storage pool
        self.create_new_pool(self.sc_name)

        # Creating new StorageClass (pool) for the test.
        secret = secret_factory(interface=self.interface)
        self.sc_obj = helpers.create_storage_class(
            interface_type=self.interface,
            interface_name=pool_name,
            secret_name=secret.name,
            sc_name=self.sc_name,
            fs_name=self.sc_name,
        )
        log.info(f"The new SC is : {self.sc_obj.name}")
        log.debug(f"All SC data is {json.dumps(self.sc_obj.data, indent=3)}")

        # Create new VolumeSnapshotClass
        self.snap_class = self.create_snapshotclass(self.interface)

        # Create new PVC
        log.info(f"Creating {self.pvc_size} GiB PVC of {interface_type}")
        self.pvc_obj = pvc_factory(
            interface=self.interface,
            storageclass=self.sc_obj,
            size=self.pvc_size,
            status=constants.STATUS_BOUND,
            project=self.proj,
        )

        # Create a POD which will attach to the new PVC
        log.info("Creating a POD")
        self.pod_obj = pod_factory(
            interface=self.interface,
            pvc=self.pvc_obj,
            status=constants.STATUS_RUNNING,
            pod_dict_path=constants.PERF_POD_YAML,
        )

        # Calculating the file size as 80% of the PVC size
        self.filesize = self.pvc_obj.size * 0.80
        # Change the file size to MB for the FIO function
        self.file_size = f"{int(self.filesize * constants.GB2MB)}M"
        self.file_name = self.pod_obj.name

        log.info(
            f"Total capacity size is : {self.ceph_capacity} GiB, "
            f"Going to use {self.need_capacity} GiB, "
            f"With {self.num_of_snaps} Snapshots to {self.pvc_size} GiB PVC. "
            f"File size to be written is : {self.file_size} "
            f"with the name of {self.file_name}")

        # Reading basic snapshot yaml file
        self.snap_yaml = constants.CSI_CEPHFS_SNAPSHOT_YAML
        self.sc = constants.DEFAULT_VOLUMESNAPSHOTCLASS_CEPHFS
        self.fs_type = "cephfs"
        if interface_type == constants.CEPHBLOCKPOOL:
            self.snap_yaml = constants.CSI_RBD_SNAPSHOT_YAML
            self.fs_type = "rbd"
            self.sc = constants.DEFAULT_VOLUMESNAPSHOTCLASS_RBD
        with open(self.snap_yaml, "r") as stream:
            try:
                self.snap_templ = yaml.safe_load(stream)
                self.snap_templ["spec"]["volumeSnapshotClassName"] = self.sc
                self.snap_templ["spec"]["source"][
                    "persistentVolumeClaimName"] = self.pvc_obj.name
            except yaml.YAMLError as exc:
                log.error(f"Can not read template yaml file {exc}")
        log.debug(
            f"Snapshot yaml file : {self.snap_yaml} "
            f"Content of snapshot yaml file {json.dumps(self.snap_templ, indent=4)}"
        )

        self.build_fio_command()
        self.start_time = self.get_time()

        # Initialize the results doc file.
        full_results = self.init_full_results(
            ResultsAnalyse(self.uuid, self.crd_data, self.full_log_path,
                           "multiple_snapshots"))
        full_results.all_results = self.run()
        self.end_time = self.get_time()
        full_results.add_key(
            "avg_creation_time",
            f"{float(self.total_creation_time / self.num_of_snaps):.2f}",
        )
        full_results.add_key(
            "avg_csi_creation_time",
            f"{float(self.total_csi_creation_time / self.num_of_snaps):.2f}",
        )
        full_results.add_key(
            "avg_creation_speed",
            f"{float(self.total_creation_speed / self.num_of_snaps):.2f}",
        )
        full_results.add_key("test_time", {
            "start": self.start_time,
            "end": self.end_time
        })

        # Writing the analyzed test results to the Elastic-Search server
        if full_results.es_write():
            res_link = full_results.results_link()
            log.info(f"The Result can be found at : {res_link}")

            # Create text file with results of all subtests (2 - according to the parameters)
            self.write_result_to_file(res_link)
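
The snapshot tests in this example patch a VolumeSnapshot template with the snapshot class and the source PVC before every iteration. The sketch below builds an equivalent manifest as a plain dict; the base template, the naming scheme and the sample values are assumptions for illustration, not the YAML files referenced above.

# Hedged sketch: building one VolumeSnapshot manifest per iteration by patching
# a template dict, much like the yaml.safe_load() block above does.
import copy

snap_template = {
    "apiVersion": "snapshot.storage.k8s.io/v1",
    "kind": "VolumeSnapshot",
    "metadata": {"name": ""},
    "spec": {"volumeSnapshotClassName": "", "source": {"persistentVolumeClaimName": ""}},
}

def build_snapshot(index: int, snap_class: str, pvc_name: str) -> dict:
    snap = copy.deepcopy(snap_template)
    snap["metadata"]["name"] = f"{pvc_name}-snapshot-{index}"  # hypothetical naming
    snap["spec"]["volumeSnapshotClassName"] = snap_class
    snap["spec"]["source"]["persistentVolumeClaimName"] = pvc_name
    return snap

print(build_snapshot(1, "csi-rbdplugin-snapclass", "pvc-test")["metadata"]["name"])
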
    def test_pvc_multiple_snapshot_performance(
        self,
        pvc_factory,
        pod_factory,
        secret_factory,
        interface_type,
        snap_number,
    ):
        """
        1. Create a PVC
           The size depends on the storage capacity, but is not less than 1 GiB;
           it will use ~75% of the storage capacity (minimum storage capacity 1 TiB)
        2. Fill the PVC with 80% of data
        3. Take a snapshot of the PVC and measure the creation time.
        4. Re-write the data on the PVC
        5. Take a snapshot of the PVC and measure the creation time.
        6. Repeat steps 4-5 for the number of snapshots we want to take: 512.
           This is run by an outside script to keep memory consumption low.
        7. Print all information.

        Raises:
            StorageNotSufficientException: in case of not enough capacity

        """

        self.num_of_snaps = snap_number
        if self.dev_mode:
            self.num_of_snaps = 2

        log.info(
            f"Going to create {self.num_of_snaps} {interface_type} snapshots")

        # Since we do not want to use more than 65%, we add 35% to the needed
        # capacity; the minimum PVC size is 1 GiB
        self.need_capacity = int((self.num_of_snaps + 2) * 1.35)

        # The test will run only on a system with enough capacity
        if self.capacity_to_use < self.need_capacity:
            err_msg = (f"The system has only {self.ceph_capacity} GiB, "
                       f"we want to use only {self.capacity_to_use} GiB, "
                       f"and we need {self.need_capacity} GiB to run the test")
            log.error(err_msg)
            raise exceptions.StorageNotSufficientException(err_msg)

        # Calculating the PVC size in GiB
        self.pvc_size = int(self.capacity_to_use / (self.num_of_snaps + 2))
        if self.dev_mode:
            self.pvc_size = 5

        self.interface = interface_type
        self.sc_name = "pas-testing-rbd"
        pool_name = self.sc_name
        if self.interface == constants.CEPHFILESYSTEM:
            self.sc_name = "pas-testing-cephfs"
            pool_name = f"{self.sc_name}-data0"

        # Creating new storage pool
        self.create_new_pool(self.sc_name)

        # Creating new StorageClass (pool) for the test.
        secret = secret_factory(interface=self.interface)
        self.sc_obj = helpers.create_storage_class(
            interface_type=self.interface,
            interface_name=pool_name,
            secret_name=secret.name,
            sc_name=self.sc_name,
            fs_name=self.sc_name,
        )
        log.info(f"The new SC is : {self.sc_obj.name}")
        log.debug(f"All Sc data is {json.dumps(self.sc_obj.data, indent=3)}")

        # Create new VolumeSnapshotClass
        self.snap_class = self.create_snapshotclass(self.interface)

        # Create new PVC
        log.info(f"Creating {self.pvc_size} GiB PVC of {interface_type}")
        self.pvc_obj = pvc_factory(
            interface=self.interface,
            storageclass=self.sc_obj,
            size=self.pvc_size,
            status=constants.STATUS_BOUND,
            project=self.proj,
        )

        # Create a POD which will attach to the new PVC
        log.info("Creating a POD")
        self.pod_obj = pod_factory(
            interface=self.interface,
            pvc=self.pvc_obj,
            status=constants.STATUS_RUNNING,
            pod_dict_path=constants.PERF_POD_YAML,
        )

        # Calculating the file size as 80% of the PVC size
        self.filesize = self.pvc_obj.size * 0.80
        # Change the file size to MB for the FIO function
        self.file_size = f"{int(self.filesize * constants.GB2MB)}M"
        self.file_name = self.pod_obj.name

        log.info(
            f"Total capacity size is : {self.ceph_capacity} GiB, "
            f"Going to use {self.need_capacity} GiB, "
            f"With {self.num_of_snaps} Snapshots to {self.pvc_size} GiB PVC. "
            f"File size to be written is : {self.file_size} "
            f"with the name of {self.file_name}")

        # Reading basic snapshot yaml file
        self.snap_yaml = constants.CSI_CEPHFS_SNAPSHOT_YAML
        self.sc = constants.DEFAULT_VOLUMESNAPSHOTCLASS_CEPHFS
        self.fs_type = "cephfs"
        if interface_type == constants.CEPHBLOCKPOOL:
            self.snap_yaml = constants.CSI_RBD_SNAPSHOT_YAML
            self.fs_type = "rbd"
            self.sc = constants.DEFAULT_VOLUMESNAPSHOTCLASS_RBD
        with open(self.snap_yaml, "r") as stream:
            try:
                self.snap_templ = yaml.safe_load(stream)
                self.snap_templ["spec"]["volumeSnapshotClassName"] = self.sc
                self.snap_templ["spec"]["source"][
                    "persistentVolumeClaimName"] = self.pvc_obj.name
            except yaml.YAMLError as exc:
                log.error(f"Can not read template yaml file {exc}")
        log.debug(
            f"Snapshot yaml file : {self.snap_yaml} "
            f"Content of snapshot yaml file {json.dumps(self.snap_templ, indent=4)}"
        )

        self.get_log_names()
        self.build_fio_command()

        self.run()
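
build_fio_command() is not shown in these excerpts. A minimal FIO command for re-writing the data between snapshots could look like the sketch below; the flag selection and the mount path are assumptions, not the command ocs-ci actually builds.

# Hedged sketch of a minimal FIO write command for re-filling the PVC between
# snapshots; the flags and the /mnt mount point are assumptions for illustration.
file_size = "716M"      # computed above as 80% of the PVC size, in MB
file_name = "pod-test"  # the tests use the pod name as the file name

fio_cmd = (
    "fio --name=perf-test "
    f"--filename=/mnt/{file_name} "
    f"--size={file_size} "
    "--rw=write --bs=4m --direct=1 --numjobs=1"
)
print(fio_cmd)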