コード例 #1
0
    def execution(self, agg_trigger=False):
        """Run the common EC aggregation/rebuild test flow.

        Write an IOR data set for all the EC object types, verify the
        expected aggregation state, then kill two server ranks in turn
        (waiting for rebuild after each) and verify the data can still be
        read. After the second rank is killed only the +2 parity data set
        is expected to be intact.

        Args:
            agg_trigger (bool): if True, expect aggregation to start after
                the IO; if False (default), expect aggregation NOT to start.
        """
        self.pool.connect()
        self.log.info("pool_percentage Before = %s",
                      self.pool.pool_percentage_used())

        # Write the IOR data set with all the given EC object types
        self.ior_write_dataset()

        if agg_trigger:
            # Verify that aggregation started on at least one target
            if not any(check_aggregation_status(self.pool).values()):
                self.fail("Aggregation failed to start..")
        else:
            # Verify that aggregation did not start on any target.
            # (any() already returns a bool, so no "is True" comparison.)
            if any(check_aggregation_status(self.pool).values()):
                self.fail("Aggregation should not happens...")

        # Read IOR data and verify content
        self.ior_read_dataset()

        # Kill the last server rank
        self.server_managers[0].stop_ranks([self.server_count - 1],
                                           self.d_log,
                                           force=True)

        # Wait for rebuild to complete
        self.pool.wait_for_rebuild(False)

        # Read IOR data and verify the different EC object data written
        # before killing the single server is still OK
        self.ior_read_dataset()

        # Kill another server rank
        self.server_managers[0].stop_ranks([self.server_count - 2],
                                           self.d_log,
                                           force=True)

        # Wait for rebuild to complete
        self.pool.wait_for_rebuild(False)

        # Read IOR data and verify the different EC object data and sizes
        # written before killing the second server. Only the +2 (parity)
        # data will be intact, so read and verify only the +2 IOR data set.
        self.ior_read_dataset(parity=2)
コード例 #2
0
    def execution(self, agg_check=None):
        """Write EC data, check aggregation, restart all servers, read back.

        Args:
            agg_check: when to verify aggregation status - "Before" or
                "After" restarting all the servers. Default is None, which
                skips the aggregation check and instead enables time-mode
                aggregation, waits 20 seconds, then restarts the servers.
        """
        # Write all EC object data to NVMe, then to SCM, logging the pool
        # space usage after each write pass.
        self.ior_write_dataset(operation="Auto_Write", percent=self.percent)
        self.log.info(self.pool.pool_percentage_used())
        self.ior_write_dataset(storage='SCM',
                               operation="Auto_Write",
                               percent=self.percent)
        self.log.info(self.pool.pool_percentage_used())

        if not agg_check:
            # Switch the pool to time-mode aggregation. Aggregation starts
            # 20 seconds after the mode is set, so wait that long before
            # restarting the servers.
            self.pool.set_property("reclaim", "time")
            time.sleep(20)

        if agg_check == "Before":
            # Aggregation must be running before the servers are restarted
            pre_restart_status = check_aggregation_status(self.pool, attempt=50)
            if not any(pre_restart_status.values()):
                self.fail(
                    "Aggregation failed to start Before server restart..")

        # Stop all the servers, then bring them back up
        self.get_dmg_command().system_stop(True)
        time.sleep(5)
        self.get_dmg_command().system_start()

        if agg_check == "After":
            # Aggregation must be running after the servers are restarted
            post_restart_status = check_aggregation_status(self.pool, attempt=50)
            if not any(post_restart_status.values()):
                self.fail("Aggregation failed to start After server restart..")

        # Read back and verify the NVMe data set
        self.ior_read_dataset(operation="Auto_Read", percent=self.percent)
        # Read back and verify the data set that was written to SCM
        self.read_set_from_beginning = False
        self.ior_read_dataset(storage='SCM',
                              operation="Auto_Read",
                              percent=self.percent)
コード例 #3
0
ファイル: aggregation.py プロジェクト: liw/daos
    def test_ec_aggregation_time(self):
        """Jira ID: DAOS-7325.

        Test Description: Test Erasure code object aggregation time mode
                          with IOR.
        Use Case: Create the pool,Set aggregation as time mode.
                  run IOR with supported EC object type classes.
                  Verify the Aggregation gets triggered in parallel and space
                  is getting reclaimed. Verify the IOR read data at the end.

        :avocado: tags=all,full_regression
        :avocado: tags=hw,large,ib2
        :avocado: tags=ec,aggregation,ec_array,ec_aggregation
        :avocado: tags=ec_aggregation_time
        """
        # Switch the pool to time-based aggregation before writing any data
        self.pool.set_property("reclaim", "time")
        self.pool.connect()
        self.log.info("pool_percentage Before = %s ",
                      self.pool.pool_percentage_used())

        # Write the IOR data set covering every supported EC object type
        self.ior_write_dataset()

        # Aggregation must have started on at least one target
        agg_status = check_aggregation_status(self.pool)
        if not any(agg_status.values()):
            self.fail("Aggregation failed to start..")

        # Read the IOR data back and verify its content
        self.ior_read_dataset()
コード例 #4
0
ファイル: aggregation.py プロジェクト: liw/daos
    def test_ec_aggregation_default(self):
        """Jira ID: DAOS-7325.

        Test Description: Test Erasure code object aggregation enabled(default)
                          mode with IOR.
        Use Case: Create the pool, run IOR with supported
                  EC object type classes. Verify the Aggregation gets
                  triggered and space is getting reclaimed.
                  Verify the IOR read data at the end.

        :avocado: tags=all,full_regression
        :avocado: tags=hw,large,ib2
        :avocado: tags=ec,aggregation,ec_array,ec_aggregation
        :avocado: tags=ec_aggregation_default
        """
        self.pool.connect()
        self.log.info("pool_percentage Before = %s ",
                      self.pool.pool_percentage_used())

        # For every EC object class and every chunk/transfer/block size
        # combination: write, check aggregation, then read back.
        for ec_class in self.obj_class:
            for size_set in self.ior_chu_trs_blk_size:
                # Write a single IOR data set for this combination
                self.ior_write_single_dataset(ec_class, size_set)

                # Aggregation must have started for this write
                if not any(check_aggregation_status(self.pool).values()):
                    self.fail("Aggregation failed to start..")

                # Read the single IOR data set back and verify it
                self.ior_read_single_dataset(ec_class, size_set)

                # Bump the container count so the next pass reads the
                # latest data from a new container.
                self.cont_number += 1
コード例 #5
0
ファイル: aggregation.py プロジェクト: liw/daos
    def test_ec_aggregation_disabled(self):
        """Jira ID: DAOS-7325.

        Test Description: Test Erasure code object aggregation disabled mode
                          with IOR.
        Use Case: Create the pool, disabled aggregation, run IOR with supported
                  EC object type. Verify that Aggregation should not
                  triggered. Verify the IOR read data at the end.

        :avocado: tags=all,full_regression
        :avocado: tags=hw,large,ib2
        :avocado: tags=ec,aggregation,ec_array,ec_aggregation
        :avocado: tags=ec_aggregation_disabled
        """
        # Disable the aggregation
        self.pool.set_property("reclaim", "disabled")
        self.pool.connect()
        self.log.info("pool_percentage Before = %s ",
                      self.pool.pool_percentage_used())

        # Write the IOR data set with all the given EC object types
        self.ior_write_dataset()

        # Verify that aggregation did NOT start on any target.
        # (any() already returns a bool, so no "is True" comparison.)
        if any(check_aggregation_status(self.pool).values()):
            self.fail("Aggregation should not happens...")

        # Read IOR data and verify content
        self.ior_read_dataset()
コード例 #6
0
ファイル: rebuild_disabled.py プロジェクト: liw/daos
    def test_ec_degrade(self):
        """Jira ID: DAOS-5893.

        Test Description: Test Erasure code object with IOR.
        Use Case: Create the pool, disabled rebuild, run IOR with supported
                  EC object type class for small and large transfer sizes.
                  kill single server, verify all IOR read data and verified.

        :avocado: tags=all,full_regression
        :avocado: tags=hw,large,ib2
        :avocado: tags=ec,ec_array,ec_disabled_rebuild,rebuild
        :avocado: tags=ec_disabled_rebuild_array

        """
        # Disable pool rebuild so killed ranks do not trigger a rebuild
        self.pool.set_property("self_heal", "exclude")

        # Write the IOR data set with all the given EC object types
        self.ior_write_dataset()

        # Verify that aggregation started on at least one target
        if not any(check_aggregation_status(self.pool).values()):
            self.fail("Aggregation failed to start..")

        # Kill the last server rank and wait 20 seconds. Rebuild is
        # disabled, so the data should not be rebuilt.
        self.server_managers[0].stop_ranks([self.server_count - 1],
                                           self.d_log,
                                           force=True)
        time.sleep(20)

        # Read IOR data and verify the different EC object data and sizes
        # written before killing the single server
        self.ior_read_dataset()

        # Kill another server rank and wait 20 seconds. Rebuild will not
        # happen because it is disabled. Read/verify data with parity 2.
        self.server_managers[0].stop_ranks([self.server_count - 2],
                                           self.d_log,
                                           force=True)
        time.sleep(20)

        # Read IOR data and verify the different EC object data and sizes
        # written before the servers were killed; only the +2 parity data
        # set is expected to survive two dead ranks.
        self.ior_read_dataset(parity=2)
コード例 #7
0
    def execution(self, rebuild_mode):
        """Run the Fio EC test for the given rebuild mode.

        Write Fio data (killing a server mid-IO for on-line mode, or after
        the IO for off-line mode), verify aggregation started, then read
        and verify the data. If the container redundancy factor is 2, kill
        a second server and verify the data again.

        Args:
            rebuild_mode (str): On-line or off-line rebuild mode.
        """
        # Kill the last server rank first
        self.rank_to_kill = self.server_count - 1

        if 'on-line' in rebuild_mode:
            # Enable on-line rebuild for the test
            self.set_online_rebuild = True

        # Write the Fio data and kill the server if rebuild_mode is on-line
        self.start_online_fio()

        # Verify aggregation starts for partial-stripe IO
        if not any(check_aggregation_status(self.pool, attempt=60).values()):
            self.fail("Aggregation failed to start..")

        if 'off-line' in rebuild_mode:
            self.server_managers[0].stop_ranks(
                [self.server_count - 1], self.d_log, force=True)

        # Redundancy factor parsed from the container property value
        # ("<name>:<rf>"). Parsed once; presumably immutable after the
        # container is created -- TODO confirm.
        redundancy_factor = int(self.container.properties.value.split(":")[1])

        # Add the unlink option for the final read command
        if redundancy_factor == 1:
            self.fio_cmd._jobs['test'].unlink.value = 1

        # Read and verify the original data.
        self.fio_cmd._jobs['test'].rw.value = self.read_option
        self.fio_cmd.run()

        # If RF is 2 kill one more server and validate the data is not corrupted.
        if redundancy_factor == 2:
            self.fio_cmd._jobs['test'].unlink.value = 1
            self.log.info("RF is 2,So kill another server and verify data")
            # Kill one more server rank
            self.server_managers[0].stop_ranks([self.server_count - 2],
                                               self.d_log, force=True)
            # Read and verify the original data.
            self.fio_cmd.run()
コード例 #8
0
    def test_ec_offline_agg_during_rebuild(self):
        """Jira ID: DAOS-7313.

        Test Description: Test Erasure code object aggregation time mode with
                          IOR.
        Use Case: Create the pool, disabled aggregation.
                  run IOR with supported EC object type with partial strip.
                  Enable Aggregation as time mode. Wait for 20 seconds where
                  it will trigger the aggregation
                  Kill single server after 20 seconds and wait for rebuild.
                  Read and verify all the data.
                  Kill second server and wait for rebuild.
                  Read and verify data with +2 Parity with no data corruption.

        :avocado: tags=all,full_regression
        :avocado: tags=hw,large,ib2
        :avocado: tags=ec,aggregation,ec_array,ec_aggregation
        :avocado: tags=ec_offline_agg_during_rebuild
        """
        # Start with aggregation disabled on the pool
        self.pool.set_property("reclaim", "disabled")
        self.pool.connect()

        # Write the IOR data set covering every supported EC object type,
        # then read it back and verify the content.
        self.ior_write_dataset()
        self.ior_read_dataset()

        # Switch to time-mode aggregation. It kicks in 20 seconds after
        # the mode is set, so wait that long, then kill the last server
        # rank so aggregation and rebuild run together.
        self.pool.set_property("reclaim", "time")
        time.sleep(20)
        self.server_managers[0].stop_ranks([self.server_count - 1],
                                           self.d_log,
                                           force=True)

        # Aggregation must have started on at least one target
        agg_status = check_aggregation_status(self.pool)
        if not any(agg_status.values()):
            self.fail("Aggregation failed to start..")

        # Wait until the rebuild finishes
        self.pool.wait_for_rebuild(False)

        # The EC object data written before the single server was killed
        # must still read back correctly.
        self.ior_read_dataset()

        # Kill a second server rank and wait for that rebuild as well
        self.server_managers[0].stop_ranks([self.server_count - 2],
                                           self.d_log,
                                           force=True)
        self.pool.wait_for_rebuild(False)

        # With two ranks down only the +2 (parity) data set survives, so
        # read back and verify just the +2 IOR data.
        self.ior_read_dataset(parity=2)