Example #1
    def setUp(self):
        super(ObjectDataValidation, self).setUp()
        self.obj = None
        self.ioreq = None
        self.no_of_dkeys = None
        self.no_of_akeys = None
        self.array_size = None
        self.record_length = None
        self.no_of_dkeys = self.params.get("no_of_dkeys", '/run/dkeys/*')[0]
        self.no_of_akeys = self.params.get("no_of_akeys", '/run/akeys/*')[0]
        self.array_size = self.params.get("size", '/array_size/')
        self.record_length = self.params.get("length", '/run/record/*')

        self.prepare_pool()

        self.container = DaosContainer(self.context)
        self.container.create(self.pool.pool.handle)
        self.container.open()

        self.obj = DaosObj(self.context, self.container)
        self.obj.create(objcls=1)
        self.obj.open()
        self.ioreq = IORequest(self.context,
                               self.container,
                               self.obj,
                               objtype=4)
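
A test body would then drive self.ioreq directly. Below is a minimal write/read round-trip sketch (not part of the original example), assuming the pydaos.raw IORequest API used throughout this page: single_insert() stores one record, and single_fetch() returns a ctypes buffer of the requested size.

    def test_round_trip(self):
        """Hypothetical round-trip using the setUp() above."""
        indata = "A" * self.record_length[0]
        c_dkey = ctypes.create_string_buffer("dkey 0")
        c_akey = ctypes.create_string_buffer("akey 0")
        c_value = ctypes.create_string_buffer(indata)
        c_size = ctypes.c_size_t(ctypes.sizeof(c_value))
        # Write one record, then read it back with room for the NUL byte.
        self.ioreq.single_insert(c_dkey, c_akey, c_value, c_size)
        val = self.ioreq.single_fetch(c_dkey, c_akey, len(indata) + 1)
        if indata != val.value.decode("utf-8"):
            self.fail("Data mismatch for dkey 0 / akey 0")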
Example #2
    def setUp(self):
        super().setUp()
        self.agent_sessions = None
        self.pool = None
        self.container = None
        self.obj = None
        self.ioreq = None
        self.no_of_dkeys = None
        self.no_of_akeys = None
        self.array_size = None
        self.record_length = None

        self.no_of_dkeys = self.params.get("no_of_dkeys", '/run/dkeys/*')[0]
        self.no_of_akeys = self.params.get("no_of_akeys", '/run/akeys/*')[0]
        self.record_length = self.params.get("length", '/run/record/*')

        self.add_pool(connect=False)
        self.pool.connect(2)

        self.csum = self.params.get("enable_checksum", '/run/container/*')
        self.container = DaosContainer(self.context)
        input_param = self.container.cont_input_values
        input_param.enable_chksum = self.csum
        self.container.create(poh=self.pool.pool.handle,
                              con_prop=input_param)
        self.container.open()

        self.obj = DaosObj(self.context, self.container)
        self.obj.create(objcls=1)
        self.obj.open()
        self.ioreq = IORequest(self.context,
                               self.container,
                               self.obj, objtype=4)
Example #3
    def write_single_object(self):
        """Write some data to the existing pool."""
        self.pool.connect(2)
        csum = self.params.get("enable_checksum", '/run/container/*')
        self.container = DaosContainer(self.context)
        input_param = self.container.cont_input_values
        input_param.enable_chksum = csum
        self.container.create(poh=self.pool.pool.handle, con_prop=input_param)
        self.container.open()
        self.obj = DaosObj(self.context, self.container)
        self.obj.create(objcls=1)
        self.obj.open()
        self.ioreq = IORequest(self.context,
                               self.container,
                               self.obj,
                               objtype=4)
        self.log.info("Writing the Single Dataset")
        for dkey in range(self.no_of_dkeys):
            for akey in range(self.no_of_akeys):
                indata = ("{0}".format(str(akey)[0]) * self.record_length)
                d_key_value = "dkey {0}".format(dkey)
                c_dkey = ctypes.create_string_buffer(d_key_value)
                a_key_value = "akey {0}".format(akey)
                c_akey = ctypes.create_string_buffer(a_key_value)
                c_value = ctypes.create_string_buffer(indata)
                c_size = ctypes.c_size_t(ctypes.sizeof(c_value))
                self.ioreq.single_insert(c_dkey, c_akey, c_value, c_size)
        self.obj.close()
        self.container.close()
Example #4
    def write_a_bunch_of_values(self, how_many):
        """
        Write a set of values to an object, each under its own dkey and
        akey. The how_many parameter determines how many key:value pairs
        are written.
        """

        self.container = DaosContainer(self.context)
        self.container.create(self.pool.pool.handle)
        self.container.open()

        ioreq = IORequest(self.context, self.container, None)

        print("Started Writing the Dataset-----------\n")
        inc = 50000
        last_key = inc
        for key in range(how_many):
            c_dkey = create_string_buffer("dkey {0}".format(key))
            c_akey = create_string_buffer("akey {0}".format(key))
            c_value = create_string_buffer(
                "some data that gets stored with the key {0}".format(key))
            c_size = ctypes.c_size_t(ctypes.sizeof(c_value))
            ioreq.single_insert(c_dkey,
                                c_akey,
                                c_value,
                                c_size)

            if key > last_key:
                print("written: {}".format(key))
                sys.stdout.flush()
                last_key = key + inc

        print("Started Verification of the Dataset-----------\n")
        last_key = inc
        for key in range(how_many):
            c_dkey = create_string_buffer("dkey {0}".format(key))
            c_akey = create_string_buffer("akey {0}".format(key))
            the_data = "some data that gets stored with the key {0}".format(key)
            val = ioreq.single_fetch(c_dkey,
                                     c_akey,
                                     len(the_data)+1)
            exp_value = val.value.decode("utf-8")
            if the_data != exp_value:
                self.fail("ERROR: Data mismatch for dkey = {0}, akey={1}, "
                          "Expected Value={2} and Received Value={3}\n"
                          .format("dkey {0}".format(key),
                                  "akey {0}".format(key),
                                  the_data,
                                  exp_value))

            if key > last_key:
                print("veried: {}".format(key))
                sys.stdout.flush()
                last_key = key + inc

        print("starting destroy")
        self.container.close()
        self.container.destroy()
        print("destroy complete")
Example #5
    def setUp(self):
        self.agent_sessions = None
        self.pool = None
        self.container = None
        self.obj = None
        self.ioreq = None
        self.hostlist = None
        self.hostfile = None
        self.no_of_dkeys = None
        self.no_of_akeys = None
        self.array_size = None
        self.record_length = None

        with open('../../.build_vars.json') as json_f:
            build_paths = json.load(json_f)
        self.basepath = os.path.normpath(build_paths['PREFIX'] + "/../")
        server_group = self.params.get("name", '/server_config/',
                                       'daos_server')
        self.context = DaosContext(build_paths['PREFIX'] + '/lib64/')
        self.d_log = DaosLog(self.context)
        self.hostlist = self.params.get("test_machines", '/run/hosts/*')
        self.hostfile = write_host_file.write_host_file(
            self.hostlist, self.workdir)
        self.no_of_dkeys = self.params.get("no_of_dkeys", '/run/dkeys/*')[0]
        self.no_of_akeys = self.params.get("no_of_akeys", '/run/akeys/*')[0]
        self.array_size = self.params.get("size", '/array_size/')
        self.record_length = self.params.get("length", '/run/record/*')

        self.agent_sessions = agent_utils.run_agent(self.basepath,
                                                    self.hostlist)
        server_utils.run_server(self, self.hostfile, server_group)

        self.pool = DaosPool(self.context)
        self.pool.create(self.params.get("mode", '/run/pool/createmode/*'),
                         os.geteuid(), os.getegid(),
                         self.params.get("size", '/run/pool/createsize/*'),
                         self.params.get("setname", '/run/pool/createset/*'),
                         None)
        self.pool.connect(2)

        self.container = DaosContainer(self.context)
        self.container.create(self.pool.handle)
        self.container.open()

        self.obj = DaosObj(self.context, self.container)
        self.obj.create(objcls=1)
        self.obj.open()
        self.ioreq = IORequest(self.context,
                               self.container,
                               self.obj,
                               objtype=4)
Example #6
    def reconnect(self):
        '''
        Reconnect the pool/container and reopen the object
        for read verification.
        '''
        # Close the object and container, disconnect the pool.
        self.obj.close()
        self.container.close()
        self.pool.disconnect()
        time.sleep(5)
        # Connect the pool, open the container and object.
        self.pool.connect(2)
        self.container.open()
        self.obj.open()
        self.ioreq = IORequest(self.context,
                               self.container,
                               self.obj,
                               objtype=4)
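
A caller typically writes data, invokes reconnect(), and then verifies reads through the reopened handles, as Example #11 below does. A condensed sketch of that flow (a hypothetical test, assuming the same ioreq/record_length attributes as the surrounding examples):

    def test_data_survives_reconnect(self):
        """Write one record, reconnect, and verify it."""
        indata = "9" * self.record_length[0]
        c_dkey = ctypes.create_string_buffer("dkey 0")
        c_akey = ctypes.create_string_buffer("akey 0")
        c_value = ctypes.create_string_buffer(indata)
        c_size = ctypes.c_size_t(ctypes.sizeof(c_value))
        self.ioreq.single_insert(c_dkey, c_akey, c_value, c_size)
        # Drop and re-establish all handles before reading back.
        self.reconnect()
        val = self.ioreq.single_fetch(c_dkey, c_akey, len(indata) + 1)
        if indata != val.value.decode("utf-8"):
            self.fail("Data mismatch after reconnect")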
Example #7
    def setUp(self):
        super(ObjectDataValidation, self).setUp()
        self.agent_sessions = None
        self.pool = None
        self.container = None
        self.obj = None
        self.ioreq = None
        self.hostlist = None
        self.hostfile = None
        self.no_of_dkeys = None
        self.no_of_akeys = None
        self.array_size = None
        self.record_length = None
        server_group = self.params.get("name", '/server_config/',
                                       'daos_server')
        self.hostlist = self.params.get("test_servers", '/run/hosts/*')
        self.hostfile = write_host_file.write_host_file(
            self.hostlist, self.workdir)
        self.no_of_dkeys = self.params.get("no_of_dkeys", '/run/dkeys/*')[0]
        self.no_of_akeys = self.params.get("no_of_akeys", '/run/akeys/*')[0]
        self.array_size = self.params.get("size", '/array_size/')
        self.record_length = self.params.get("length", '/run/record/*')
        self.agent_sessions = agent_utils.run_agent(self, self.hostlist)
        server_utils.run_server(self, self.hostfile, server_group)

        self.pool = DaosPool(self.context)
        self.pool.create(self.params.get("mode", '/run/pool/createmode/*'),
                         os.geteuid(), os.getegid(),
                         self.params.get("size", '/run/pool/createsize/*'),
                         self.params.get("setname", '/run/pool/createset/*'),
                         None)
        self.pool.connect(2)

        self.container = DaosContainer(self.context)
        self.container.create(self.pool.handle)
        self.container.open()

        self.obj = DaosObj(self.context, self.container)
        self.obj.create(objcls=1)
        self.obj.open()
        self.ioreq = IORequest(self.context,
                               self.container,
                               self.obj,
                               objtype=4)
Example #8
    def create_dataset(self, cont):
        """Create the dataset.

        Args:
            cont (TestContainer): the container

        """
        self.log.info("Creating dataset in %s/%s", str(cont.pool.uuid),
                      str(cont.uuid))

        cont.open()

        for obj_idx in range(self.num_objs):
            # Create a new obj
            obj = DaosObj(cont.pool.context, cont.container)
            self.obj_list.append(obj)

            obj.create(rank=obj_idx, objcls=2)
            obj.open()
            ioreq = IORequest(cont.pool.context, cont.container, obj)
            for dkey_idx in range(self.num_dkeys_per_obj):
                c_dkey = ctypes.create_string_buffer(
                    "dkey {}".format(dkey_idx).encode())

                for akey_idx in range(self.num_akeys_single_per_dkey):
                    # Round-robin to get the size of data and
                    # arbitrarily use a number 0-9 to fill data
                    akey_size_idx = akey_idx % len(self.akey_sizes)
                    data_size = self.akey_sizes[akey_size_idx]
                    data_val = str(akey_idx % 10)
                    data = data_size * data_val
                    c_akey = ctypes.create_string_buffer(
                        "akey single {}".format(akey_idx).encode())
                    c_value = ctypes.create_string_buffer(data.encode())
                    c_size = ctypes.c_size_t(ctypes.sizeof(c_value))
                    ioreq.single_insert(c_dkey, c_akey, c_value, c_size)

                for akey_idx in range(self.num_akeys_array_per_dkey):
                    # Round-robin to get the size of data and
                    # the number of extents, and
                    # arbitrarily use a number 0-9 to fill data
                    akey_size_idx = akey_idx % len(self.akey_sizes)
                    data_size = self.akey_sizes[akey_size_idx]
                    akey_extent_idx = akey_idx % len(self.akey_extents)
                    num_extents = self.akey_extents[akey_extent_idx]
                    c_data = []
                    akey = "akey array {}".format(akey_idx)
                    c_akey = ctypes.create_string_buffer(akey.encode())
                    for data_idx in range(num_extents):
                        data_val = str(data_idx % 10)
                        data = data_size * data_val
                        c_data.append([
                            ctypes.create_string_buffer(data.encode()),
                            data_size
                        ])
                    ioreq.insert_array(c_dkey, c_akey, c_data)
            obj.close()
        cont.close()
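
Records written with insert_array() can be read back with fetch_array(), whose record count and size must mirror the write (Example #11 below shows the call). A sketch of a per-akey verification helper under the same bookkeeping assumptions as create_dataset (the helper itself is hypothetical; the +1/[:-1] NUL-byte accounting follows Example #11):

    def verify_array_akey(self, ioreq, c_dkey, akey_idx):
        """Fetch one array akey and compare it against what was written."""
        data_size = self.akey_sizes[akey_idx % len(self.akey_sizes)]
        num_extents = self.akey_extents[akey_idx % len(self.akey_extents)]
        c_akey = ctypes.create_string_buffer(
            "akey array {}".format(akey_idx).encode())
        c_rec_count = ctypes.c_uint(num_extents)
        c_rec_size = ctypes.c_size_t(data_size + 1)
        outdata = ioreq.fetch_array(c_dkey, c_akey, c_rec_count, c_rec_size)
        for data_idx, rec in enumerate(outdata):
            expected = (data_size * str(data_idx % 10)).encode()
            if expected != rec[:-1]:
                self.fail("Array data mismatch at extent {}".format(data_idx))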
Example #9
    def write_objects_insert_keys(self, container, objtype):
        """Write objects and insert dkeys and akeys in them.

        Args:
            container (DaosContainer): Container.
            objtype (str): Object class.
        """
        for i in range(self.obj_count):
            self.ioreqs.append(
                IORequest(context=self.context,
                          container=container,
                          obj=None,
                          objtype=objtype))

            # Prepare 2 dkeys and 1 akey in each dkey. Use the same akey for
            # both dkeys.
            dkey_str_a = b"Sample dkey A %d" % i
            dkey_str_b = b"Sample dkey B %d" % i
            akey_str = b"Sample akey %d" % i
            data_str = b"Sample data %d" % i
            self.dkey_strs_a.append(dkey_str_a)
            self.dkey_strs_b.append(dkey_str_b)
            self.akey_strs.append(akey_str)
            data = create_string_buffer(data_str)

            # Pass in the length of the key so that it won't have a \0
            # terminator. Not strictly necessary here because we aren't
            # interested in the key-list output; it just helps debugging.
            self.dkeys_a.append(
                create_string_buffer(value=dkey_str_a, size=len(dkey_str_a)))
            self.dkeys_b.append(
                create_string_buffer(value=dkey_str_b, size=len(dkey_str_b)))
            akey = create_string_buffer(value=akey_str, size=len(akey_str))
            c_size = ctypes.c_size_t(ctypes.sizeof(data))

            # Insert the dkeys.
            self.ioreqs[-1].single_insert(dkey=self.dkeys_a[-1],
                                          akey=akey,
                                          value=data,
                                          size=c_size)
            self.ioreqs[-1].single_insert(dkey=self.dkeys_b[-1],
                                          akey=akey,
                                          value=data,
                                          size=c_size)
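
Reading the keys back mirrors the inserts: fetch with the same dkey/akey buffers and compare against the stored payload. A minimal verification pass over the objects written above (hypothetical, assuming the same instance lists as the method):

        for i in range(self.obj_count):
            data_str = b"Sample data %d" % i
            akey = create_string_buffer(value=self.akey_strs[i],
                                        size=len(self.akey_strs[i]))
            # Fetch from the first dkey; size includes the NUL terminator.
            val = self.ioreqs[i].single_fetch(self.dkeys_a[i], akey,
                                              len(data_str) + 1)
            if data_str != val.value:
                self.fail("Data mismatch for object {}".format(i))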
Example #10
class OSAUtils(TestWithServers):
    # pylint: disable=too-many-ancestors
    """
    Test Class Description: This test runs
    daos_server offline drain test cases.

    :avocado: recursive
    """
    def setUp(self):
        """Set up for test case."""
        super(OSAUtils, self).setUp()
        self.container = None
        self.obj = None
        self.ioreq = None
        self.dmg_command = self.get_dmg_command()
        self.no_of_dkeys = self.params.get("no_of_dkeys", '/run/dkeys/*')[0]
        self.no_of_akeys = self.params.get("no_of_akeys", '/run/akeys/*')[0]
        self.record_length = self.params.get("length", '/run/record/*')[0]

    @fail_on(CommandFailure)
    def get_pool_leader(self):
        """Get the pool leader.

        Returns:
            int: pool leader value

        """
        data = self.dmg_command.pool_query(self.pool.uuid)
        return int(data["leader"])

    @fail_on(CommandFailure)
    def get_pool_version(self):
        """Get the pool version.

        Returns:
            int: pool_version_value

        """
        data = self.dmg_command.pool_query(self.pool.uuid)
        return int(data["version"])

    @fail_on(DaosApiError)
    def write_single_object(self):
        """Write some data to the existing pool."""
        self.pool.connect(2)
        csum = self.params.get("enable_checksum", '/run/container/*')
        self.container = DaosContainer(self.context)
        input_param = self.container.cont_input_values
        input_param.enable_chksum = csum
        self.container.create(poh=self.pool.pool.handle, con_prop=input_param)
        self.container.open()
        self.obj = DaosObj(self.context, self.container)
        self.obj.create(objcls=1)
        self.obj.open()
        self.ioreq = IORequest(self.context,
                               self.container,
                               self.obj,
                               objtype=4)
        self.log.info("Writing the Single Dataset")
        for dkey in range(self.no_of_dkeys):
            for akey in range(self.no_of_akeys):
                indata = ("{0}".format(str(akey)[0]) * self.record_length)
                d_key_value = "dkey {0}".format(dkey)
                c_dkey = ctypes.create_string_buffer(d_key_value)
                a_key_value = "akey {0}".format(akey)
                c_akey = ctypes.create_string_buffer(a_key_value)
                c_value = ctypes.create_string_buffer(indata)
                c_size = ctypes.c_size_t(ctypes.sizeof(c_value))
                self.ioreq.single_insert(c_dkey, c_akey, c_value, c_size)
        self.obj.close()
        self.container.close()

    @fail_on(DaosApiError)
    def verify_single_object(self):
        """Verify the container data on the existing pool."""
        self.pool.connect(2)
        self.container.open()
        self.obj.open()
        self.log.info("Single Dataset Verification -- Started")
        for dkey in range(self.no_of_dkeys):
            for akey in range(self.no_of_akeys):
                indata = ("{0}".format(str(akey)[0]) * self.record_length)
                c_dkey = ctypes.create_string_buffer("dkey {0}".format(dkey))
                c_akey = ctypes.create_string_buffer("akey {0}".format(akey))
                val = self.ioreq.single_fetch(c_dkey, c_akey, len(indata) + 1)
                if indata != (repr(val.value)[1:-1]):
                    self.d_log.error("ERROR:Data mismatch for "
                                     "dkey = {0}, "
                                     "akey = {1}".format(
                                         "dkey {0}".format(dkey),
                                         "akey {0}".format(akey)))
                    self.fail(
                        "ERROR: Data mismatch for dkey = {0}, akey={1}".format(
                            "dkey {0}".format(dkey), "akey {0}".format(akey)))
        self.obj.close()
        self.container.close()
Example #11
class ObjectDataValidation(avocado.Test):
    """
    Test Class Description:
        Tests that create different-length records,
        disconnect the pool/container, reconnect,
        and validate the data after the reconnect.
    """

    # pylint: disable=too-many-instance-attributes
    def setUp(self):
        self.agent_sessions = None
        self.pool = None
        self.container = None
        self.obj = None
        self.ioreq = None
        self.hostlist = None
        self.hostfile = None
        self.no_of_dkeys = None
        self.no_of_akeys = None
        self.array_size = None
        self.record_length = None

        with open('../../.build_vars.json') as json_f:
            build_paths = json.load(json_f)
        self.basepath = os.path.normpath(build_paths['PREFIX'] + "/../")
        server_group = self.params.get("name", '/server_config/',
                                       'daos_server')
        self.context = DaosContext(build_paths['PREFIX'] + '/lib64/')
        self.d_log = DaosLog(self.context)
        self.hostlist = self.params.get("test_machines", '/run/hosts/*')
        self.hostfile = write_host_file.write_host_file(
            self.hostlist, self.workdir)
        self.no_of_dkeys = self.params.get("no_of_dkeys", '/run/dkeys/*')[0]
        self.no_of_akeys = self.params.get("no_of_akeys", '/run/akeys/*')[0]
        self.array_size = self.params.get("size", '/array_size/')
        self.record_length = self.params.get("length", '/run/record/*')

        self.agent_sessions = agent_utils.run_agent(self.basepath,
                                                    self.hostlist)
        server_utils.run_server(self, self.hostfile, server_group)

        self.pool = DaosPool(self.context)
        self.pool.create(self.params.get("mode", '/run/pool/createmode/*'),
                         os.geteuid(), os.getegid(),
                         self.params.get("size", '/run/pool/createsize/*'),
                         self.params.get("setname", '/run/pool/createset/*'),
                         None)
        self.pool.connect(2)

        self.container = DaosContainer(self.context)
        self.container.create(self.pool.handle)
        self.container.open()

        self.obj = DaosObj(self.context, self.container)
        self.obj.create(objcls=1)
        self.obj.open()
        self.ioreq = IORequest(self.context,
                               self.container,
                               self.obj,
                               objtype=4)

    def tearDown(self):
        try:
            if self.container:
                self.container.close()
                self.container.destroy()
            if self.pool:
                self.pool.disconnect()
                self.pool.destroy(1)
        finally:
            if self.agent_sessions:
                agent_utils.stop_agent(self.agent_sessions)
            server_utils.stop_server(hosts=self.hostlist)

    def reconnect(self):
        '''
        Reconnect the pool/container and reopen the object
        for read verification.
        '''
        # Close the object and container, disconnect the pool.
        self.obj.close()
        self.container.close()
        self.pool.disconnect()
        time.sleep(5)
        # Connect the pool, open the container and object.
        self.pool.connect(2)
        self.container.open()
        self.obj.open()
        self.ioreq = IORequest(self.context,
                               self.container,
                               self.obj,
                               objtype=4)

    @avocado.fail_on(DaosApiError)
    def test_invalid_tx_commit_close(self):
        """
        Test ID:
            (1)DAOS-1346: Verify commit tx bad parameter behavior.
            (2)DAOS-1343: Verify tx_close bad parameter behavior.
            (3)DAOS-1342: Verify tx_close through daos_api.
            (4)DAOS-1338: Add and verify tx_abort through daos_api.
            (5)DAOS-1339: Verify tx_abort bad parameter behavior.
        Test Description:
            Write Avocado Test to verify commit tx and close tx
                          bad parameter behavior.
        :avocado: tags=all,object,full_regression,small,invalid_tx
        """
        self.d_log.info("==Writing the Single Dataset for negative test...")
        record_index = 0
        expected_error = "RC: -1002"
        dkey = 0
        akey = 0
        indata = ("{0}".format(str(akey)[0]) *
                  self.record_length[record_index])
        c_dkey = ctypes.create_string_buffer("dkey {0}".format(dkey))
        c_akey = ctypes.create_string_buffer("akey {0}".format(akey))
        c_value = ctypes.create_string_buffer(indata)
        c_size = ctypes.c_size_t(ctypes.sizeof(c_value))
        try:
            new_transaction = self.container.get_new_tx()
        except DaosApiError as excep:
            # Initial container get_new_tx failed; abort the test.
            self.fail("##container get_new_tx failed: {}".format(excep))
        invalid_transaction = new_transaction + random.randint(1000, 383838)
        self.log.info("==new_transaction=     %s", new_transaction)
        self.log.info("==invalid_transaction= %s", invalid_transaction)
        self.ioreq.single_insert(c_dkey, c_akey, c_value, c_size,
                                 new_transaction)
        try:
            self.container.commit_tx(invalid_transaction)
            self.fail("##(1.1)Container.commit_tx passing with invalid handle")
        except DaosApiError as excep:
            self.log.info(str(excep))
            self.log.info(
                "==(1)Expecting failure: invalid Container.commit_tx.")
            if expected_error not in str(excep):
                self.fail(
                    "##(1.2)Expecting error RC: -1002, but got {}.".format(
                        str(excep)))
        try:
            self.container.close_tx(invalid_transaction)
            self.fail("##(2.1)Container.close_tx passing with invalid handle")
        except DaosApiError as excep:
            self.log.info(str(excep))
            self.log.info(
                "==(2)Expecting failure: invalid Container.commit_tx.")
            if expected_error not in str(excep):
                self.fail(
                    "##(2.2)Expecting error RC: -1002, but got {}.".format(
                        str(excep)))
        try:
            self.container.close_tx(new_transaction)
            self.log.info("==(3)container.close_tx test passed.")
        except DaosApiError as excep:
            self.log.info(str(excep))
            self.fail("##(3)Failed on close_tx.")

        try:
            self.container.abort_tx(invalid_transaction)
            self.fail("##(4.1)Container.abort_tx passing with invalid handle")
        except DaosApiError as excep:
            self.log.info(str(excep))
            self.log.info(
                "==(4)Expecting failure: invalid Container.abort_tx.")
            if expected_error not in str(excep):
                self.fail(
                    "##(4.2)Expecting error RC: -1002, but got {}.".format(
                        str(excep)))

        # Try to abort the transaction which was already closed.
        try:
            self.container.abort_tx(new_transaction)
            self.fail("##(5.1)Container.abort_tx passing with a closed handle")
        except DaosApiError as excep:
            self.log.info(str(excep))
            self.log.info(
                "==(5)Expecting failure: Container.abort_tx closed handle.")
            if expected_error not in str(excep):
                self.fail(
                    "##(5.2)Expecting error RC: -1002, but got {}.".format(
                        str(excep)))

        # Open another transaction for the abort test.
        try:
            new_transaction2 = self.container.get_new_tx()
        except DaosApiError as excep:
            self.fail("##(6.1)container get_new_tx failed: {}".format(excep))
        self.log.info("==new_transaction2=     %s", new_transaction2)
        self.ioreq.single_insert(c_dkey, c_akey, c_value, c_size,
                                 new_transaction2)
        try:
            self.container.abort_tx(new_transaction2)
            self.log.info("==(6)container.abort_tx test passed.")
        except DaosApiError as excep:
            self.log.info(str(excep))
            self.fail("##(6.2)Failed on abort_tx.")

    @avocado.fail_on(DaosApiError)
    @skipForTicket("DAOS-3208")
    def test_single_object_validation(self):
        """
        Test ID: DAOS-707
        Test Description: Write Avocado Test to verify single data after
                          pool/container disconnect/reconnect.
        :avocado: tags=all,object,full_regression,small,single_object
        """
        self.d_log.info("Writing the Single Dataset")
        record_index = 0
        transaction = []
        for dkey in range(self.no_of_dkeys):
            for akey in range(self.no_of_akeys):
                indata = ("{0}".format(str(akey)[0]) *
                          self.record_length[record_index])
                c_dkey = ctypes.create_string_buffer("dkey {0}".format(dkey))
                c_akey = ctypes.create_string_buffer("akey {0}".format(akey))
                c_value = ctypes.create_string_buffer(indata)
                c_size = ctypes.c_size_t(ctypes.sizeof(c_value))

                new_transaction = self.container.get_new_tx()
                self.ioreq.single_insert(c_dkey, c_akey, c_value, c_size,
                                         new_transaction)
                self.container.commit_tx(new_transaction)
                transaction.append(new_transaction)
                record_index = record_index + 1
                if record_index == len(self.record_length):
                    record_index = 0

        self.reconnect()

        self.d_log.info("Single Dataset Verification -- Started")
        record_index = 0
        transaction_index = 0
        for dkey in range(self.no_of_dkeys):
            for akey in range(self.no_of_akeys):
                indata = ("{0}".format(str(akey)[0]) *
                          self.record_length[record_index])
                c_dkey = ctypes.create_string_buffer("dkey {0}".format(dkey))
                c_akey = ctypes.create_string_buffer("akey {0}".format(akey))
                val = self.ioreq.single_fetch(c_dkey, c_akey, len(indata) + 1)
                if indata != (repr(val.value)[1:-1]):
                    self.d_log.error("ERROR:Data mismatch for "
                                     "dkey = {0}, "
                                     "akey = {1}".format(
                                         "dkey {0}".format(dkey),
                                         "akey {0}".format(akey)))
                    self.fail(
                        "ERROR: Data mismatch for dkey = {0}, akey={1}".format(
                            "dkey {0}".format(dkey), "akey {0}".format(akey)))

                transaction_index = transaction_index + 1
                record_index = record_index + 1
                if record_index == len(self.record_length):
                    record_index = 0

    @avocado.fail_on(DaosApiError)
    @skipForTicket("DAOS-3208")
    def test_array_object_validation(self):
        """
        Test ID: DAOS-707
        Test Description: Write Avocado Test to verify Array data after
                          pool/container disconnect/reconnect.
        :avocado: tags=all,object,full_regression,small,array_object
        """
        self.d_log.info("Writing the Array Dataset")
        record_index = 0
        transaction = []
        for dkey in range(self.no_of_dkeys):
            for akey in range(self.no_of_akeys):
                c_values = []
                value = ("{0}".format(str(akey)[0]) *
                         self.record_length[record_index])
                for item in range(self.array_size):
                    c_values.append(
                        (ctypes.create_string_buffer(value), len(value) + 1))
                c_dkey = ctypes.create_string_buffer("dkey {0}".format(dkey))
                c_akey = ctypes.create_string_buffer("akey {0}".format(akey))

                new_transaction = self.container.get_new_tx()
                self.ioreq.insert_array(c_dkey, c_akey, c_values,
                                        new_transaction)
                self.container.commit_tx(new_transaction)
                transaction.append(new_transaction)

                record_index = record_index + 1
                if record_index == len(self.record_length):
                    record_index = 0

        self.reconnect()

        self.d_log.info("Array Dataset Verification -- Started")
        record_index = 0
        transaction_index = 0
        for dkey in range(self.no_of_dkeys):
            for akey in range(self.no_of_akeys):
                indata = []
                value = ("{0}".format(str(akey)[0]) *
                         self.record_length[record_index])
                for item in range(self.array_size):
                    indata.append(value)
                c_dkey = ctypes.create_string_buffer("dkey {0}".format(dkey))
                c_akey = ctypes.create_string_buffer("akey {0}".format(akey))
                c_rec_count = ctypes.c_uint(len(indata))
                c_rec_size = ctypes.c_size_t(len(indata[0]) + 1)

                outdata = self.ioreq.fetch_array(c_dkey, c_akey, c_rec_count,
                                                 c_rec_size)

                for idx, _ in enumerate(indata):
                    if indata[idx] != outdata[idx][:-1]:
                        self.d_log.error("ERROR:Data mismatch for "
                                         "dkey = {0}, "
                                         "akey = {1}".format(
                                             "dkey {0}".format(dkey),
                                             "akey {0}".format(akey)))
                        self.fail(
                            "ERROR:Data mismatch for dkey = {0}, akey={1}".
                            format("dkey {0}".format(dkey),
                                   "akey {0}".format(akey)))

                transaction_index = transaction_index + 1
                record_index = record_index + 1
                if record_index == len(self.record_length):
                    record_index = 0
Example #12
class ChecksumContainerValidation(TestWithServers):
    """
    Test Class Description: This test enables checksum
    container properties, performs single-object inserts,
    and verifies the contents. It is a basic sanity test
    for enabling checksum testing. This test case doesn't
    use TestPool/TestContainer for now; they need changes
    to support checksum.
    :avocado: recursive
    """

    # pylint: disable=too-many-instance-attributes
    def setUp(self):
        super(ChecksumContainerValidation, self).setUp()
        self.agent_sessions = None
        self.pool = None
        self.container = None
        self.obj = None
        self.ioreq = None
        self.no_of_dkeys = None
        self.no_of_akeys = None
        self.array_size = None
        self.record_length = None

        self.no_of_dkeys = self.params.get("no_of_dkeys", '/run/dkeys/*')[0]
        self.no_of_akeys = self.params.get("no_of_akeys", '/run/akeys/*')[0]
        self.record_length = self.params.get("length", '/run/record/*')

        self.pool = TestPool(self.context)
        self.pool.get_params(self)
        self.pool.create()
        self.pool.connect(2)

        self.csum = self.params.get("enable_checksum", '/run/container/*')
        self.container = DaosContainer(self.context)
        input_param = self.container.cont_input_values
        input_param.enable_chksum = self.csum
        self.container.create(poh=self.pool.pool.handle, con_prop=input_param)
        self.container.open()

        self.obj = DaosObj(self.context, self.container)
        self.obj.create(objcls=1)
        self.obj.open()
        self.ioreq = IORequest(self.context,
                               self.container,
                               self.obj,
                               objtype=4)

    def test_single_object_with_checksum(self):
        """
        Test ID: DAOS-3927
        Test Description: Write Avocado Test to verify single data after
                          pool/container disconnect/reconnect.
        :avocado: tags=all,full_regression,pr,basic_checksum_object
        """
        self.d_log.info("Writing the Single Dataset")
        record_index = 0
        transaction = []
        for dkey in range(self.no_of_dkeys):
            for akey in range(self.no_of_akeys):
                indata = ("{0}".format(str(akey)[0]) *
                          self.record_length[record_index])
                c_dkey = ctypes.create_string_buffer("dkey {0}".format(dkey))
                c_akey = ctypes.create_string_buffer("akey {0}".format(akey))
                c_value = ctypes.create_string_buffer(indata)
                c_size = ctypes.c_size_t(ctypes.sizeof(c_value))

                new_transaction = self.container.get_new_tx()
                self.ioreq.single_insert(c_dkey, c_akey, c_value, c_size,
                                         new_transaction)
                self.container.commit_tx(new_transaction)
                transaction.append(new_transaction)
                record_index = record_index + 1
                if record_index == len(self.record_length):
                    record_index = 0

        self.d_log.info("Single Dataset Verification -- Started")
        record_index = 0
        for dkey in range(self.no_of_dkeys):
            for akey in range(self.no_of_akeys):
                indata = ("{0}".format(str(akey)[0]) *
                          self.record_length[record_index])
                c_dkey = ctypes.create_string_buffer("dkey {0}".format(dkey))
                c_akey = ctypes.create_string_buffer("akey {0}".format(akey))
                val = self.ioreq.single_fetch(c_dkey, c_akey, len(indata) + 1)
                if indata != (repr(val.value)[1:-1]):
                    self.d_log.error("ERROR:Data mismatch for "
                                     "dkey = {0}, "
                                     "akey = {1}".format(
                                         "dkey {0}".format(dkey),
                                         "akey {0}".format(akey)))
                    self.fail(
                        "ERROR: Data mismatch for dkey = {0}, akey={1}".format(
                            "dkey {0}".format(dkey), "akey {0}".format(akey)))

                record_index = record_index + 1
                if record_index == len(self.record_length):
                    record_index = 0
Example #13
class CsumContainerValidation(TestWithServers):
    """
    Test Class Description: This test enables checksum
    container properties, performs single-object inserts,
    and verifies the contents. This is a basic sanity
    test for enabling checksum testing.
    :avocado: recursive
    """

    # pylint: disable=too-many-instance-attributes
    def setUp(self):
        super().setUp()
        self.agent_sessions = None
        self.pool = None
        self.container = None
        self.obj = None
        self.ioreq = None
        self.no_of_dkeys = None
        self.no_of_akeys = None
        self.array_size = None
        self.record_length = None

        self.no_of_dkeys = self.params.get("no_of_dkeys", '/run/dkeys/*')[0]
        self.no_of_akeys = self.params.get("no_of_akeys", '/run/akeys/*')[0]
        self.record_length = self.params.get("length", '/run/record/*')

        self.add_pool(connect=False)
        self.pool.connect(2)

        self.csum = self.params.get("enable_checksum", '/run/container/*')
        self.container = DaosContainer(self.context)
        input_param = self.container.cont_input_values
        input_param.enable_chksum = self.csum
        self.container.create(poh=self.pool.pool.handle,
                              con_prop=input_param)
        self.container.open()

        self.obj = DaosObj(self.context, self.container)
        self.obj.create(objcls=1)
        self.obj.open()
        self.ioreq = IORequest(self.context,
                               self.container,
                               self.obj, objtype=4)

    def test_single_object_with_checksum(self):
        """
        Test ID: DAOS-3927
        Test Description: Write Avocado Test to verify single data after
                          pool/container disconnect/reconnect.
        :avocado: tags=all,daily_regression
        :avocado: tags=checksum
        :avocado: tags=basic_checksum_object
        """
        self.d_log.info("Writing the Single Dataset")
        record_index = 0
        for dkey in range(self.no_of_dkeys):
            for akey in range(self.no_of_akeys):
                indata = ("{0}".format(str(akey)[0])
                          * self.record_length[record_index])
                c_dkey = create_string_buffer("dkey {0}".format(dkey))
                c_akey = create_string_buffer("akey {0}".format(akey))
                c_value = create_string_buffer(indata)
                c_size = ctypes.c_size_t(ctypes.sizeof(c_value))

                self.ioreq.single_insert(c_dkey, c_akey, c_value, c_size)
                record_index = record_index + 1
                if record_index == len(self.record_length):
                    record_index = 0

        self.d_log.info("Single Dataset Verification -- Started")
        record_index = 0
        for dkey in range(self.no_of_dkeys):
            for akey in range(self.no_of_akeys):
                indata = ("{0}".format(str(akey)[0]) *
                          self.record_length[record_index])
                c_dkey = create_string_buffer("dkey {0}".format(dkey))
                c_akey = create_string_buffer("akey {0}".format(akey))
                val = self.ioreq.single_fetch(c_dkey,
                                              c_akey,
                                              len(indata)+1)
                if indata != val.value.decode('utf-8'):
                    message = (
                        "ERROR:Data mismatch for dkey={}, akey={}: indata={}, "
                        "val={}".format(
                            dkey, akey, indata, val.value.decode('utf-8')))
                    self.d_log.error(message)
                    self.fail(message)
                record_index = record_index + 1
                if record_index == len(self.record_length):
                    record_index = 0
Example #14
class OSAUtils(MdtestBase, IorTestBase):
    # pylint: disable=too-many-ancestors
    """
    Test Class Description: This test runs
    daos_server offline drain test cases.

    :avocado: recursive
    """
    def setUp(self):
        """Set up for test case."""
        super().setUp()
        self.pool_cont_dict = {}
        self.container = None
        self.obj = None
        self.ioreq = None
        self.dmg_command = self.get_dmg_command()
        self.no_of_dkeys = self.params.get("no_of_dkeys",
                                           '/run/dkeys/*',
                                           default=[0])[0]
        self.no_of_akeys = self.params.get("no_of_akeys",
                                           '/run/akeys/*',
                                           default=[0])[0]
        self.record_length = self.params.get("length",
                                             '/run/record/*',
                                             default=[0])[0]
        self.ior_w_flags = self.params.get("write_flags",
                                           '/run/ior/iorflags/*',
                                           default="")
        self.ior_r_flags = self.params.get("read_flags", '/run/ior/iorflags/*')
        self.out_queue = queue.Queue()
        self.dmg_command.exit_status_exception = False
        self.test_during_aggregation = False
        self.test_during_rebuild = False
        self.test_with_checksum = True

    @fail_on(CommandFailure)
    def get_pool_leader(self):
        """Get the pool leader.

        Returns:
            int: pool leader value

        """
        data = self.dmg_command.pool_query(self.pool.uuid)
        return int(data["response"]["leader"])

    @fail_on(CommandFailure)
    def get_rebuild_status(self):
        """Get the rebuild status.

        Returns:
            str: rebuild status

        """
        data = self.dmg_command.pool_query(self.pool.uuid)
        return data["response"]["rebuild"]["status"]

    @fail_on(CommandFailure)
    def is_rebuild_done(self,
                        time_interval,
                        wait_for_rebuild_to_complete=False):
        """Rebuild is completed/done.
        Args:
            time_interval: Wait interval between checks
            wait_for_rebuild_to_complete: Rebuild completed
                                          (Default: False)
        """
        self.pool.wait_for_rebuild(wait_for_rebuild_to_complete,
                                   interval=time_interval)

    @fail_on(CommandFailure)
    def assert_on_rebuild_failure(self):
        """If the rebuild is not successful,
        raise assert.
        """
        rebuild_status = self.get_rebuild_status()
        self.log.info("Rebuild Status: %s", rebuild_status)
        rebuild_failed_string = ["failed", "scanning", "aborted", "busy"]
        self.assertTrue(rebuild_status not in rebuild_failed_string,
                        "Rebuild failed")

    @fail_on(CommandFailure)
    def print_and_assert_on_rebuild_failure(self, out, timeout=3):
        """Print the out value (daos, dmg, etc) and check for rebuild
        completion. If not, raise assert.
        """
        self.log.info(out)
        self.is_rebuild_done(timeout)
        self.assert_on_rebuild_failure()

    @fail_on(CommandFailure)
    def get_pool_version(self):
        """Get the pool version.

        Returns:
            int: pool_version_value

        """
        data = self.dmg_command.pool_query(self.pool.uuid)
        return int(data["response"]["version"])

    def set_container(self, container):
        """Set the OSA utils container object.
        Args:
            container (obj) : Container object to be used
                              within OSA utils.
        """
        self.container = container

    def simple_exclude_reintegrate_loop(self, rank, loop_time=100):
        """Perform exclude and reintegration on a rank repeatedly
        for loop_time seconds.
        """
        start_time = time.time()
        # Keep excluding and reintegrating until loop_time seconds elapse.
        while int(time.time() - start_time) < loop_time:
            output = self.dmg_command.pool_exclude(self.pool.uuid, rank)
            self.print_and_assert_on_rebuild_failure(output)
            output = self.dmg_command.pool_reintegrate(self.pool.uuid, rank)
            self.print_and_assert_on_rebuild_failure(output)

    @fail_on(DaosApiError)
    def write_single_object(self):
        """Write some data to the existing pool."""
        self.pool.connect(2)
        csum = self.params.get("enable_checksum", '/run/container/*')
        self.container = DaosContainer(self.context)
        input_param = self.container.cont_input_values
        input_param.enable_chksum = csum
        self.container.create(poh=self.pool.pool.handle, con_prop=input_param)
        self.container.open()
        self.obj = DaosObj(self.context, self.container)
        self.obj.create(objcls=1)
        self.obj.open()
        self.ioreq = IORequest(self.context,
                               self.container,
                               self.obj,
                               objtype=4)
        self.log.info("Writing the Single Dataset")
        for dkey in range(self.no_of_dkeys):
            for akey in range(self.no_of_akeys):
                indata = ("{0}".format(str(akey)[0]) * self.record_length)
                d_key_value = "dkey {0}".format(dkey)
                c_dkey = create_string_buffer(d_key_value)
                a_key_value = "akey {0}".format(akey)
                c_akey = create_string_buffer(a_key_value)
                c_value = create_string_buffer(indata)
                c_size = ctypes.c_size_t(ctypes.sizeof(c_value))
                self.ioreq.single_insert(c_dkey, c_akey, c_value, c_size)
        self.obj.close()
        self.container.close()

    @fail_on(DaosApiError)
    def verify_single_object(self):
        """Verify the container data on the existing pool."""
        self.pool.connect(2)
        self.container.open()
        self.obj.open()
        self.log.info("Single Dataset Verification -- Started")
        for dkey in range(self.no_of_dkeys):
            for akey in range(self.no_of_akeys):
                indata = ("{0}".format(str(akey)[0]) * self.record_length)
                c_dkey = create_string_buffer("dkey {0}".format(dkey))
                c_akey = create_string_buffer("akey {0}".format(akey))
                val = self.ioreq.single_fetch(c_dkey, c_akey, len(indata) + 1)
                if indata != (repr(val.value)[1:-1]):
                    self.d_log.error("ERROR:Data mismatch for "
                                     "dkey = {0}, "
                                     "akey = {1}".format(
                                         "dkey {0}".format(dkey),
                                         "akey {0}".format(akey)))
                    self.fail(
                        "ERROR: Data mismatch for dkey = {0}, akey={1}".format(
                            "dkey {0}".format(dkey), "akey {0}".format(akey)))
        self.obj.close()
        self.container.close()

    def prepare_cont_ior_write_read(self, oclass, flags):
        """This method prepares the containers for
        IOR write and read invocations.
            To enable aggregation:
            - Create two containers and read always from
              first container
            Normal usage (use only a single container):
            - Create a single container and use the same.
        Args:
            oclass (str): IOR object class
            flags (str): IOR flags
        """
        self.log.info(self.pool_cont_dict)
        # If the pool is not in the dictionary, initialize its
        # container list: {poolA: [None, None, None, None]}
        if self.pool not in self.pool_cont_dict:
            self.pool_cont_dict[self.pool] = [None] * 4
        # Create a container if the pool doesn't have one.
        # Otherwise, use the existing container in the pool.
        # pool_cont_dict = {poolA: [containerA, "Updated",
        #                           containerB, "Updated"],
        #                   poolB: [containerA, "Updated",
        #                           containerB, None]}
        if self.pool_cont_dict[self.pool][0] is None:
            self.add_container(self.pool, create=False)
            self.set_cont_class_properties(oclass)
            if self.test_with_checksum is False:
                tmp = self.get_object_replica_value(oclass)
                rf_value = "rf:{}".format(tmp - 1)
                self.update_cont_properties(rf_value)
            self.container.create()
            self.pool_cont_dict[self.pool][0] = self.container
            self.pool_cont_dict[self.pool][1] = "Updated"
        else:
            if ((self.test_during_aggregation is True)
                    and (self.pool_cont_dict[self.pool][1] == "Updated")
                    and (self.pool_cont_dict[self.pool][3] is None)
                    and ("-w" in flags)):
                # Write to the second container
                self.add_container(self.pool, create=False)
                self.set_cont_class_properties(oclass)
                if self.test_with_checksum is False:
                    tmp = self.get_object_replica_value(oclass)
                    rf_value = "rf:{}".format(tmp - 1)
                    self.update_cont_properties(rf_value)
                self.container.create()
                self.pool_cont_dict[self.pool][2] = self.container
                self.pool_cont_dict[self.pool][3] = "Updated"
            else:
                self.container = self.pool_cont_dict[self.pool][0]

    def delete_extra_container(self, pool):
        """Delete the extra container in the pool.
        Refer prepare_cont_ior_write_read. This method
        should be called when OSA tests intend to
        enable aggregation.
        Args:
            pool (object): pool handle
        """
        self.pool.set_property("reclaim", "time")
        extra_container = self.pool_cont_dict[pool][2]
        extra_container.destroy()
        self.pool_cont_dict[pool][3] = None

    def get_object_replica_value(self, oclass):
        """ Get the object replica value for an object class.

        Args:
            oclass (str): Object Class (eg: RP_2G1,etc)

        Returns:
            value (int) : Object replica value
        """
        value = 0
        if "_" in oclass:
            replica_list = oclass.split("_")
            value = replica_list[1][0]
        else:
            self.log.info("Wrong Object Class. Cannot split")
        return int(value)

    def update_cont_properties(self, cont_prop):
        """Update the existing container properties.
        Args:
            cont_prop (str): Replace existing container properties
                             with new value
        """
        self.container.properties.value = cont_prop

    def set_cont_class_properties(self, oclass="S1"):
        """Update the container class to match the IOR object
        class. Fix the rf factor based on object replica value.
        Also, remove the redundancy factor for S type
        object class.
        Args:
            oclass (str, optional): Container object class to be set.
                                    Defaults to "S1".
        """
        self.container.oclass.value = oclass
        # Set the container properties properly for the S1, S2 classes.
        # rf should not be set to 1 for S-type object classes.
        x = re.search("^S\\d$", oclass)
        prop = self.container.properties.value
        if x is not None:
            prop = prop.replace("rf:1", "rf:0")
        else:
            tmp = self.get_object_replica_value(oclass)
            rf_value = "rf:{}".format(tmp - 1)
            prop = prop.replace("rf:1", rf_value)
        self.container.properties.value = prop

    def assert_on_exception(self, out_queue=None):
        """Re-raise any exception captured while executing an application.

        Args:
            out_queue (queue): queue to check. If it is empty, the app
                (ior, mdtest) didn't encounter an error.
        """
        if out_queue is None:
            out_queue = self.out_queue
        if not out_queue.empty():
            exc = out_queue.get(block=False)
            out_queue.put(exc)
            raise exc

    def cleanup_queue(self, out_queue=None):
        """Cleanup the existing thread queue.

        Args:
            out_queue (queue): Queue to cleanup.
        """
        if out_queue is None:
            out_queue = self.out_queue
        while not out_queue.empty():
            out_queue.get(block=True)

    def run_ior_thread(self,
                       action,
                       oclass,
                       test,
                       single_cont_read=True,
                       fail_on_warning=True):
        """Start the IOR thread for either writing or
        reading data to/from a container.
        Args:
            action (str): Start the IOR thread with Read or
                          Write
            oclass (str): IOR object class
            test (list): IOR test sequence
            single_cont_read (bool) : Always read from the
                                      1st container.
                                      Defaults to True.
            fail_on_warning (bool)  : Test terminates
                                      for IOR warnings.
                                      Defaults to True.
        """
        self.cleanup_queue()
        if action == "Write":
            flags = self.ior_w_flags
        else:
            flags = self.ior_r_flags

        # Add a thread for these IOR arguments
        process = threading.Thread(target=self.ior_thread,
                                   kwargs={
                                       "pool": self.pool,
                                       "oclass": oclass,
                                       "test": test,
                                       "flags": flags,
                                       "single_cont_read": single_cont_read,
                                       "fail_on_warning": fail_on_warning
                                   })
        # Launch the IOR thread
        process.start()
        # Wait for the thread to finish
        try:
            process.join()
        except CommandFailure as err_msg:
            self.out_queue.put(err_msg)
            self.assert_on_exception()

    def ior_thread(self,
                   pool,
                   oclass,
                   test,
                   flags,
                   single_cont_read=True,
                   fail_on_warning=True):
        """Start an IOR thread.

        Args:
            pool (object): pool handle
            oclass (str): IOR object class, container class.
            test (list): IOR test sequence
            flags (str): IOR flags
            single_cont_read (bool): Always read from the 1st container.
                                     Defaults to True.
            fail_on_warning (bool): Test terminates on IOR warnings.
                                    Defaults to True.
        """
        self.cleanup_queue()
        self.pool = pool
        self.ior_cmd.get_params(self)
        self.ior_cmd.set_daos_params(self.server_group, self.pool)
        self.ior_cmd.dfs_oclass.update(oclass)
        self.ior_cmd.dfs_dir_oclass.update(oclass)
        if single_cont_read is True:
            # Prepare the containers in the specific way defined in
            # prepare_cont_ior_write_read.
            self.prepare_cont_ior_write_read(oclass, flags)
        elif single_cont_read is False and self.container is not None:
            # self.container already holds a valid container; use it as-is.
            self.log.info(self.container)
        else:
            self.fail("Unsupported single_cont_read option in ior_thread")
        try:
            job_manager = self.get_ior_job_manager_command()
        except CommandFailure as err_msg:
            self.out_queue.put(err_msg)
            self.assert_on_exception()
        job_manager.job.dfs_cont.update(self.container.uuid)
        self.ior_cmd.transfer_size.update(test[2])
        self.ior_cmd.block_size.update(test[3])
        self.ior_cmd.flags.update(flags)
        try:
            self.run_ior_with_pool(create_pool=False,
                                   create_cont=False,
                                   fail_on_warning=fail_on_warning)
        except CommandFailure as err_msg:
            self.out_queue.put(err_msg)
            self.assert_on_exception()

    def run_mdtest_thread(self):
        """Start mdtest thread and wait until thread completes.
        """
        # Create container only
        self.mdtest_cmd.dfs_destroy = False
        if self.container is None:
            self.add_container(self.pool, create=False)
            self.set_cont_class_properties(self.mdtest_cmd.dfs_oclass)
            if self.test_with_checksum is False:
                tmp = self.get_object_replica_value(self.mdtest_cmd.dfs_oclass)
                rf_value = "rf:{}".format(tmp - 1)
                self.update_cont_properties(rf_value)
            self.container.create()
        job_manager = self.get_mdtest_job_manager_command(self.manager)
        job_manager.job.dfs_cont.update(self.container.uuid)
        # Add a thread for the mdtest command
        process = threading.Thread(target=self.execute_mdtest)
        # Launch the MDtest thread
        process.start()
        # Wait for the thread to finish; Thread.join() does not raise
        # CommandFailure, so surface any queued error afterwards
        process.join()
        if not self.out_queue.empty():
            self.assert_on_exception()
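
For context, a minimal caller of the two helpers above might look like the following sketch; the method name, object class, and the ior_test_sequence attribute are illustrative assumptions, not part of the original class:

    def run_osa_io_example(self):
        # Hypothetical driver: write with IOR, run mdtest, then read back.
        # Assumed test-sequence layout: [scm_size, nvme_size, xfer, block].
        test_seq = self.ior_test_sequence[0]
        self.run_ior_thread("Write", oclass="RP_2G1", test=test_seq)
        self.run_mdtest_thread()
        self.run_ior_thread("Read", oclass="RP_2G1", test=test_seq)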
Example #15
class ObjectDataValidation(TestWithServers):
    """
    Test Class Description:
        Tests that create records of different lengths, disconnect and
        reconnect the pool/container, and validate the data after
        reconnecting.

    :avocado: recursive
    """

    # pylint: disable=too-many-instance-attributes
    def setUp(self):
        super(ObjectDataValidation, self).setUp()
        self.obj = None
        self.ioreq = None
        self.no_of_dkeys = None
        self.no_of_akeys = None
        self.array_size = None
        self.record_length = None
        self.no_of_dkeys = self.params.get("no_of_dkeys", '/run/dkeys/*')[0]
        self.no_of_akeys = self.params.get("no_of_akeys", '/run/akeys/*')[0]
        self.array_size = self.params.get("size", '/array_size/')
        self.record_length = self.params.get("length", '/run/record/*')

        self.prepare_pool()

        self.container = DaosContainer(self.context)
        self.container.create(self.pool.pool.handle)
        self.container.open()

        self.obj = DaosObj(self.context, self.container)
        self.obj.create(objcls=1)
        self.obj.open()
        self.ioreq = IORequest(self.context,
                               self.container,
                               self.obj,
                               objtype=4)

    def reconnect(self):
        '''
        Function to reconnect the pool/container and reopen the Object
        for read verification.
        '''
        # Close the object and container, and disconnect the pool.
        self.obj.close()
        self.container.close()
        self.pool.disconnect()
        time.sleep(5)
        # Connect the pool; open the container and object.
        self.pool.connect(2)
        self.container.open()
        self.obj.open()
        self.ioreq = IORequest(self.context,
                               self.container,
                               self.obj,
                               objtype=4)

    @avocado.fail_on(DaosApiError)
    def test_invalid_tx_commit_close(self):
        """
        Test ID:
            (1)DAOS-1346: Verify commit tx bad parameter behavior.
            (2)DAOS-1343: Verify tx_close bad parameter behavior.
            (3)DAOS-1342: Verify tx_close through daos_api.
            (4)DAOS-1338: Add and verify tx_abort through daos_api.
            (5)DAOS-1339: Verify tx_abort bad parameter behavior.
        Test Description:
            Write Avocado Test to verify commit tx and close tx
                          bad parameter behavior.
        :avocado: tags=all,object,full_regression,small,invalid_tx
        """
        self.d_log.info("==Writing the Single Dataset for negative test...")
        record_index = 0
        expected_error = "RC: -1002"
        dkey = 0
        akey = 0
        indata = ("{0}".format(str(akey)[0]) *
                  self.record_length[record_index])
        c_dkey = ctypes.create_string_buffer("dkey {0}".format(dkey))
        c_akey = ctypes.create_string_buffer("akey {0}".format(akey))
        c_value = ctypes.create_string_buffer(indata)
        c_size = ctypes.c_size_t(ctypes.sizeof(c_value))
        try:
            new_transaction = self.container.get_new_tx()
        except DaosApiError as excep:
            # Initial container get_new_tx failed; skip the rest of the test.
            self.fail("##container get_new_tx failed: {}".format(excep))
        invalid_transaction = new_transaction + random.randint(1000, 383838)
        self.log.info("==new_transaction=     %s", new_transaction)
        self.log.info("==invalid_transaction= %s", invalid_transaction)
        self.ioreq.single_insert(c_dkey, c_akey, c_value, c_size,
                                 new_transaction)
        try:
            self.container.commit_tx(invalid_transaction)
            self.fail("##(1.1)Container.commit_tx passing with invalid handle")
        except DaosApiError as excep:
            self.log.info(str(excep))
            self.log.info(
                "==(1)Expecting failure: invalid Container.commit_tx.")
            if expected_error not in str(excep):
                self.fail(
                    "##(1.2)Expecting error RC: -1002, but got {}.".format(
                        str(excep)))
        try:
            self.container.close_tx(invalid_transaction)
            self.fail("##(2.1)Container.close_tx passing with invalid handle")
        except DaosApiError as excep:
            self.log.info(str(excep))
            self.log.info(
                "==(2)Expecting failure: invalid Container.close_tx.")
            if expected_error not in str(excep):
                self.fail(
                    "##(2.2)Expecting error RC: -1002, but got {}.".format(
                        str(excep)))
        try:
            self.container.close_tx(new_transaction)
            self.log.info("==(3)container.close_tx test passed.")
        except DaosApiError as excep:
            self.log.info(str(excep))
            self.fail("##(3)Failed on close_tx.")

        try:
            self.container.abort_tx(invalid_transaction)
            self.fail("##(4.1)Container.abort_tx passing with invalid handle")
        except DaosApiError as excep:
            self.log.info(str(excep))
            self.log.info(
                "==(4)Expecting failure: invalid Container.abort_tx.")
            if expected_error not in str(excep):
                self.fail(
                    "##(4.2)Expecting error RC: -1002, but got {}.".format(
                        str(excep)))

        # Try to abort a transaction that has already been closed.
        try:
            self.container.abort_tx(new_transaction)
            self.fail("##(5.1)Container.abort_tx passing with a closed handle")
        except DaosApiError as excep:
            self.log.info(str(excep))
            self.log.info(
                "==(5)Expecting failure: Container.abort_tx closed handle.")
            if expected_error not in str(excep):
                self.fail(
                    "##(5.2)Expecting error RC: -1002, but got {}.".format(
                        str(excep)))

        # Open another transaction for the abort test.
        try:
            new_transaction2 = self.container.get_new_tx()
        except DaosApiError as excep:
            self.fail("##(6.1)container get_new_tx failed: {}".format(excep))
        self.log.info("==new_transaction2=     %s", new_transaction2)
        self.ioreq.single_insert(c_dkey, c_akey, c_value, c_size,
                                 new_transaction2)
        try:
            self.container.abort_tx(new_transaction2)
            self.log.info("==(6)container.abort_tx test passed.")
        except DaosApiError as excep:
            self.log.info(str(excep))
            self.fail("##(6.2)Failed on abort_tx.")

        self.container.close_tx(new_transaction2)
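        # For reference, the happy-path transaction lifecycle exercised
        # above (sketch using the same daos_api calls):
        #   tx = self.container.get_new_tx()
        #   self.ioreq.single_insert(c_dkey, c_akey, c_value, c_size, tx)
        #   self.container.commit_tx(tx)
        #   self.container.close_tx(tx)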

    @avocado.fail_on(DaosApiError)
    def test_single_object_validation(self):
        """
        Test ID: DAOS-707
        Test Description: Write Avocado Test to verify single data after
                          pool/container disconnect/reconnect.
        :avocado: tags=all,object,full_regression,small,single_object
        """
        self.d_log.info("Writing the Single Dataset")
        record_index = 0
        for dkey in range(self.no_of_dkeys):
            for akey in range(self.no_of_akeys):
                indata = ("{0}".format(str(akey)[0]) *
                          self.record_length[record_index])
                c_dkey = ctypes.create_string_buffer("dkey {0}".format(dkey))
                c_akey = ctypes.create_string_buffer("akey {0}".format(akey))
                c_value = ctypes.create_string_buffer(indata)
                c_size = ctypes.c_size_t(ctypes.sizeof(c_value))

                self.ioreq.single_insert(c_dkey, c_akey, c_value, c_size)
                record_index = record_index + 1
                if record_index == len(self.record_length):
                    record_index = 0

        self.reconnect()

        self.d_log.info("Single Dataset Verification -- Started")
        record_index = 0
        transaction_index = 0
        for dkey in range(self.no_of_dkeys):
            for akey in range(self.no_of_akeys):
                indata = ("{0}".format(str(akey)[0]) *
                          self.record_length[record_index])
                c_dkey = ctypes.create_string_buffer("dkey {0}".format(dkey))
                c_akey = ctypes.create_string_buffer("akey {0}".format(akey))
                val = self.ioreq.single_fetch(c_dkey, c_akey, len(indata) + 1)
                if indata != (repr(val.value)[1:-1]):
                    self.d_log.error("ERROR:Data mismatch for "
                                     "dkey = {0}, "
                                     "akey = {1}".format(
                                         "dkey {0}".format(dkey),
                                         "akey {0}".format(akey)))
                    self.fail(
                        "ERROR: Data mismatch for dkey = {0}, akey={1}".format(
                            "dkey {0}".format(dkey), "akey {0}".format(akey)))

                transaction_index = transaction_index + 1
                record_index = record_index + 1
                if record_index == len(self.record_length):
                    record_index = 0

    @avocado.fail_on(DaosApiError)
    def test_array_object_validation(self):
        """
        Test ID: DAOS-707
        Test Description: Write Avocado Test to verify Array data after
                          pool/container disconnect/reconnect.
        :avocado: tags=all,object,full_regression,small,array_object
        """
        self.d_log.info("Writing the Array Dataset")
        record_index = 0
        for dkey in range(self.no_of_dkeys):
            for akey in range(self.no_of_akeys):
                c_values = []
                value = ("{0}".format(str(akey)[0]) *
                         self.record_length[record_index])
                for item in range(self.array_size):
                    c_values.append(
                        (ctypes.create_string_buffer(value), len(value) + 1))
                c_dkey = ctypes.create_string_buffer("dkey {0}".format(dkey))
                c_akey = ctypes.create_string_buffer("akey {0}".format(akey))

                self.ioreq.insert_array(c_dkey, c_akey, c_values)

                record_index = record_index + 1
                if record_index == len(self.record_length):
                    record_index = 0

        self.reconnect()

        self.d_log.info("Array Dataset Verification -- Started")
        record_index = 0
        transaction_index = 0
        for dkey in range(self.no_of_dkeys):
            for akey in range(self.no_of_akeys):
                indata = []
                value = ("{0}".format(str(akey)[0]) *
                         self.record_length[record_index])
                for item in range(self.array_size):
                    indata.append(value)
                c_dkey = ctypes.create_string_buffer("dkey {0}".format(dkey))
                c_akey = ctypes.create_string_buffer("akey {0}".format(akey))
                c_rec_count = ctypes.c_uint(len(indata))
                c_rec_size = ctypes.c_size_t(len(indata[0]) + 1)

                outdata = self.ioreq.fetch_array(c_dkey, c_akey, c_rec_count,
                                                 c_rec_size)

                for item in enumerate(indata):
                    if indata[item[0]] != outdata[item[0]][:-1]:
                        self.d_log.error("ERROR:Data mismatch for "
                                         "dkey = {0}, "
                                         "akey = {1}".format(
                                             "dkey {0}".format(dkey),
                                             "akey {0}".format(akey)))
                        self.fail(
                            "ERROR:Data mismatch for dkey = {0}, akey={1}".
                            format("dkey {0}".format(dkey),
                                   "akey {0}".format(akey)))

                transaction_index = transaction_index + 1
                record_index = record_index + 1
                if record_index == len(self.record_length):
                    record_index = 0
Example #16
class OSAUtils(IorTestBase):
    # pylint: disable=too-many-ancestors
    """
    Test Class Description: This test runs
    daos_server offline drain test cases.

    :avocado: recursive
    """
    def setUp(self):
        """Set up for test case."""
        super(OSAUtils, self).setUp()
        self.container = None
        self.obj = None
        self.ioreq = None
        self.dmg_command = self.get_dmg_command()
        self.no_of_dkeys = self.params.get("no_of_dkeys",
                                           '/run/dkeys/*',
                                           default=[0])[0]
        self.no_of_akeys = self.params.get("no_of_akeys",
                                           '/run/akeys/*',
                                           default=[0])[0]
        self.record_length = self.params.get("length",
                                             '/run/record/*',
                                             default=[0])[0]

    @fail_on(CommandFailure)
    def get_pool_leader(self):
        """Get the pool leader.

        Returns:
            int: pool leader value

        """
        data = self.dmg_command.pool_query(self.pool.uuid)
        return int(data["leader"])

    @fail_on(CommandFailure)
    def get_rebuild_status(self):
        """Get the rebuild status.

        Returns:
            str: rebuild status

        """
        data = self.dmg_command.pool_query(self.pool.uuid)
        return data["rebuild"]["status"]

    @fail_on(CommandFailure)
    def is_rebuild_done(self, time_interval):
        """Rebuild is completed/done.
        Args:
            time_interval: Wait interval between checks
        Returns:
            False: If rebuild_status not "done" or "completed".
            True: If rebuild status is "done" or "completed".
        """
        status = False
        fail_count = 0
        completion_flag = ["done", "completed"]
        while fail_count <= 20:
            rebuild_status = self.get_rebuild_status()
            time.sleep(time_interval)
            fail_count += 1
            if rebuild_status in completion_flag:
                status = True
                break
        return status
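        # e.g. is_rebuild_done(5) re-reads the rebuild status every
        # 5 seconds and gives up after roughly 20 polls (~100 s) if the
        # status never reaches "done" or "completed".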

    @fail_on(CommandFailure)
    def assert_on_rebuild_failure(self):
        """If the rebuild is not successful,
        raise assert.
        """
        rebuild_status = self.get_rebuild_status()
        self.log.info("Rebuild Status: %s", rebuild_status)
        rebuild_failed_string = ["failed", "scanning", "aborted", "busy"]
        self.assertTrue(rebuild_status not in rebuild_failed_string,
                        "Rebuild failed")

    @fail_on(CommandFailure)
    def get_pool_version(self):
        """Get the pool version.

        Returns:
            int: pool_version_value

        """
        data = self.dmg_command.pool_query(self.pool.uuid)
        return int(data["version"])
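        # Assumed shape of the dmg pool query output consumed by the
        # helpers above (values illustrative):
        #   {"leader": 0, "version": 2, "rebuild": {"status": "done"}}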

    @fail_on(DaosApiError)
    def write_single_object(self):
        """Write some data to the existing pool."""
        self.pool.connect(2)
        csum = self.params.get("enable_checksum", '/run/container/*')
        self.container = DaosContainer(self.context)
        input_param = self.container.cont_input_values
        input_param.enable_chksum = csum
        self.container.create(poh=self.pool.pool.handle, con_prop=input_param)
        self.container.open()
        self.obj = DaosObj(self.context, self.container)
        self.obj.create(objcls=1)
        self.obj.open()
        self.ioreq = IORequest(self.context,
                               self.container,
                               self.obj,
                               objtype=4)
        self.log.info("Writing the Single Dataset")
        for dkey in range(self.no_of_dkeys):
            for akey in range(self.no_of_akeys):
                indata = ("{0}".format(str(akey)[0]) * self.record_length)
                d_key_value = "dkey {0}".format(dkey)
                c_dkey = ctypes.create_string_buffer(d_key_value)
                a_key_value = "akey {0}".format(akey)
                c_akey = ctypes.create_string_buffer(a_key_value)
                c_value = ctypes.create_string_buffer(indata)
                c_size = ctypes.c_size_t(ctypes.sizeof(c_value))
                self.ioreq.single_insert(c_dkey, c_akey, c_value, c_size)
        self.obj.close()
        self.container.close()

    @fail_on(DaosApiError)
    def verify_single_object(self):
        """Verify the container data on the existing pool."""
        self.pool.connect(2)
        self.container.open()
        self.obj.open()
        self.log.info("Single Dataset Verification -- Started")
        for dkey in range(self.no_of_dkeys):
            for akey in range(self.no_of_akeys):
                indata = ("{0}".format(str(akey)[0]) * self.record_length)
                c_dkey = ctypes.create_string_buffer("dkey {0}".format(dkey))
                c_akey = ctypes.create_string_buffer("akey {0}".format(akey))
                val = self.ioreq.single_fetch(c_dkey, c_akey, len(indata) + 1)
                if indata != (repr(val.value)[1:-1]):
                    self.d_log.error("ERROR:Data mismatch for "
                                     "dkey = {0}, "
                                     "akey = {1}".format(
                                         "dkey {0}".format(dkey),
                                         "akey {0}".format(akey)))
                    self.fail(
                        "ERROR: Data mismatch for dkey = {0}, akey={1}".format(
                            "dkey {0}".format(dkey), "akey {0}".format(akey)))
        self.obj.close()
        self.container.close()

    def ior_thread(self, pool, oclass, api, test, flags, results):
        """Start threads and wait until all threads are finished.

        Args:
            pool (object): pool handle
            oclass (str): IOR object class
            api (str): IOR api
            test (list): IOR test sequence
            flags (str): IOR flags
            results (queue): queue for returning thread results

        """
        container_info = {}
        mpio_util = MpioUtils()
        if mpio_util.mpich_installed(self.hostlist_clients) is False:
            self.fail("Exiting Test : Mpich not installed on :"
                      " {}".format(self.hostfile_clients[0]))
        self.pool = pool
        # Define the arguments for the ior_runner_thread method
        ior_cmd = IorCommand()
        ior_cmd.get_params(self)
        ior_cmd.set_daos_params(self.server_group, self.pool)
        ior_cmd.dfs_oclass.update(oclass)
        ior_cmd.api.update(api)
        ior_cmd.transfer_size.update(test[2])
        ior_cmd.block_size.update(test[3])
        ior_cmd.flags.update(flags)

        container_info["{}{}{}".format(oclass, api,
                                       test[2])] = str(uuid.uuid4())

        # Define the job manager for the IOR command
        self.job_manager = Mpirun(ior_cmd, mpitype="mpich")
        key = "".join([oclass, api, str(test[2])])
        self.job_manager.job.dfs_cont.update(container_info[key])
        env = ior_cmd.get_default_env(str(self.job_manager))
        self.job_manager.assign_hosts(self.hostlist_clients, self.workdir,
                                      None)
        self.job_manager.assign_processes(self.processes)
        self.job_manager.assign_environment(env, True)

        # run IOR Command
        try:
            self.job_manager.run()
        except CommandFailure as _error:
            results.put("FAIL")
Example #17
class OSAUtils(MdtestBase, IorTestBase):
    # pylint: disable=too-many-ancestors
    """
    Test Class Description: This test runs
    daos_server offline drain test cases.

    :avocado: recursive
    """
    def setUp(self):
        """Set up for test case."""
        super().setUp()
        self.pool_cont_dict = {}
        self.container = None
        self.obj = None
        self.ioreq = None
        self.dmg_command = self.get_dmg_command()
        self.no_of_dkeys = self.params.get("no_of_dkeys", '/run/dkeys/*',
                                           default=[0])[0]
        self.no_of_akeys = self.params.get("no_of_akeys", '/run/akeys/*',
                                           default=[0])[0]
        self.record_length = self.params.get("length", '/run/record/*',
                                             default=[0])[0]
        self.ior_w_flags = self.params.get("write_flags", '/run/ior/iorflags/*',
                                           default="")
        self.ior_r_flags = self.params.get("read_flags", '/run/ior/iorflags/*')
        self.server_count = len(self.hostlist_servers)
        self.engine_count = self.server_managers[0].get_config_value(
            "engines_per_host")
        self.out_queue = queue.Queue()
        self.dmg_command.exit_status_exception = False
        self.test_during_aggregation = False
        self.test_during_rebuild = False
        self.test_with_checksum = True
        # By default, test_with_rf is set to False.
        # It is up to individual test to enable it.
        self.test_with_rf = False
        self.test_with_blank_node = False
        self.test_with_snapshot = False

    @fail_on(CommandFailure)
    def get_pool_leader(self):
        """Get the pool leader.

        Returns:
            int: pool leader value

        """
        data = self.dmg_command.pool_query(self.pool.uuid)
        return int(data["response"]["leader"])

    @fail_on(CommandFailure)
    def get_rebuild_status(self):
        """Get the rebuild status.

        Returns:
            str: rebuild status

        """
        data = self.dmg_command.pool_query(self.pool.uuid)
        return data["response"]["rebuild"]["status"]

    @fail_on(CommandFailure)
    def get_rebuild_state(self):
        """Get the rebuild state.

        Returns:
            str: rebuild state

        """
        data = self.dmg_command.pool_query(self.pool.uuid)
        return data["response"]["rebuild"]["state"]

    @fail_on(CommandFailure)
    def is_rebuild_done(self, time_interval,
                        wait_for_rebuild_to_complete=False):
        """Rebuild is completed/done.
        Args:
            time_interval: Wait interval between checks
            wait_for_rebuild_to_complete: Rebuild completed
                                          (Default: False)
        """
        self.pool.wait_for_rebuild(wait_for_rebuild_to_complete,
                                   interval=time_interval)

    @fail_on(CommandFailure)
    def assert_on_rebuild_failure(self):
        """If the rebuild is not successful,
        raise assert.
        """
        rebuild_status = self.get_rebuild_status()
        self.log.info("Rebuild Status: %s", rebuild_status)
        rebuild_failed_string = ["failed", "scanning", "aborted", "busy"]
        self.assertTrue(rebuild_status not in rebuild_failed_string,
                        "Rebuild failed")

    @fail_on(CommandFailure)
    def print_and_assert_on_rebuild_failure(self, out, timeout=3):
        """Print the out value (daos, dmg, etc) and check for rebuild
        completion. If not, raise assert.
        """
        self.log.info(out)
        self.is_rebuild_done(timeout)
        self.assert_on_rebuild_failure()

    @fail_on(CommandFailure)
    def get_pool_version(self):
        """Get the pool version.

        Returns:
            int: pool_version_value

        """
        data = self.dmg_command.pool_query(self.pool.uuid)
        return int(data["response"]["version"])
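        # Assumed shape of the dmg pool query JSON consumed by the
        # helpers above (field values illustrative):
        #   {"response": {"leader": 0, "version": 2,
        #                 "rebuild": {"status": "done", "state": "done"}}}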

    @fail_on(CommandFailure)
    def get_ipaddr_for_rank(self, rank=None):
        """Obtain the IPAddress and port number for a
        particular server rank.

        Args:
            rank (int): daos_engine rank. Defaults to None.
        Returns:
            ip_addr (str) : IPAddress for the rank.
            port_num (str) : Port number for the rank.
        """
        output = self.dmg_command.system_query()
        members_length = self.server_count * self.engine_count
        for i in range(0, members_length):
            if rank == int(output["response"]["members"][i]["rank"]):
                temp = output["response"]["members"][i]["addr"]
                ip_addr = temp.split(":")
                temp = output["response"]["members"][i]["fabric_uri"]
                port_num = temp.split(":")
                return ip_addr[0], port_num[2]
        return None, None
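        # Illustrative parse (addresses assumed, not from the original):
        # a member entry with "addr": "10.8.1.67:10001" and
        # "fabric_uri": "ofi+sockets://10.8.1.67:31416" yields
        # ip_addr[0] == "10.8.1.67" and port_num[2] == "31416".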

    @fail_on(CommandFailure)
    def remove_pool_dir(self, ip_addr=None, port_num=None):
        """Remove the /mnt/daos[x]/<pool_uuid>/vos-* directory

        Args:
            ip_addr (str): IP address of the daos server.
                           Defaults to None.
            port_number (str) : Port number the daos server.
        """
        # Create the expected port list
        # expected_ports = [port0] - Single engine/server
        # expected_ports = [port0, port1] - Two engine/server
        expected_ports = [engine_param.get_value("fabric_iface_port")
                          for engine_param in self.server_managers[-1].
                          manager.job.yaml.engine_params]
        self.log.info("Expected ports : %s", expected_ports)
        if ip_addr is None or port_num is None:
            self.log.info("ip_addr : %s port_number: %s", ip_addr, port_num)
            self.fail("No IP Address or Port number provided")
        else:
            if self.engine_count == 1:
                self.log.info("Single Engine per Server")
                cmd = "/usr/bin/ssh {} -oStrictHostKeyChecking=no \
                      sudo rm -rf /mnt/daos/{}/vos-*". \
                      format(ip_addr, self.pool.uuid)
            elif self.engine_count == 2:
                if port_num == str(expected_ports[0]):
                    port_val = 0
                elif port_num == str(expected_ports[1]):
                    port_val = 1
                else:
                    self.log.info("port_number: %s", port_num)
                    self.fail("Invalid port number")
                cmd = "/usr/bin/ssh {} -oStrictHostKeyChecking=no \
                      sudo rm -rf /mnt/daos{}/{}/vos-*". \
                      format(ip_addr, port_val, self.pool.uuid)
            else:
                self.fail("Not supported engine per server configuration")
            run_command(cmd)

    def set_container(self, container):
        """Set the OSA utils container object.
        Args:
            container (obj) : Container object to be used
                              within OSA utils.
        """
        self.container = container

    def simple_osa_reintegrate_loop(self, rank, action="exclude",
                                    loop_time=100):
        """This method performs exclude or drain and
        reintegration on a rank for a certain amount of time.
        Args:
            rank (int): daos server rank.
            action (str) : "exclude" or "drain".
                           Defaults to "exclude"
            loop_time: Total time to perform drain/reintegrate
                       operation in a loop. (Default : 100 secs)
        """
        start_time = time.time()
        finish_time = 0
        while int(finish_time - start_time) < loop_time:
            if action == "exclude":
                output = self.dmg_command.pool_exclude(self.pool.uuid,
                                                       rank)
            else:
                output = self.dmg_command.pool_drain(self.pool.uuid,
                                                     rank)
            self.print_and_assert_on_rebuild_failure(output)
            output = self.dmg_command.pool_reintegrate(self.pool.uuid,
                                                       rank)
            self.print_and_assert_on_rebuild_failure(output)
            finish_time = time.time()
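        # e.g. simple_osa_reintegrate_loop(rank=1, action="drain",
        # loop_time=120) keeps alternating drain and reintegrate on rank 1
        # until roughly 120 seconds have elapsed.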

    @fail_on(DaosApiError)
    def write_single_object(self):
        """Write some data to the existing pool."""
        self.pool.connect(2)
        csum = self.params.get("enable_checksum", '/run/container/*')
        self.container = DaosContainer(self.context)
        input_param = self.container.cont_input_values
        input_param.enable_chksum = csum
        self.container.create(poh=self.pool.pool.handle,
                              con_prop=input_param)
        self.container.open()
        self.obj = DaosObj(self.context, self.container)
        self.obj.create(objcls=1)
        self.obj.open()
        self.ioreq = IORequest(self.context,
                               self.container,
                               self.obj, objtype=4)
        self.log.info("Writing the Single Dataset")
        for dkey in range(self.no_of_dkeys):
            for akey in range(self.no_of_akeys):
                indata = ("{0}".format(str(akey)[0])
                          * self.record_length)
                d_key_value = "dkey {0}".format(dkey)
                c_dkey = create_string_buffer(d_key_value)
                a_key_value = "akey {0}".format(akey)
                c_akey = create_string_buffer(a_key_value)
                c_value = create_string_buffer(indata)
                c_size = ctypes.c_size_t(ctypes.sizeof(c_value))
                self.ioreq.single_insert(c_dkey, c_akey, c_value, c_size)
        self.obj.close()
        self.container.close()

    @fail_on(DaosApiError)
    def verify_single_object(self):
        """Verify the container data on the existing pool."""
        self.pool.connect(2)
        self.container.open()
        self.obj.open()
        self.log.info("Single Dataset Verification -- Started")
        for dkey in range(self.no_of_dkeys):
            for akey in range(self.no_of_akeys):
                indata = ("{0}".format(str(akey)[0]) *
                          self.record_length)
                c_dkey = create_string_buffer("dkey {0}".format(dkey))
                c_akey = create_string_buffer("akey {0}".format(akey))
                val = self.ioreq.single_fetch(c_dkey,
                                              c_akey,
                                              len(indata)+1)
                if indata != (repr(val.value)[1:-1]):
                    self.d_log.error("ERROR:Data mismatch for "
                                     "dkey = {0}, "
                                     "akey = {1}".format(
                                         "dkey {0}".format(dkey),
                                         "akey {0}".format(akey)))
                    self.fail("ERROR: Data mismatch for dkey = {0}, akey={1}"
                              .format("dkey {0}".format(dkey),
                                      "akey {0}".format(akey)))
        self.obj.close()
        self.container.close()

    def prepare_cont_ior_write_read(self, oclass, flags):
        """This method prepares the containers for
        IOR write and read invocations.
            To enable aggregation:
            - Create two containers and read always from
              first container
            Normal usage (use only a single container):
            - Create a single container and use the same.
        Args:
            oclass (str): IOR object class
            flags (str): IOR flags
        """
        self.log.info(self.pool_cont_dict)
        # If the pool is not in the dictionary, initialize its
        # container list to four None entries:
        #   {poolA: [None, None, None, None]}
        if self.pool not in self.pool_cont_dict:
            self.pool_cont_dict[self.pool] = [None] * 4
        # Create a container if the pool doesn't have one.
        # Otherwise, use the existing container in the pool.
        #   pool_cont_dict = {poolA: [containerA, "Updated",
        #                             containerB, "Updated"],
        #                     poolB: [containerA, "Updated",
        #                             containerB, None]}
        if self.pool_cont_dict[self.pool][0] is None:
            self.add_container(self.pool, create=False)
            self.set_cont_class_properties(oclass)
            if self.test_with_checksum is False:
                tmp = self.get_object_replica_value(oclass)
                rf_value = "rf:{}".format(tmp - 1)
                self.update_cont_properties(rf_value)
            self.container.create()
            self.pool_cont_dict[self.pool][0] = self.container
            self.pool_cont_dict[self.pool][1] = "Updated"
        else:
            if ((self.test_during_aggregation is True) and
               (self.pool_cont_dict[self.pool][1] == "Updated") and
               (self.pool_cont_dict[self.pool][3] is None) and
               ("-w" in flags)):
                # Write to the second container
                self.add_container(self.pool, create=False)
                self.set_cont_class_properties(oclass)
                if self.test_with_checksum is False:
                    tmp = self.get_object_replica_value(oclass)
                    rf_value = "rf:{}".format(tmp - 1)
                    self.update_cont_properties(rf_value)
                self.container.create()
                self.pool_cont_dict[self.pool][2] = self.container
                self.pool_cont_dict[self.pool][3] = "Updated"
            else:
                self.container = self.pool_cont_dict[self.pool][0]
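        # Illustration of the bookkeeping above (assumed aggregation flow):
        #   after the first write : {poolA: [contA, "Updated", None, None]}
        #   after a second write  : {poolA: [contA, "Updated",
        #                                    contB, "Updated"]}
        #   every read            : self.container = pool_cont_dict[poolA][0]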

    def delete_extra_container(self, pool):
        """Delete the extra container in the pool.
        Refer prepare_cont_ior_write_read. This method
        should be called when OSA tests intend to
        enable aggregation.
        Args:
            pool (object): pool handle
        """
        self.pool.set_property("reclaim", "time")
        extra_container = self.pool_cont_dict[pool][2]
        extra_container.destroy()
        self.pool_cont_dict[pool][3] = None

    def get_object_replica_value(self, oclass):
        """ Get the object replica value for an object class.

        Args:
            oclass (str): Object Class (eg: RP_2G1,etc)

        Returns:
            value (int) : Object replica value
        """
        value = 0
        if "_" in oclass:
            replica_list = oclass.split("_")
            value = replica_list[1][0]
        else:
            self.log.info("Wrong Object Class. Cannot split")
        return int(value)
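        # e.g. get_object_replica_value("RP_2G1") returns 2 and
        # get_object_replica_value("RP_3G1") returns 3; "S1" contains no
        # "_", so the method logs a warning and returns 0.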

    def update_cont_properties(self, cont_prop):
        """Update the existing container properties.
        Args:
            cont_prop (str): Replace existing container properties
                             with new value
        """
        self.container.properties.value = cont_prop

    def set_cont_class_properties(self, oclass="S1"):
        """Update the container class to match the IOR/Mdtest object
        class. Fix the rf factor based on object replica value.
        Also, remove the redundancy factor for S type
        object class.
        Args:
            oclass (str, optional): Container object class to be set.
                                    Defaults to "S1".
        """
        self.container.oclass.value = oclass
        # Set the container properties properly for the S1, S2 classes.
        # rf should not be set to 1 for S type object class.
        match = re.search("^S\\d$", oclass)
        prop = self.container.properties.value
        if match is not None:
            prop = prop.replace("rf:1", "rf:0")
        else:
            tmp = self.get_object_replica_value(oclass)
            rf_value = "rf:{}".format(tmp - 1)
            prop = prop.replace("rf:1", rf_value)
        self.container.properties.value = prop
        # Overwrite the oclass settings if using a redundancy factor
        # and self.test_with_rf is True. This must be done so that the
        # container created does not use the object class.
        if self.test_with_rf is True and \
           "rf" in self.container.properties.value:
            self.log.info(
                "Detected container redundancy factor: %s",
                self.container.properties.value)
            self.ior_cmd.dfs_oclass.update(None, "ior.dfs_oclass")
            self.ior_cmd.dfs_dir_oclass.update(None, "ior.dfs_dir_oclass")
            self.container.oclass.update(None)
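        # Example rewrites (assuming the default properties include "rf:1"):
        #   oclass "S2"     -> "rf:1" is replaced with "rf:0"
        #   oclass "RP_3G1" -> replica value 3, so "rf:1" becomes "rf:2"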

    def assert_on_exception(self, out_queue=None):
        """Assert on exception while executing an application.

        Args:
            out_queue (queue): Queue to check. If it is empty, the
                application (ior, mdtest) did not encounter an error.
        """
        if out_queue is None:
            out_queue = self.out_queue
        if not out_queue.empty():
            exc = out_queue.get(block=False)
            out_queue.put(exc)
            raise CommandFailure(exc)

    def cleanup_queue(self, out_queue=None):
        """Cleanup the existing thread queue.

        Args:
            out_queue (queue): Queue to cleanup.
        """
        if out_queue is None:
            out_queue = self.out_queue
        while not out_queue.empty():
            out_queue.get(block=True)

    def run_ior_thread(self, action, oclass, test, single_cont_read=True,
                       fail_on_warning=True, pool=None):
        """Start the IOR thread for either writing or
        reading data to/from a container.
        Args:
            action (str): Start the IOR thread with Read or
                          Write
            oclass (str): IOR object class
            test (list): IOR test sequence
            flags (str): IOR flags
            single_cont_read (bool) : Always read from the
                                      1st container.
                                      Defaults to True.
            fail_on_warning (bool)  : Test terminates
                                      for IOR warnings.
                                      Defaults to True.
            pool (TestPool): Pool to run ior on. Defaults to None.

        """
        # Intermediate (between correct and hack) implementation for allowing a
        # pool to be passed in. Needs to be fixed by making the pool argument
        # required.
        if pool is None:
            pool = self.pool

        self.cleanup_queue()
        if action == "Write":
            flags = self.ior_w_flags
        else:
            flags = self.ior_r_flags

        # Add a thread for these IOR arguments
        process = threading.Thread(target=self.ior_thread,
                                   kwargs={"pool": pool,
                                           "oclass": oclass,
                                           "test": test,
                                           "flags": flags,
                                           "single_cont_read":
                                           single_cont_read,
                                           "fail_on_warning":
                                           fail_on_warning})
        # Launch the IOR thread
        process.start()
        # Wait for the thread to finish
        process.join()
        if not self.out_queue.empty():
            self.assert_on_exception()

    def ior_thread(self, pool, oclass, test, flags,
                   single_cont_read=True,
                   fail_on_warning=True):
        """Start an IOR thread.

        Args:
            pool (object): pool handle
            oclass (str): IOR object class, container class.
            test (list): IOR test sequence
            flags (str): IOR flags
            single_cont_read (bool): Always read from the 1st container.
                                     Defaults to True.
            fail_on_warning (bool): Test terminates on IOR warnings.
                                    Defaults to True.
        """
        self.cleanup_queue()
        self.pool = pool
        self.ior_cmd.get_params(self)
        self.ior_cmd.set_daos_params(self.server_group, self.pool)
        self.log.info("Redundancy Factor : %s", self.test_with_rf)
        self.ior_cmd.dfs_oclass.update(oclass)
        self.ior_cmd.dfs_dir_oclass.update(oclass)
        if single_cont_read is True:
            # Prepare the containers in the specific way defined in
            # prepare_cont_ior_write_read.
            self.prepare_cont_ior_write_read(oclass, flags)
        elif single_cont_read is False and self.container is not None:
            # self.container already holds a valid container; use it as-is.
            self.log.info(self.container)
        else:
            self.fail("Unsupported single_cont_read option in ior_thread")
        try:
            job_manager = self.get_ior_job_manager_command()
        except CommandFailure as err_msg:
            self.out_queue.put(err_msg)
            self.assert_on_exception()
        job_manager.job.dfs_cont.update(self.container.uuid)
        self.ior_cmd.transfer_size.update(test[2])
        self.ior_cmd.block_size.update(test[3])
        self.ior_cmd.flags.update(flags)
        # Update oclass settings if using redundancy factor
        # and self.test_with_rf is True.
        if self.test_with_rf is True and \
           "rf" in self.container.properties.value:
            self.log.info(
                "Detected container redundancy factor: %s",
                self.container.properties.value)
            self.ior_cmd.dfs_oclass.update(None, "ior.dfs_oclass")
            self.ior_cmd.dfs_dir_oclass.update(None, "ior.dfs_dir_oclass")
        self.run_ior_with_pool(create_pool=False, create_cont=False,
                               fail_on_warning=fail_on_warning,
                               out_queue=self.out_queue)
        if not self.out_queue.empty():
            self.assert_on_exception()

    def run_mdtest_thread(self, oclass="RP_2G1"):
        """Start mdtest thread and wait until thread completes.
        Args:
            oclass (str): IOR object class, container class.
        """
        # Create container only
        self.mdtest_cmd.dfs_destroy = False
        create_container = False
        if self.container is None:
            self.add_container(self.pool, create=False)
            create_container = True
        self.mdtest_cmd.dfs_oclass.update(oclass)
        self.set_cont_class_properties(oclass)
        if self.test_with_checksum is False:
            tmp = self.get_object_replica_value(oclass)
            rf_value = "rf:{}".format(tmp - 1)
            self.update_cont_properties(rf_value)
        if create_container:
            self.container.create()
        job_manager = self.get_mdtest_job_manager_command(self.manager)
        job_manager.job.dfs_cont.update(self.container.uuid)
        # Add a thread for the mdtest command
        process = threading.Thread(target=self.execute_mdtest)
        # Launch the MDtest thread
        process.start()
        # Wait for the thread to finish
        process.join()
        if not self.out_queue.empty():
            self.assert_on_exception()
Example #18
    def dataset_verify(self, obj_list, cont, num_objs, num_dkeys,
                       num_akeys_single, num_akeys_array, akey_sizes,
                       akey_extents):
        """Verify a dataset generated with dataset_gen.

        Args:
            obj_list (list): obj_list returned from dataset_gen.
            cont (TestContainer): the container.
            num_objs (int): number of objects created in the container.
            num_dkeys (int): number of dkeys created per object.
            num_akeys_single (int): number of DAOS_IOD_SINGLE akeys per dkey.
            num_akeys_array (int): number of DAOS_IOD_ARRAY akeys per dkey.
            akey_sizes (list): varying akey sizes to iterate.
            akey_extents (list): varying number of akey extents to iterate.

        """
        self.log.info("Verifying dataset in %s/%s", str(cont.pool.uuid),
                      str(cont.uuid))

        cont.open()

        for obj_idx in range(num_objs):
            # Open the obj
            c_oid = obj_list[obj_idx].c_oid
            obj = DaosObj(cont.pool.context, cont.container, c_oid=c_oid)
            obj.open()

            ioreq = IORequest(cont.pool.context, cont.container, obj)
            for dkey_idx in range(num_dkeys):
                dkey = "dkey {}".format(dkey_idx)
                c_dkey = create_string_buffer(dkey)

                for akey_idx in range(num_akeys_single):
                    # Round-robin to get the size of data and
                    # arbitrarily use a number 0-9 to fill data
                    akey_size_idx = akey_idx % len(akey_sizes)
                    data_size = akey_sizes[akey_size_idx]
                    data_val = str(akey_idx % 10)
                    data = data_size * data_val
                    akey = "akey single {}".format(akey_idx)
                    c_akey = create_string_buffer(akey)
                    c_data = ioreq.single_fetch(c_dkey, c_akey, data_size + 1)
                    actual_data = str(c_data.value.decode())
                    if actual_data != data:
                        self.log.info("Expected:\n%s\nBut got:\n%s",
                                      data[:100] + "...",
                                      actual_data[:100] + "...")
                        self.log.info("For:\nobj: %s.%s\ndkey: %s\nakey: %s",
                                      str(obj.c_oid.hi), str(obj.c_oid.lo),
                                      dkey, akey)
                        self.fail("Single value verification failed.")

                for akey_idx in range(num_akeys_array):
                    # Round-robin to get the size of data and
                    # the number of extents, and
                    # arbitrarily use a number 0-9 to fill data
                    akey_size_idx = akey_idx % len(akey_sizes)
                    data_size = akey_sizes[akey_size_idx]
                    akey_extent_idx = akey_idx % len(akey_extents)
                    num_extents = akey_extents[akey_extent_idx]
                    akey = "akey array {}".format(akey_idx)
                    c_akey = create_string_buffer(akey)
                    c_num_extents = ctypes.c_uint(num_extents)
                    c_data_size = ctypes.c_size_t(data_size)
                    actual_data = ioreq.fetch_array(c_dkey, c_akey,
                                                    c_num_extents, c_data_size)
                    for data_idx in range(num_extents):
                        data_val = str(data_idx % 10)
                        data = data_size * data_val
                        actual_idx = str(actual_data[data_idx].decode())
                        if data != actual_idx:
                            self.log.info("Expected:\n%s\nBut got:\n%s",
                                          data[:100] + "...",
                                          actual_idx[:100] + "...")
                            self.log.info(
                                "For:\nobj: %s.%s\ndkey: %s\nakey: %s",
                                str(obj.c_oid.hi), str(obj.c_oid.lo), dkey,
                                akey)
                            self.fail("Array verification failed.")

            obj.close()
        cont.close()
Example #19
    def dataset_gen(self, cont, num_objs, num_dkeys, num_akeys_single,
                    num_akeys_array, akey_sizes, akey_extents):
        """Generate a dataset with some number of objects, dkeys, and akeys.

        Expects the container to be created with the API control method.

        Args:
            cont (TestContainer): the container.
            num_objs (int): number of objects to create in the container.
            num_dkeys (int): number of dkeys to create per object.
            num_akeys_single (int): number of DAOS_IOD_SINGLE akeys per dkey.
            num_akeys_array (int): number of DAOS_IOD_ARRAY akeys per dkey.
            akey_sizes (list): varying akey sizes to iterate.
            akey_extents (list): varying number of akey extents to iterate.

        Returns:
            list: a list of DaosObj created.

        """
        self.log.info("Creating dataset in %s/%s", str(cont.pool.uuid),
                      str(cont.uuid))

        cont.open()

        obj_list = []

        for obj_idx in range(num_objs):
            # Open the obj
            obj = DaosObj(cont.pool.context, cont.container)
            obj_list.append(obj)
            obj.create(rank=obj_idx, objcls=2)
            obj.open()

            ioreq = IORequest(cont.pool.context, cont.container, obj)
            for dkey_idx in range(num_dkeys):
                dkey = "dkey {}".format(dkey_idx)
                c_dkey = create_string_buffer(dkey)

                for akey_idx in range(num_akeys_single):
                    # Round-robin to get the size of data and
                    # arbitrarily use a number 0-9 to fill data
                    akey_size_idx = akey_idx % len(akey_sizes)
                    data_size = akey_sizes[akey_size_idx]
                    data_val = str(akey_idx % 10)
                    data = data_size * data_val
                    akey = "akey single {}".format(akey_idx)
                    c_akey = create_string_buffer(akey)
                    c_data = create_string_buffer(data)
                    c_size = ctypes.c_size_t(ctypes.sizeof(c_data))
                    ioreq.single_insert(c_dkey, c_akey, c_data, c_size)

                for akey_idx in range(num_akeys_array):
                    # Round-robin to get the size of data and
                    # the number of extents, and
                    # arbitrarily use a number 0-9 to fill data
                    akey_size_idx = akey_idx % len(akey_sizes)
                    data_size = akey_sizes[akey_size_idx]
                    akey_extent_idx = akey_idx % len(akey_extents)
                    num_extents = akey_extents[akey_extent_idx]
                    akey = "akey array {}".format(akey_idx)
                    c_akey = create_string_buffer(akey)
                    c_data = []
                    for data_idx in range(num_extents):
                        data_val = str(data_idx % 10)
                        data = data_size * data_val
                        c_data.append([create_string_buffer(data), data_size])
                    ioreq.insert_array(c_dkey, c_akey, c_data)

            obj.close()
        cont.close()

        return obj_list
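
A hedged sketch pairing dataset_gen with the dataset_verify helper from Example #18; the method name and all counts and sizes below are illustrative assumptions:

    def dataset_roundtrip_example(self, cont):
        # Hypothetical round trip: generate data, disturb the pool, verify.
        obj_list = self.dataset_gen(cont, num_objs=2, num_dkeys=3,
                                    num_akeys_single=4, num_akeys_array=4,
                                    akey_sizes=[1, 1024],
                                    akey_extents=[1, 8])
        # ... pool operations (e.g. drain/reintegrate a rank) go here ...
        self.dataset_verify(obj_list, cont, 2, 3, 4, 4, [1, 1024], [1, 8])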
Example #20
    def verify_dataset(self, cont):
        """Verify the dataset.

        Args:
            cont (TestContainer): the container

        """
        self.log.info("Verifying dataset in %s/%s", str(cont.pool.uuid),
                      str(cont.uuid))

        cont.open()

        for obj_idx in range(self.num_objs):
            obj = DaosObj(cont.pool.context, cont.container,
                          self.obj_list[obj_idx].c_oid)
            obj.open()
            ioreq = IORequest(cont.pool.context, cont.container, obj)
            for dkey_idx in range(self.num_dkeys_per_obj):
                dkey = "dkey {}".format(dkey_idx)
                c_dkey = ctypes.create_string_buffer(dkey.encode())

                for akey_idx in range(self.num_akeys_single_per_dkey):
                    # Round-robin to get the size of data and
                    # arbitrarily use a number 0-9 to fill data
                    akey_size_idx = akey_idx % len(self.akey_sizes)
                    data_size = self.akey_sizes[akey_size_idx]
                    data_val = str(akey_idx % 10)
                    data = str(data_size * data_val)
                    akey = "akey single {}".format(akey_idx)
                    c_akey = ctypes.create_string_buffer(akey.encode())
                    buf = ioreq.single_fetch(c_dkey, c_akey, data_size + 1)
                    actual_data = str(buf.value.decode())
                    if actual_data != data:
                        self.log.info("Expected:\n%s\nBut got:\n%s",
                                      data[:100] + "...",
                                      actual_data[:100] + "...")
                        self.log.info("For:\nobj: %s.%s\ndkey: %s\nakey: %s",
                                      str(obj.c_oid.hi), str(obj.c_oid.lo),
                                      dkey, akey)
                        self.fail("Single value verification failed.")

                for akey_idx in range(self.num_akeys_array_per_dkey):
                    # Round-robin to get the size of data and
                    # the number of extents, and
                    # arbitrarily use a number 0-9 to fill data
                    akey_size_idx = akey_idx % len(self.akey_sizes)
                    data_size = self.akey_sizes[akey_size_idx]
                    c_data_size = ctypes.c_size_t(data_size)
                    akey_extent_idx = akey_idx % len(self.akey_extents)
                    num_extents = self.akey_extents[akey_extent_idx]
                    c_num_extents = ctypes.c_uint(num_extents)
                    akey = "akey array {}".format(akey_idx)
                    c_akey = ctypes.create_string_buffer(akey.encode())
                    actual_data = ioreq.fetch_array(c_dkey, c_akey,
                                                    c_num_extents, c_data_size)
                    for data_idx in range(num_extents):
                        data_val = str(data_idx % 10)
                        data = str(data_size * data_val)
                        actual_idx = str(actual_data[data_idx].decode())
                        if data != actual_idx:
                            self.log.info("Expected:\n%s\nBut got:\n%s",
                                          data[:100] + "...",
                                          actual_idx[:100] + "...")
                            self.log.info(
                                "For:\nobj: %s.%s\ndkey: %s\nakey: %s",
                                str(obj.c_oid.hi), str(obj.c_oid.lo), dkey,
                                akey)
                            self.fail("Array verification failed.")
            obj.close()
        cont.close()