コード例 #1
0
 def verify_single_object(self):
     """Verify the container data on the existing pool.

     Re-reads every dkey/akey record written by write_single_object()
     and fails the test on any data mismatch.
     """
     self.pool.connect(2)
     self.container.open()
     self.obj.open()
     self.log.info("Single Dataset Verification -- Started")
     for dkey in range(self.no_of_dkeys):
         for akey in range(self.no_of_akeys):
             # Expected payload: first digit of the akey index repeated
             # record_length times (the same pattern used when writing).
             indata = ("{0}".format(str(akey)[0]) *
                       self.record_length)
             c_dkey = create_string_buffer("dkey {0}".format(dkey))
             c_akey = create_string_buffer("akey {0}".format(akey))
             val = self.ioreq.single_fetch(c_dkey,
                                           c_akey,
                                           len(indata)+1)
             # Decode the fetched bytes instead of the old
             # repr(val.value)[1:-1] hack, which breaks on Python 3
             # where repr(bytes) carries a "b" prefix.
             if indata != val.value.decode('utf-8'):
                 self.d_log.error("ERROR:Data mismatch for "
                                  "dkey = {0}, "
                                  "akey = {1}".format(
                                      "dkey {0}".format(dkey),
                                      "akey {0}".format(akey)))
                 self.fail("ERROR: Data mismatch for dkey = {0}, akey={1}"
                           .format("dkey {0}".format(dkey),
                                   "akey {0}".format(akey)))
     self.obj.close()
     self.container.close()
コード例 #2
0
 def write_single_object(self):
     """Write some data to the existing pool.

     Creates a container (checksum setting taken from the test yaml),
     an object, and an IORequest, then inserts one record per
     dkey/akey combination. The container, object, and IORequest are
     kept on self for later verification.
     """
     self.pool.connect(2)
     checksum = self.params.get("enable_checksum", '/run/container/*')
     self.container = DaosContainer(self.context)
     cont_props = self.container.cont_input_values
     cont_props.enable_chksum = checksum
     self.container.create(
         poh=self.pool.pool.handle, con_prop=cont_props)
     self.container.open()
     self.obj = DaosObj(self.context, self.container)
     self.obj.create(objcls=1)
     self.obj.open()
     self.ioreq = IORequest(
         self.context, self.container, self.obj, objtype=4)
     self.log.info("Writing the Single Dataset")
     for dkey_idx in range(self.no_of_dkeys):
         for akey_idx in range(self.no_of_akeys):
             # Record payload: first digit of the akey index repeated
             # record_length times.
             record = str(akey_idx)[0] * self.record_length
             c_dkey = create_string_buffer("dkey {0}".format(dkey_idx))
             c_akey = create_string_buffer("akey {0}".format(akey_idx))
             c_value = create_string_buffer(record)
             c_size = ctypes.c_size_t(ctypes.sizeof(c_value))
             self.ioreq.single_insert(c_dkey, c_akey, c_value, c_size)
     self.obj.close()
     self.container.close()
コード例 #3
0
ファイル: create_many_dkeys.py プロジェクト: kjacque/daos
    def write_a_bunch_of_values(self, how_many):
        """Write data to an object, then read it back and verify it.

        Each record is written with its own dkey and akey. The how_many
        parameter determines how many key:value pairs are written. The
        container is destroyed at the end.

        Args:
            how_many (int): number of dkey/akey:value pairs to write.
        """

        self.container = DaosContainer(self.context)
        self.container.create(self.pool.pool.handle)
        self.container.open()

        ioreq = IORequest(self.context, self.container, None)

        print("Started Writing the Dataset-----------\n")
        inc = 50000
        last_key = inc
        for key in range(how_many):
            c_dkey = create_string_buffer("dkey {0}".format(key))
            c_akey = create_string_buffer("akey {0}".format(key))
            c_value = create_string_buffer(
                "some data that gets stored with the key {0}".format(key))
            c_size = ctypes.c_size_t(ctypes.sizeof(c_value))
            ioreq.single_insert(c_dkey,
                                c_akey,
                                c_value,
                                c_size)

            # Print progress roughly every 'inc' keys to show liveness on
            # long runs without flooding stdout.
            if key > last_key:
                print("written: {}".format(key))
                sys.stdout.flush()
                last_key = key + inc

        print("Started Verification of the Dataset-----------\n")
        last_key = inc
        for key in range(how_many):
            c_dkey = create_string_buffer("dkey {0}".format(key))
            c_akey = create_string_buffer("akey {0}".format(key))
            the_data = "some data that gets stored with the key {0}".format(
                key)
            val = ioreq.single_fetch(c_dkey,
                                     c_akey,
                                     len(the_data)+1)
            exp_value = val.value.decode("utf-8")
            if the_data != exp_value:
                self.fail("ERROR: Data mismatch for dkey = {0}, akey={1}, "
                          "Expected Value={2} and Received Value={3}\n"
                          .format("dkey {0}".format(key),
                                  "akey {0}".format(key),
                                  the_data,
                                  exp_value))

            if key > last_key:
                # Fixed typo in progress message ("veried" -> "verified").
                print("verified: {}".format(key))
                sys.stdout.flush()
                last_key = key + inc

        print("starting destroy")
        self.container.close()
        self.container.destroy()
        print("destroy complete")
コード例 #4
0
    def test_single_object_validation(self):
        """
        Test ID: DAOS-707
        Test Description: Write Avocado Test to verify single data after
                          pool/container disconnect/reconnect.
        :avocado: tags=all,full_regression,small
        :avocado: tags=object,objectvalidation
        :avocado: tags=single_object
        """
        self.d_log.info("Writing the Single Dataset")
        record_index = 0
        for dkey in range(self.no_of_dkeys):
            for akey in range(self.no_of_akeys):
                # Payload: first digit of the akey index repeated; record
                # sizes round-robin through self.record_length.
                indata = ("{0}".format(str(akey)[0])
                          * self.record_length[record_index])
                c_dkey = create_string_buffer("dkey {0}".format(dkey))
                c_akey = create_string_buffer("akey {0}".format(akey))
                c_value = create_string_buffer(indata)
                c_size = ctypes.c_size_t(ctypes.sizeof(c_value))

                self.ioreq.single_insert(c_dkey, c_akey, c_value, c_size)
                record_index = record_index + 1
                if record_index == len(self.record_length):
                    record_index = 0

        # Disconnect/reconnect so the verification below reads back
        # persisted data rather than any cached state.
        self.reconnect()

        self.d_log.info("Single Dataset Verification -- Started")
        # Note: a dead 'transaction_index' counter was removed here; it
        # was incremented but never read.
        record_index = 0
        for dkey in range(self.no_of_dkeys):
            for akey in range(self.no_of_akeys):
                indata = ("{0}".format(str(akey)[0]) *
                          self.record_length[record_index])
                c_dkey = create_string_buffer("dkey {0}".format(dkey))
                c_akey = create_string_buffer("akey {0}".format(akey))
                val = self.ioreq.single_fetch(c_dkey,
                                              c_akey,
                                              len(indata)+1)
                if indata != str(val.value, 'utf-8'):
                    self.d_log.error("ERROR:Data mismatch for "
                                     "dkey = {0}, "
                                     "akey = {1}".format(
                                         "dkey {0}".format(dkey),
                                         "akey {0}".format(akey)))
                    self.fail("ERROR: Data mismatch for dkey = {0}, akey={1}"
                              .format("dkey {0}".format(dkey),
                                      "akey {0}".format(akey)))

                record_index = record_index + 1
                if record_index == len(self.record_length):
                    record_index = 0
コード例 #5
0
ファイル: destroy_tests.py プロジェクト: xiaming9880/daos
    def test_destroy_invalid_group(self):
        """Test destroying a valid pool but use the wrong server group.

        :avocado: tags=all,medium,full_regression
        :avocado: tags=pool,destroy,destroyinvalidgroup
        """
        servers = self.hostlist_servers[:1]
        good_setid = self.params.get("setname", '/run/setnames/validsetname/')
        bad_setid = self.params.get("setname", '/run/setnames/badsetname/')

        # Bring up the servers under the valid group name.
        self.start_servers(self.get_group(good_setid, servers))

        # Create a pool and confirm it exists.
        self.validate_pool_creation(servers, good_setid)

        # Swap in the bad server group name, remembering the original so
        # it can be restored afterwards.
        saved_group = self.pool.pool.group
        self.pool.pool.group = create_string_buffer(bad_setid)

        # Destroying with the wrong group name is expected to fail.
        self.validate_pool_destroy(
            servers,
            "with an invalid server group name {}".format(bad_setid), True)

        # Put the valid group name back so tearDown() can clean up.
        self.log.info("Restoring the pool's valid server group name: %s",
                      str(saved_group.value))
        self.pool.pool.group = saved_group
コード例 #6
0
    def test_single_object_with_checksum(self):
        """
        Test ID: DAOS-3927
        Test Description: Write Avocado Test to verify single data after
                          pool/container disconnect/reconnect.
        :avocado: tags=all,daily_regression
        :avocado: tags=checksum
        :avocado: tags=basic_checksum_object
        """
        self.d_log.info("Writing the Single Dataset")
        num_lengths = len(self.record_length)
        record_count = 0
        for dkey in range(self.no_of_dkeys):
            for akey in range(self.no_of_akeys):
                # Record sizes round-robin through self.record_length;
                # the payload repeats the akey's first digit.
                length = self.record_length[record_count % num_lengths]
                record_count += 1
                payload = str(akey)[0] * length
                c_dkey = create_string_buffer("dkey {0}".format(dkey))
                c_akey = create_string_buffer("akey {0}".format(akey))
                c_value = create_string_buffer(payload)
                c_size = ctypes.c_size_t(ctypes.sizeof(c_value))

                self.ioreq.single_insert(c_dkey, c_akey, c_value, c_size)

        self.d_log.info("Single Dataset Verification -- Started")
        record_count = 0
        for dkey in range(self.no_of_dkeys):
            for akey in range(self.no_of_akeys):
                length = self.record_length[record_count % num_lengths]
                record_count += 1
                expected = str(akey)[0] * length
                c_dkey = create_string_buffer("dkey {0}".format(dkey))
                c_akey = create_string_buffer("akey {0}".format(akey))
                val = self.ioreq.single_fetch(c_dkey,
                                              c_akey,
                                              len(expected) + 1)
                actual = val.value.decode('utf-8')
                if expected != actual:
                    message = (
                        "ERROR:Data mismatch for dkey={}, akey={}: indata={}, "
                        "val={}".format(dkey, akey, expected, actual))
                    self.d_log.error(message)
                    self.fail(message)
コード例 #7
0
ファイル: destroy_tests.py プロジェクト: xiaming9880/daos
    def test_destroy_wrong_group(self):
        """Test destroying a pool.

         Destroy a pool on group A that was created on server group B,
         should fail.

        :avocado: tags=all,medium,full_regression
        :avocado: tags=pool,destroy,destroywronggroup
        """
        # Two distinct server groups, each running on a different host.
        group_names = [self.server_group + "_a", self.server_group + "_b"]
        group_hosts = {
            group_names[0]: self.get_group_info(self.hostlist_servers[:1]),
            group_names[1]: self.get_group_info(self.hostlist_servers[1:2]),
        }
        self.start_servers(group_hosts)

        # Create the pool only in the first server group.
        self.log.info("Create a pool in server group %s", group_names[0])
        self.add_pool(create=False)
        self.pool.name.value = group_names[0]
        self.pool.create()
        self.log.info("Pool UUID is %s", self.pool.uuid)

        # Commented out due to DAOS-3836.
        # self.assertTrue(
        #    self.pool.check_files(group_hosts[group_names[0]]),
        #    "Pool UUID {} not dected in server group {}".format(
        #        self.pool.uuid, group_names[0]))
        # self.assertFalse(
        #    self.pool.check_files(group_hosts[group_names[1]]),
        #    "Pool UUID {} detected in server group {}".format(
        #        self.pool.uuid, group_names[1]))

        # Attempt to delete the pool from the wrong server group - should fail
        self.pool.pool.group = create_string_buffer(group_names[1])
        self.validate_pool_destroy(
            group_hosts[group_names[0]],
            "{} from the wrong server group {}".format(self.pool.uuid,
                                                       group_names[1]), True)

        # Attempt to delete the pool from the right server group - should pass
        self.pool.pool.group = create_string_buffer(group_names[0])
        self.validate_pool_destroy(
            group_hosts[group_names[1]],
            "{} from the right server group {}".format(self.pool.uuid,
                                                       group_names[0]), False)
コード例 #8
0
    def write_objects_insert_keys(self, container, objtype):
        """Write objects and insert dkeys and akeys in them.

        Creates self.obj_count IORequests; for each one, the same value
        is inserted under two dkeys ("A" and "B") that share one akey.
        The key strings and buffers are recorded on self for later use.

        Args:
            container (DaosContainer): Container.
            objtype (str): Object class.
        """
        for idx in range(self.obj_count):
            self.ioreqs.append(
                IORequest(context=self.context, container=container,
                          obj=None, objtype=objtype))

            # Prepare 2 dkeys and 1 akey in each dkey. Use the same akey
            # for both dkeys.
            dkey_a = b"Sample dkey A %d" % idx
            dkey_b = b"Sample dkey B %d" % idx
            akey_bytes = b"Sample akey %d" % idx
            self.dkey_strs_a.append(dkey_a)
            self.dkey_strs_b.append(dkey_b)
            self.akey_strs.append(akey_bytes)
            data = create_string_buffer(b"Sample data %d" % idx)

            # Pass in length of the key so that it won't have \0
            # termination. Not necessary here because we're not
            # interested in the list output. Just for debugging.
            self.dkeys_a.append(
                create_string_buffer(value=dkey_a, size=len(dkey_a)))
            self.dkeys_b.append(
                create_string_buffer(value=dkey_b, size=len(dkey_b)))
            akey = create_string_buffer(
                value=akey_bytes, size=len(akey_bytes))
            c_size = ctypes.c_size_t(ctypes.sizeof(data))

            # Insert the same value under both dkeys.
            for dkey in (self.dkeys_a[-1], self.dkeys_b[-1]):
                self.ioreqs[-1].single_insert(dkey=dkey,
                                              akey=akey,
                                              value=data,
                                              size=c_size)
コード例 #9
0
    def evict_badparam(self, test_param):
        """Connect to pool, connect and try to evict with a bad param.

        Args:
            test_param (str): either invalid UUID or bad server name

        Returns:
            bool: result of the pool_handle_exist() check when the evict
                fails as expected; False when the evict unexpectedly
                succeeds.

        """
        # setup pool and connect
        self.pool = self.connected_pool(self.hostlist_servers)

        self.log.info("Pool UUID: %s\n Pool handle: %s\n Server group: %s\n",
                      self.pool.uuid, self.pool.pool.handle.value,
                      self.pool.name)

        server_name = None

        if test_param == "BAD_SERVER_NAME":
            # Attempt to evict pool with invalid server group name
            # set the server group name directly
            server_name = test_param
            self.pool.pool.group = create_string_buffer(test_param)
            self.log.info("Evicting pool with invalid Server Group Name: %s",
                          test_param)
        elif test_param == "invalid_uuid":
            # Attempt to evict pool with invalid UUID
            bogus_uuid = self.pool.uuid
            # in case uuid4() generates pool.uuid
            while bogus_uuid == self.pool.uuid:
                bogus_uuid = str(uuid.uuid4())
            # set the UUID directly
            self.pool.pool.set_uuid_str(bogus_uuid)
            self.log.info("Evicting pool with Invalid Pool UUID:  %s",
                          self.pool.pool.get_uuid_str())
        else:
            self.fail("Invalid yaml parameters - check \"params\" values")
        try:
            # call dmg pool_evict directly
            self.pool.dmg.pool_evict(pool=self.pool.pool.get_uuid_str(),
                                     sys=server_name)
        # exception is expected
        except CommandFailure as result:
            self.log.info("Expected exception - invalid param %s\n %s\n",
                          test_param, str(result))

            # verify that pool still exists and the handle is still valid.
            # NOTE: the finally block below runs before this return.
            self.log.info("Check if pool handle still exist")
            return self.pool_handle_exist(test_param)
        finally:
            # Restore the valid server group name or uuid
            if "BAD_SERVER_NAME" in test_param:
                self.pool.pool.group = create_string_buffer(self.server_group)
            else:
                self.pool.pool.set_uuid_str(self.pool.uuid)

        # if here then pool-evict did not raise an exception as expected
        # restore the valid server group name and check if valid pool
        # still exists
        self.log.info(
            "Command exception did not occur"
            " - evict from pool with %s", test_param)

        # check if pool handle still exists
        self.pool_handle_exist(test_param)

        # Commented out due to DAOS-3836.
        # if self.pool.check_files(self.hostlist_servers):
        #    self.log.error("Valid pool files were not detected on server after"
        #                   " a pool evict with %s failed to raise an "
        #                   "exception", test_param)
        self.log.error(
            "Test did not raise an exception with when "
            "evicting a pool with bad param: %s", test_param)
        return False
コード例 #10
0
    def dataset_verify(self, obj_list, cont, num_objs, num_dkeys,
                       num_akeys_single, num_akeys_array, akey_sizes,
                       akey_extents):
        """Verify a dataset generated with dataset_gen.

        The round-robin parameters must match the ones passed to
        dataset_gen so the expected values are reconstructed identically.

        Args:
            obj_list (list): obj_list returned from dataset_gen.
            cont (TestContainer): the container.
            num_objs (int): number of objects created in the container.
            num_dkeys (int): number of dkeys created per object.
            num_akeys_single (int): number of DAOS_IOD_SINGLE akeys per dkey.
            num_akeys_array (int): number of DAOS_IOD_ARRAY akeys per dkey.
            akey_sizes (list): varying akey sizes to iterate.
            akey_extents (list): varying number of akey extents to iterate.

        """
        self.log.info("Verifying dataset in %s/%s", str(cont.pool.uuid),
                      str(cont.uuid))

        cont.open()

        for obj_idx in range(num_objs):
            # Open the obj, re-using the oid recorded at creation time.
            c_oid = obj_list[obj_idx].c_oid
            obj = DaosObj(cont.pool.context, cont.container, c_oid=c_oid)
            obj.open()

            ioreq = IORequest(cont.pool.context, cont.container, obj)
            for dkey_idx in range(num_dkeys):
                dkey = "dkey {}".format(dkey_idx)
                c_dkey = create_string_buffer(dkey)

                for akey_idx in range(num_akeys_single):
                    # Round-robin to get the size of data and
                    # arbitrarily use a number 0-9 to fill data
                    akey_size_idx = akey_idx % len(akey_sizes)
                    data_size = akey_sizes[akey_size_idx]
                    data_val = str(akey_idx % 10)
                    data = data_size * data_val
                    akey = "akey single {}".format(akey_idx)
                    c_akey = create_string_buffer(akey)
                    # +1 for the terminating null byte.
                    c_data = ioreq.single_fetch(c_dkey, c_akey, data_size + 1)
                    actual_data = str(c_data.value.decode())
                    if actual_data != data:
                        # Log only a prefix; records can be very large.
                        self.log.info("Expected:\n%s\nBut got:\n%s",
                                      data[:100] + "...",
                                      actual_data[:100] + "...")
                        self.log.info("For:\nobj: %s.%s\ndkey: %s\nakey: %s",
                                      str(obj.c_oid.hi), str(obj.c_oid.lo),
                                      dkey, akey)
                        self.fail("Single value verification failed.")

                for akey_idx in range(num_akeys_array):
                    # Round-robin to get the size of data and
                    # the number of extents, and
                    # arbitrarily use a number 0-9 to fill data
                    akey_size_idx = akey_idx % len(akey_sizes)
                    data_size = akey_sizes[akey_size_idx]
                    akey_extent_idx = akey_idx % len(akey_extents)
                    num_extents = akey_extents[akey_extent_idx]
                    akey = "akey array {}".format(akey_idx)
                    c_akey = create_string_buffer(akey)
                    c_num_extents = ctypes.c_uint(num_extents)
                    c_data_size = ctypes.c_size_t(data_size)
                    actual_data = ioreq.fetch_array(c_dkey, c_akey,
                                                    c_num_extents, c_data_size)
                    # Each extent is verified individually against the
                    # pattern used at generation time.
                    for data_idx in range(num_extents):
                        data_val = str(data_idx % 10)
                        data = data_size * data_val
                        actual_idx = str(actual_data[data_idx].decode())
                        if data != actual_idx:
                            self.log.info("Expected:\n%s\nBut got:\n%s",
                                          data[:100] + "...",
                                          actual_idx + "...")
                            self.log.info(
                                "For:\nobj: %s.%s\ndkey: %s\nakey: %s",
                                str(obj.c_oid.hi), str(obj.c_oid.lo), dkey,
                                akey)
                            self.fail("Array verification failed.")

            obj.close()
        cont.close()
コード例 #11
0
    def dataset_gen(self, cont, num_objs, num_dkeys, num_akeys_single,
                    num_akeys_array, akey_sizes, akey_extents):
        """Generate a dataset with some number of objects, dkeys, and akeys.

        Expects the container to be created with the API control method.

        Args:
            cont (TestContainer): the container.
            num_objs (int): number of objects to create in the container.
            num_dkeys (int): number of dkeys to create per object.
            num_akeys_single (int): number of DAOS_IOD_SINGLE akeys per dkey.
            num_akeys_array (int): number of DAOS_IOD_ARRAY akeys per dkey.
            akey_sizes (list): varying akey sizes to iterate.
            akey_extents (list): varying number of akey extents to iterate.

        Returns:
            list: a list of DaosObj created.

        """
        self.log.info("Creating dataset in %s/%s", str(cont.pool.uuid),
                      str(cont.uuid))

        cont.open()

        created_objs = []

        for rank in range(num_objs):
            # Create and open one object per rank.
            new_obj = DaosObj(cont.pool.context, cont.container)
            created_objs.append(new_obj)
            new_obj.create(rank=rank, objcls=2)
            new_obj.open()

            request = IORequest(cont.pool.context, cont.container, new_obj)
            for dkey_num in range(num_dkeys):
                c_dkey = create_string_buffer("dkey {}".format(dkey_num))

                # Single-value akeys: record size cycles through
                # akey_sizes; the fill character is the akey index mod 10.
                for akey_num in range(num_akeys_single):
                    size = akey_sizes[akey_num % len(akey_sizes)]
                    record = str(akey_num % 10) * size
                    c_akey = create_string_buffer(
                        "akey single {}".format(akey_num))
                    c_record = create_string_buffer(record)
                    c_size = ctypes.c_size_t(ctypes.sizeof(c_record))
                    request.single_insert(c_dkey, c_akey, c_record, c_size)

                # Array akeys: extent count cycles through akey_extents;
                # each extent is filled with its own index mod 10.
                for akey_num in range(num_akeys_array):
                    size = akey_sizes[akey_num % len(akey_sizes)]
                    extents = akey_extents[akey_num % len(akey_extents)]
                    c_akey = create_string_buffer(
                        "akey array {}".format(akey_num))
                    extent_data = []
                    for extent_num in range(extents):
                        record = str(extent_num % 10) * size
                        extent_data.append(
                            [create_string_buffer(record), size])
                    request.insert_array(c_dkey, c_akey, extent_data)

            new_obj.close()
        cont.close()

        return created_objs
コード例 #12
0
ファイル: object_integrity.py プロジェクト: xiaming9880/daos
    def test_invalid_tx_commit_close(self):
        """
        Test ID:
            (1)DAOS-1346: Verify commit tx bad parameter behavior.
            (2)DAOS-1343: Verify tx_close bad parameter behavior.
            (3)DAOS-1342: Verify tx_close through daos_api.
            (4)DAOS-1338: Add and verify tx_abort through daos_api.
            (5)DAOS-1339: Verify tx_abort bad parameter behavior.
        Test Description:
            Write Avocado Test to verify commit tx and close tx
                          bad parameter behavior.
        :avocado: tags=all,object,full_regression,small,invalid_tx
        """
        self.d_log.info("==Writing the Single Dataset for negative test...")
        record_index = 0
        # All invalid-handle cases are expected to fail with this DAOS rc.
        expected_error = "RC: -1002"
        dkey = 0
        akey = 0
        # One record is enough for the negative transaction tests.
        indata = ("{0}".format(str(akey)[0]) *
                  self.record_length[record_index])
        c_dkey = create_string_buffer("dkey {0}".format(dkey))
        c_akey = create_string_buffer("akey {0}".format(akey))
        c_value = create_string_buffer(indata)
        c_size = ctypes.c_size_t(ctypes.sizeof(c_value))
        try:
            new_transaction = self.container.get_new_tx()
        except DaosApiError as excep:
            # initial container get_new_tx failed, skip rest of the test
            self.fail("##container get_new_tx failed: {}".format(excep))
        # Derive a handle value that cannot match the real one.
        invalid_transaction = new_transaction + random.randint(1000, 383838)
        self.log.info("==new_transaction=     %s", new_transaction)
        self.log.info("==invalid_transaction= %s", invalid_transaction)
        self.ioreq.single_insert(c_dkey, c_akey, c_value, c_size,
                                 new_transaction)
        # (1) commit_tx with an invalid handle must fail with -1002.
        try:
            self.container.commit_tx(invalid_transaction)
            self.fail("##(1.1)Container.commit_tx passing with invalid handle")
        except DaosApiError as excep:
            self.log.info(str(excep))
            self.log.info(
                "==(1)Expecting failure: invalid Container.commit_tx.")
            if expected_error not in str(excep):
                self.fail(
                    "##(1.2)Expecting error RC: -1002, but got {}.".format(
                        str(excep)))
        # (2) close_tx with an invalid handle must fail with -1002.
        try:
            self.container.close_tx(invalid_transaction)
            self.fail("##(2.1)Container.close_tx passing with invalid handle")
        except DaosApiError as excep:
            self.log.info(str(excep))
            self.log.info(
                "==(2)Expecting failure: invalid Container.commit_tx.")
            if expected_error not in str(excep):
                self.fail(
                    "##(2.2)Expecting error RC: -1002, but got {}.".format(
                        str(excep)))
        # (3) close_tx with the valid handle must succeed.
        try:
            self.container.close_tx(new_transaction)
            self.log.info("==(3)container.close_tx test passed.")
        except DaosApiError as excep:
            self.log.info(str(excep))
            self.fail("##(3)Failed on close_tx.")

        # (4) abort_tx with an invalid handle must fail with -1002.
        try:
            self.container.abort_tx(invalid_transaction)
            self.fail("##(4.1)Container.abort_tx passing with invalid handle")
        except DaosApiError as excep:
            self.log.info(str(excep))
            self.log.info(
                "==(4)Expecting failure: invalid Container.abort_tx.")
            if expected_error not in str(excep):
                self.fail(
                    "##(4.2)Expecting error RC: -1002, but got {}.".format(
                        str(excep)))

        # (5) Try to abort the transaction which already closed.
        try:
            self.container.abort_tx(new_transaction)
            self.fail("##(5.1)Container.abort_tx passing with a closed handle")
        except DaosApiError as excep:
            self.log.info(str(excep))
            self.log.info(
                "==(5)Expecting failure: Container.abort_tx closed handle.")
            if expected_error not in str(excep):
                self.fail(
                    "##(5.2)Expecting error RC: -1002, but got {}.".format(
                        str(excep)))

        # (6) open another transaction for abort test
        try:
            new_transaction2 = self.container.get_new_tx()
        except DaosApiError as excep:
            self.fail("##(6.1)container get_new_tx failed: {}".format(excep))
        self.log.info("==new_transaction2=     %s", new_transaction2)
        self.ioreq.single_insert(c_dkey, c_akey, c_value, c_size,
                                 new_transaction2)
        try:
            self.container.abort_tx(new_transaction2)
            self.log.info("==(6)container.abort_tx test passed.")
        except DaosApiError as excep:
            self.log.info(str(excep))
            self.fail("##(6.2)Failed on abort_tx.")

        self.container.close_tx(new_transaction2)
コード例 #13
0
ファイル: object_integrity.py プロジェクト: xiaming9880/daos
    def test_array_object_validation(self):
        """
        Test ID: DAOS-707
        Test Description: Write Avocado Test to verify Array data after
                          pool/container disconnect/reconnect.
        :avocado: tags=all,object,full_regression,small,array_object
        """
        self.d_log.info("Writing the Array Dataset")
        record_index = 0
        for dkey in range(self.no_of_dkeys):
            for akey in range(self.no_of_akeys):
                # Each record repeats the akey's first digit; record
                # sizes round-robin through self.record_length.
                value = ("{0}".format(str(akey)[0]) *
                         self.record_length[record_index])
                c_values = [(create_string_buffer(value), len(value) + 1)
                            for _ in range(self.array_size)]
                c_dkey = create_string_buffer("dkey {0}".format(dkey))
                c_akey = create_string_buffer("akey {0}".format(akey))

                self.ioreq.insert_array(c_dkey, c_akey, c_values)

                record_index = record_index + 1
                if record_index == len(self.record_length):
                    record_index = 0

        # Disconnect/reconnect so verification reads persisted data.
        self.reconnect()

        self.d_log.info("Array Dataset Verification -- Started")
        # Note: a dead 'transaction_index' counter was removed here; it
        # was incremented but never read.
        record_index = 0
        for dkey in range(self.no_of_dkeys):
            for akey in range(self.no_of_akeys):
                value = ("{0}".format(str(akey)[0]) *
                         self.record_length[record_index])
                indata = [value] * self.array_size
                c_dkey = create_string_buffer("dkey {0}".format(dkey))
                c_akey = create_string_buffer("akey {0}".format(akey))
                c_rec_count = ctypes.c_uint(len(indata))
                c_rec_size = ctypes.c_size_t(len(indata[0]) + 1)

                outdata = self.ioreq.fetch_array(c_dkey, c_akey, c_rec_count,
                                                 c_rec_size)

                # Compare each expected record against the fetched one,
                # dropping the trailing null from the fetched value.
                for expected, actual in zip(indata, outdata):
                    if expected != actual[:-1]:
                        self.d_log.error("ERROR:Data mismatch for "
                                         "dkey = {0}, "
                                         "akey = {1}".format(
                                             "dkey {0}".format(dkey),
                                             "akey {0}".format(akey)))
                        self.fail(
                            "ERROR:Data mismatch for dkey = {0}, akey={1}".
                            format("dkey {0}".format(dkey),
                                   "akey {0}".format(akey)))

                record_index = record_index + 1
                if record_index == len(self.record_length):
                    record_index = 0
Code example #14
score: 0
File: test_utils_pool.py — Project: xiaming9880/daos
    def create(self):
        """Create a pool with dmg.

        To use dmg, the test needs to set dmg_command through the constructor.
        For example,

            self.pool = TestPool(self.context, DmgCommand(self.bin))

        If it wants to use --nsvc option, it needs to set the value to
        svcn.value. Otherwise, 1 is used. If it wants to use --group, it needs
        to set groupname.value. If it wants to use --user, it needs to set
        username.value. If it wants to add other options, directly set it
        to self.dmg.action_command. Refer dmg_utils.py pool_create method for
        more details.

        To test the negative case on create, the test needs to catch
        CommandFailure. Thus, we need to make more than one line modification
        to the test only for this purpose.
        Currently, pool_svc is the only test that needs this change.

        Raises:
            CommandFailure: if the control method is USE_API, which is not
                supported by this method.
        """
        # Ensure any previously created pool is removed first.
        self.destroy()
        if self.target_list.value is not None:
            self.log.info("Creating a pool on targets %s",
                          self.target_list.value)
        else:
            self.log.info("Creating a pool")

        # Start with an empty DaosPool object; it is populated below from
        # the dmg command output.
        self.pool = DaosPool(self.context)
        kwargs = {
            "uid": self.uid,
            "gid": self.gid,
            "scm_size": self.scm_size.value,
            "group": self.name.value
        }
        # Only forward optional parameters that the test actually set.
        for key in ("target_list", "svcn", "nvme_size"):
            value = getattr(self, key).value
            if value is not None:
                kwargs[key] = value

        if self.control_method.value == self.USE_API:
            raise CommandFailure(
                "Error: control method {} not supported for create()".format(
                    self.control_method.value))

        if self.control_method.value == self.USE_DMG and self.dmg:
            # Create a pool with the dmg command and store its CmdResult
            self._log_method("dmg.pool_create", kwargs)
            data = self.dmg.pool_create(**kwargs)
            if self.dmg.result.exit_status == 0:
                # Populate the empty DaosPool object with the properties of the
                # pool created with dmg pool create.
                if self.name.value:
                    self.pool.group = create_string_buffer(self.name.value)

                # Convert the string of service replicas from the dmg command
                # output into an ctype array for the DaosPool object using the
                # same technique used in DaosPool.create().
                service_replicas = [
                    int(value) for value in data["svc"].split(",")
                ]
                rank_t = ctypes.c_uint * len(service_replicas)
                rank = rank_t(*service_replicas)
                rl_ranks = ctypes.POINTER(ctypes.c_uint)(rank)
                self.pool.svc = daos_cref.RankList(rl_ranks,
                                                   len(service_replicas))

                # Set UUID and attached to the DaosPool object
                self.pool.set_uuid_str(data["uuid"])
                self.pool.attached = 1

        elif self.control_method.value == self.USE_DMG:
            self.log.error("Error: Undefined dmg command")

        else:
            self.log.error("Error: Undefined control_method: %s",
                           self.control_method.value)

        # Set the TestPool attributes for the created pool
        if self.pool.attached:
            self.svc_ranks = [
                int(self.pool.svc.rl_ranks[index])
                for index in range(self.pool.svc.rl_nr)
            ]
            self.uuid = self.pool.get_uuid_str()
Code example #15
score: 0
File: bad_connect.py — Project: xiaming9880/daos
    def test_connect(self):
        """
        Pass bad parameters to pool connect

        :avocado: tags=all,pool,full_regression,tiny,badconnect
        """
        # Accumulate a list of pass/fail indicators representing what is
        # expected for each parameter then "and" them to determine the
        # expected result of the test
        expected_for_param = []

        # Each params.get returns a [value, expected-result] pair from the
        # test yaml: index 0 is the value to use, index 1 is 'PASS'/'FAIL'.
        modelist = self.params.get("mode", '/run/connecttests/connectmode/*/')
        connectmode = modelist[0]
        expected_for_param.append(modelist[1])

        setlist = self.params.get("setname",
                                  '/run/connecttests/connectsetnames/*/')
        connectset = setlist[0]
        expected_for_param.append(setlist[1])

        uuidlist = self.params.get("uuid", '/run/connecttests/UUID/*/')
        connectuuid = uuidlist[0]
        expected_for_param.append(uuidlist[1])

        # if any parameter is FAIL then the test should FAIL, in this test
        # virtually everyone should FAIL since we are testing bad parameters
        expected_result = 'PASS'
        for result in expected_for_param:
            if result == 'FAIL':
                expected_result = 'FAIL'
                break

        # Backup buffer for the pool UUID so it can be restored in the
        # finally block after the test deliberately corrupts it.
        puuid = (ctypes.c_ubyte * 16)()
        # NOTE(review): this initial pgroup value is overwritten below
        # before it is ever read — it appears to be a dead assignment.
        pgroup = create_string_buffer(0)
        # initialize a python pool object then create the underlying
        # daos storage
        self.pool = TestPool(self.context, self.get_dmg_command())
        self.pool.get_params(self)
        self.pool.create()
        # save this uuid since we might trash it as part of the test
        ctypes.memmove(puuid, self.pool.pool.uuid, 16)

        # trash the pool group value
        pgroup = self.pool.pool.group
        if connectset == 'NULLPTR':
            self.pool.pool.group = None

        # trash the UUID value in various ways
        if connectuuid == 'NULLPTR':
            self.pool.pool.uuid = None
        if connectuuid == 'JUNK':
            self.pool.pool.uuid[4] = 244

        try:
            # connectmode is a bit-position from the yaml; shift it into the
            # flag value the connect API expects.
            self.pool.connect(1 << connectmode)

            if expected_result in ['FAIL']:
                self.fail("Test was expected to fail but it passed.\n")

        except TestFail as excep:
            print(excep)
            print(traceback.format_exc())
            if expected_result in ['PASS']:
                self.fail("Test was expected to pass but it failed.\n")

        # cleanup the pool
        finally:
            if self.pool is not None and self.pool.pool.attached == 1:
                # restore values in case we trashed them during test
                self.pool.pool.group = pgroup
                if self.pool.pool.uuid is None:
                    self.pool.pool.uuid = (ctypes.c_ubyte * 16)()
                ctypes.memmove(self.pool.pool.uuid, puuid, 16)
                print("pool uuid after restore {}".format(
                    self.pool.pool.get_uuid_str()))