Ejemplo n.º 1
0
    def test_bad_handle(self):
        """
        Test ID: DAOS-1376

        Test Description: Pass a bogus object handle, should return bad handle.

        :avocado: tags=all,object,full_regression,small,objbadhand
        """
        self.prepare_pool()

        try:
            # create a container
            container = DaosContainer(self.context)
            container.create(self.pool.pool.handle)
            self.plog.info("Container %s created.", container.get_uuid_str())

            # now open it
            container.open()

            # create an object and write some data into it
            thedata = "a string that I want to stuff into an object"
            thedatasize = len(thedata) + 1
            dkey = "this is the dkey"
            akey = "this is the akey"
            obj = container.write_an_obj(thedata, thedatasize, dkey, akey,
                                         None, None, 2)

            # corrupt the object handle so the next write must fail with
            # DER_NO_HDL (-1002); keep the real handle for cleanup
            saved_oh = obj.obj_handle
            obj.obj_handle = 99999

            obj = container.write_an_obj(thedata, thedatasize, dkey, akey, obj,
                                         None, 2)

            # BUG FIX: restore the saved handle to the object it was taken
            # from; the original assigned it to container.oh, leaving the
            # bogus handle on the object.
            obj.obj_handle = saved_oh
            container.close()
            container.destroy()
            self.fail("Test was expected to return a -1002 but it has not.\n")

        except DaosApiError as excep:
            # expected path: undo the handle corruption, clean up, and
            # confirm the error was the bad-handle code
            obj.obj_handle = saved_oh
            container.close()
            container.destroy()
            self.plog.info("Test Complete")
            if '-1002' not in str(excep):
                print(excep)
                print(traceback.format_exc())
                self.fail("Test was expected to get -1002 but it has not.\n")
Ejemplo n.º 2
0
class CreateManyDkeys(TestWithServers):
    """
    Test Class Description:
        Tests that create large numbers of keys in objects/containers and then
        destroy the containers and verify the space has been reclaimed.

    :avocado: recursive
    """

    def write_a_bunch_of_values(self, how_many):
        """
        Write data to an object, each with a dkey and akey.  The how_many
        parameter determines how many key:value pairs are written.  After
        writing, every pair is read back and verified, then the container
        is closed and destroyed to reclaim the space.
        """

        self.container = DaosContainer(self.context)
        self.container.create(self.pool.pool.handle)
        self.container.open()

        ioreq = IORequest(self.context, self.container, None)

        print("Started Writing the Dataset-----------\n")
        # progress-report interval: print roughly every `inc` keys so long
        # runs show signs of life without flooding the log
        inc = 50000
        last_key = inc
        for key in range(how_many):
            c_dkey = create_string_buffer("dkey {0}".format(key))
            c_akey = create_string_buffer("akey {0}".format(key))
            c_value = create_string_buffer(
                "some data that gets stored with the key {0}".format(key))
            c_size = ctypes.c_size_t(ctypes.sizeof(c_value))
            ioreq.single_insert(c_dkey,
                                c_akey,
                                c_value,
                                c_size)

            if key > last_key:
                print("written: {}".format(key))
                sys.stdout.flush()
                last_key = key + inc

        print("Started Verification of the Dataset-----------\n")
        last_key = inc
        for key in range(how_many):
            c_dkey = create_string_buffer("dkey {0}".format(key))
            c_akey = create_string_buffer("akey {0}".format(key))
            the_data = "some data that gets stored with the key {0}".format(key)
            val = ioreq.single_fetch(c_dkey,
                                     c_akey,
                                     len(the_data)+1)
            exp_value = val.value.decode("utf-8")
            if the_data != exp_value:
                self.fail("ERROR: Data mismatch for dkey = {0}, akey={1}, "
                          "Expected Value={2} and Received Value={3}\n"
                          .format("dkey {0}".format(key),
                                  "akey {0}".format(key),
                                  the_data,
                                  exp_value))

            if key > last_key:
                # BUG FIX: typo "veried" -> "verified" in progress output
                print("verified: {}".format(key))
                sys.stdout.flush()
                last_key = key + inc

        print("starting destroy")
        self.container.close()
        self.container.destroy()
        print("destroy complete")

    @avocado.fail_on(DaosApiError)
    def test_many_dkeys(self):
        """
        Test ID: DAOS-1701
        Test Description: Test many of dkeys in same object.
        Use Cases: 1. large key counts
                   2. space reclamation after destroy

        :avocado: tags=all,full_regression
        :avocado: tags=small
        :avocado: tags=object
        :avocado: tags=many_dkeys
        """
        self.prepare_pool()
        no_of_dkeys = self.params.get("number_of_dkeys", '/run/dkeys/')

        # write a lot of individual data items, verify them, then destroy
        self.write_a_bunch_of_values(no_of_dkeys)

        # do it again, which should verify the first container
        # was truly destroyed because a second round won't fit otherwise
        self.write_a_bunch_of_values(no_of_dkeys)
Ejemplo n.º 3
0
    def test_array_obj(self):
        """
        Test ID: DAOS-961

        Test Description: Writes an array to an object and then reads it
        back and verifies it.

        :avocado: tags=all,smoke,pr,object,tiny,basicobject
        """
        try:
            # pool-creation parameters pulled from the test yaml, plus the
            # effective uid/gid of the current process
            create_mode = self.params.get("mode",
                                          '/run/pool_params/createmode/')
            create_set_id = self.params.get("setname",
                                            '/run/pool_params/createset/')
            create_size = self.params.get("size",
                                          '/run/pool_params/createsize/')
            create_uid = os.geteuid()
            create_gid = os.getegid()

            # build the python pool object and the backing daos storage
            pool = DaosPool(self.context)
            pool.create(create_mode, create_uid, create_gid, create_size,
                        create_set_id, None)
            self.plog.info("Pool %s created.", pool.get_uuid_str())

            # a pool connection is required before containers can be made
            pool.connect(1 << 1)

            # create a container and open it
            container = DaosContainer(self.context)
            container.create(pool.handle)
            self.plog.info("Container %s created.", container.get_uuid_str())
            container.open()

            # query the container and confirm that the UUID reported by
            # query matches the one returned at create time
            container.query()
            if container.get_uuid_str() != c_uuid_to_str(
                    container.info.ci_uuid):
                self.fail("Container UUID did not match the one in info\n")

            # write an array of strings under a single dkey/akey pair
            thedata = ["data string one", "data string two", "data string tre"]
            dkey = "this is the dkey"
            akey = "this is the akey"

            self.plog.info("writing array to dkey >%s< akey >%s<.", dkey, akey)
            oid, epoch = container.write_an_array_value(thedata,
                                                        dkey,
                                                        akey,
                                                        obj_cls=3)

            # read the array back and spot-check the first and last entries
            length = len(thedata[0])
            thedata2 = container.read_an_array(len(thedata), length + 1, dkey,
                                               akey, oid, epoch)
            for idx in (0, 2):
                if thedata[idx][0:length - 1] != thedata2[idx][0:length - 1]:
                    self.plog.error("Data mismatch")
                    self.plog.error("Wrote: >%s<", thedata[idx])
                    self.plog.error("Read: >%s<", thedata2[idx])
                    self.fail("Write data, read it back, didn't match\n")

            container.close()

            # give the close a few seconds to settle before destroying
            time.sleep(5)
            container.destroy()

            # tear down the pool
            pool.disconnect()
            pool.destroy(1)
            self.plog.info("Test Complete")

        except DaosApiError as excep:
            self.plog.error("Test Failed, exception was thrown.")
            print(excep)
            print(traceback.format_exc())
            self.fail("Test was expected to pass but it failed.\n")
Ejemplo n.º 4
0
    def test_null_values(self):
        """
        Test ID: DAOS-1376

        Test Description: Pass a dkey and an akey that is null.

        :avocado: tags=all,object,full_regression,small,objupdatenull
        """
        self.prepare_pool()

        try:
            # create a container
            container = DaosContainer(self.context)
            container.create(self.pool.pool.handle)
            self.plog.info("Container %s created.", container.get_uuid_str())

            # now open it
            container.open()

            # data used in the test
            thedata = "a string that I want to stuff into an object"
            thedatasize = len(thedata) + 1

        except DaosApiError as excep:
            print(excep)
            print(traceback.format_exc())
            self.fail("Test failed during setup .\n")

        try:
            # a null dkey must be rejected with DER_INVAL (-1003)
            dkey = None
            akey = "this is the akey"

            container.write_an_obj(thedata, thedatasize, dkey, akey, None,
                                   None, 2)

            container.close()
            container.destroy()
            self.plog.error("Didn't get expected return code.")
            self.fail("Test was expected to return a -1003 but it has not.\n")

        except DaosApiError as excep:
            if '-1003' not in str(excep):
                container.close()
                container.destroy()
                self.plog.error("Didn't get expected return code.")
                print(excep)
                print(traceback.format_exc())
                self.fail("Test was expected to get -1003 but it has not.\n")

        try:
            # a null akey/io descriptor must also be rejected with -1003
            dkey = "this is the dkey"
            akey = None
            container.write_an_obj(thedata, thedatasize, dkey, akey, None,
                                   None, 2)
            # FIX: clean up the container before failing, matching the
            # null-dkey case above; the original left it open here
            container.close()
            container.destroy()
            self.fail("Test was expected to return a -1003 but it has not.\n")

        except DaosApiError as excep:
            if '-1003' not in str(excep):
                # FIX: same cleanup-on-unexpected-error as the null-dkey
                # branch, which the original omitted here
                container.close()
                container.destroy()
                self.plog.error("Didn't get expected return code.")
                print(excep)
                print(traceback.format_exc())
                self.fail("Test was expected to get -1003 but it has not.\n")

        try:
            # lastly try passing no data; a no-op update is expected to work
            thedata = None
            thedatasize = 0
            dkey = "this is the dkey"
            akey = "this is the akey"

            container.write_an_obj(thedata, thedatasize, dkey, akey, None,
                                   None, 2)
            self.plog.info("Update with no data worked")

        except DaosApiError as excep:
            container.close()
            container.destroy()
            print(excep)
            print(traceback.format_exc())
            self.plog.error("Update with no data failed")
            self.fail("Update with no data failed.\n")

        container.close()
        container.destroy()
        self.plog.info("Test Complete")
Ejemplo n.º 5
0
    def test_dkey_akey_enum_punch(self):
        """Test count and active for enum and punch.

        Test Steps:
        1. Write 100 objects.
        2. Insert 2 dkeys per object. Insert 1 akey per dkey. Use OC_S1.
        3. Punch all akeys and dkeys.
        4. Call list_dkey() and list_akey() on the objects.
        5. Verify the metrics below.

        --- Metrics tested ---
        1. engine_pool_ops_dkey_enum
        Number of list_dkey() calls made to each object. Since we create 100
        objects and call this on all of them, we expect this value to sum up to
        100.

        2. engine_pool_ops_akey_enum
        Number of list_akey() calls made to each dkey. We create 2 dkeys per
        object and there are 100 objects, so we expect this value to sum up to
        200.

        3. engine_pool_ops_dkey_punch
        Number of dkeys punched. There are 200 dkeys total and we punch them one
        at a time, so we expect this value to sum up to 200.

        4. engine_pool_ops_akey_punch
        Number of akeys punched. There are 200 akeys total and we punch them one
        at a time, so we expect this value to sum up to 200.

        5. dkey enum active and latency
        engine_io_ops_dkey_enum_active_max
        engine_io_ops_dkey_enum_active_mean
        engine_io_ops_dkey_enum_active_min
        engine_io_ops_dkey_enum_active_stddev
        engine_io_ops_dkey_enum_latency_max
        engine_io_ops_dkey_enum_latency_mean
        engine_io_ops_dkey_enum_latency_min
        engine_io_ops_dkey_enum_latency_stddev

        Active means the number of list_dkey() called at the same time. e.g.,
        If 2 calls are made simultaneously and that's the highest, we would see
        engine_io_ops_dkey_enum_active_max = 2. However, in this test, we expect
        the max to be always 1 because making it more than 1 isn't
        straightforward.

        Latency is the time it took to process the list_dkey() calls. The slight
        difference from active is that min would contain the actual latency and
        not 0 when there's at least one call.

        6. akey enum active and latency
        engine_io_ops_akey_enum_active_max
        engine_io_ops_akey_enum_active_mean
        engine_io_ops_akey_enum_active_min
        engine_io_ops_akey_enum_active_stddev
        engine_io_ops_akey_enum_latency_max
        engine_io_ops_akey_enum_latency_mean
        engine_io_ops_akey_enum_latency_min
        engine_io_ops_akey_enum_latency_stddev

        Same idea as dkey. We also expect to see very similar output as in dkey.

        7. dkey punch active and latency
        engine_io_ops_dkey_punch_active_max
        engine_io_ops_dkey_punch_active_mean
        engine_io_ops_dkey_punch_active_min
        engine_io_ops_dkey_punch_active_stddev
        engine_io_ops_dkey_punch_latency_max
        engine_io_ops_dkey_punch_latency_mean
        engine_io_ops_dkey_punch_latency_min
        engine_io_ops_dkey_punch_latency_stddev

        Same as 5 except we're measuring the active and latency for
        punch_dkeys() calls.

        8. akey punch active and latency
        engine_io_ops_akey_punch_active_max
        engine_io_ops_akey_punch_active_mean
        engine_io_ops_akey_punch_active_min
        engine_io_ops_akey_punch_active_stddev
        engine_io_ops_akey_punch_latency_max
        engine_io_ops_akey_punch_latency_mean
        engine_io_ops_akey_punch_latency_min
        engine_io_ops_akey_punch_latency_stddev

        Same idea as dkey. We also expect to see very similar output as in dkey.

        :avocado: tags=all,full_regression
        :avocado: tags=vm
        :avocado: tags=telemetry
        :avocado: tags=dkey_akey_enum_punch
        """
        self.add_pool()

        self.set_num_targets()

        container = DaosContainer(self.context)
        container.create(self.pool.pool.handle)
        container.open()

        # Object type needs to be OC_S1 so that the objects are spread across
        # all targets.
        self.write_objects_insert_keys(container=container,
                                       objtype=DaosObjClass.OC_S1)

        # Call list_dkey() and list_akey() on each object.
        for i in range(self.obj_count):
            _ = self.ioreqs[i].list_dkey()
            _ = self.ioreqs[i].list_akey(dkey=self.dkeys_a[i])
            _ = self.ioreqs[i].list_akey(dkey=self.dkeys_b[i])

        self.punch_all_keys()

        self.telemetry.dmg.verbose = False

        ### Obtain and verify the io metrics 1 to 4. ###
        # The metric names are looked up by fixed index into
        # ENGINE_POOL_METRICS; the indices below map to the metrics named
        # in the adjacent comments.
        # engine_pool_ops_dkey_enum
        pool_dkey_enum = self.telemetry.ENGINE_POOL_METRICS[5]
        # engine_pool_ops_akey_enum
        pool_akey_enum = self.telemetry.ENGINE_POOL_METRICS[2]
        # engine_pool_ops_dkey_punch
        pool_dkey_punch = self.telemetry.ENGINE_POOL_METRICS[6]
        # engine_pool_ops_akey_punch
        pool_akey_punch = self.telemetry.ENGINE_POOL_METRICS[3]
        specific_metrics = [
            pool_dkey_enum,
            pool_akey_enum,
            pool_dkey_punch,
            pool_akey_punch,
        ]
        pool_out = self.telemetry.get_pool_metrics(
            specific_metrics=specific_metrics)

        def verify_total(metric, label, expected):
            """Record an error if the summed metric != expected."""
            total = self.sum_values(metric_out=pool_out[metric])
            if total != expected:
                msg = "{} total is not {}! Actual = {}".format(
                    label, expected, total)
                self.errors.append(msg)

        # Verify the four counter totals (see docstring items 1-4 for why
        # the expected values are 100/200/200/200).
        verify_total(pool_dkey_enum, "dkey enum", 100)
        verify_total(pool_akey_enum, "akey enum", 200)
        verify_total(pool_dkey_punch, "dkey punch", 200)
        verify_total(pool_akey_punch, "akey punch", 200)

        ### Verify active and latency; metrics 5 to 8. ###
        # Each (prefix, test_latency) pair covers one of the eight metric
        # families described in docstring items 5-8.
        for prefix, test_latency in (
                ("engine_io_ops_dkey_enum_active_", False),
                ("engine_io_ops_akey_enum_active_", False),
                ("engine_io_ops_dkey_enum_latency_", True),
                ("engine_io_ops_akey_enum_latency_", True),
                ("engine_io_ops_dkey_punch_active_", False),
                ("engine_io_ops_akey_punch_active_", False),
                ("engine_io_ops_dkey_punch_latency_", True),
                ("engine_io_ops_akey_punch_latency_", True)):
            self.verify_active_latency(prefix=prefix,
                                       test_latency=test_latency)

        if self.errors:
            self.fail("\n----- Errors detected! -----\n{}".format("\n".join(
                self.errors)))

        container.destroy()
        self.pool.destroy(disconnect=0)
Ejemplo n.º 6
0
class DeleteContainerTest(TestWithServers):
    """
    Tests DAOS container delete and close.
    :avocado: recursive
    """
    def setUp(self):
        """Read the pool-creation parameters used by every test."""
        # FIX: zero-argument super() — the file is Python 3 (print() calls)
        super().setUp()

        # parameters used in pool create
        self.createmode = self.params.get("mode",
                                          '/run/createtests/createmode/')
        self.createuid = os.geteuid()
        self.creategid = os.getegid()
        self.createsetid = self.params.get("setname",
                                           '/run/createtests/createset/')
        self.createsize = self.params.get("size",
                                          '/run/createtests/createsize/')

    def test_container_delete(self):
        """
        Test basic container delete

        :avocado: tags=all,container,tiny,smoke,pr,contdelete
        """
        # each yaml entry is a [value, expected PASS/FAIL] pair
        expected_for_param = []
        uuidlist = self.params.get("uuid",
                                   '/run/createtests/ContainerUUIDS/*/')
        cont_uuid = uuidlist[0]
        expected_for_param.append(uuidlist[1])

        pohlist = self.params.get("poh", '/run/createtests/PoolHandles/*/')
        poh = pohlist[0]
        expected_for_param.append(pohlist[1])

        openlist = self.params.get("opened",
                                   "/run/createtests/ConnectionOpened/*/")
        opened = openlist[0]
        expected_for_param.append(openlist[1])

        forcelist = self.params.get("force",
                                    "/run/createtests/ForceDestroy/*/")
        force = forcelist[0]
        expected_for_param.append(forcelist[1])

        if force >= 1:
            self.cancel("Force >= 1 blocked by issue described in "
                        "https://jira.hpdd.intel.com/browse/DAOS-689")

        if force == 0:
            self.cancel("Force = 0 blocked by "
                        "https://jira.hpdd.intel.com/browse/DAOS-1935")

        # the destroy is expected to fail if any parameter is marked FAIL
        expected_result = 'FAIL' if 'FAIL' in expected_for_param else 'PASS'

        try:
            # initialize a python pool object then create the underlying
            # daos storage
            self.pool = DaosPool(self.context)
            self.pool.create(self.createmode, self.createuid, self.creategid,
                             self.createsize, self.createsetid, None)

            # need a connection to create container
            self.pool.connect(1 << 1)
            self.container = DaosContainer(self.context)

            # create should always work (testing destroy)
            if cont_uuid != 'INVALID':
                cont_uuid = uuid.UUID(uuidlist[0])
                self.container.create(self.pool.handle, cont_uuid)
            else:
                self.container.create(self.pool.handle)

            # Opens the container if required
            if opened:
                self.container.open(self.pool.handle)

            # wait a few seconds and then attempts to destroy container
            time.sleep(5)
            if poh == 'VALID':
                poh = self.pool.handle

            # if container is INVALID, overwrite with non existing UUID
            if cont_uuid == 'INVALID':
                cont_uuid = uuid.uuid4()

            self.container.destroy(force=force, poh=poh, con_uuid=cont_uuid)
            self.container = None

            if expected_result in ['FAIL']:
                self.fail("Test was expected to fail but it passed.\n")

        except DaosApiError as excep:
            self.d_log.error(excep)
            self.d_log.error(traceback.format_exc())
            if expected_result == 'PASS':
                self.fail("Test was expected to pass but it failed.\n")
Ejemplo n.º 7
0
    def test_null_values(self):
        """
        Test ID: DAOS-1376

        Test Description: Pass a dkey and an akey that is null.

        :avocado: tags=all,object,full_regression,small,objupdatenull
        """
        try:
            # parameters used in pool create
            createmode = self.params.get("mode", '/run/conttests/createmode/')
            createsetid = self.params.get("setname",
                                          '/run/conttests/createset/')
            createsize = self.params.get("size", '/run/conttests/createsize/')
            createuid = os.geteuid()
            creategid = os.getegid()

            # initialize a python pool object then create the underlying
            # daos storage
            pool = DaosPool(self.context)
            pool.create(createmode, createuid, creategid, createsize,
                        createsetid, None)
            self.plog.info("Pool %s created.", pool.get_uuid_str())

            # need a connection to create container
            pool.connect(1 << 1)

            # create a container
            container = DaosContainer(self.context)
            container.create(pool.handle)
            self.plog.info("Container %s created.", container.get_uuid_str())

            # now open it
            container.open()

            # data used in the test
            thedata = "a string that I want to stuff into an object"
            thedatasize = len(thedata) + 1

        except DaosApiError as excep:
            print(excep)
            print(traceback.format_exc())
            self.fail("Test failed during setup .\n")

        try:
            # a null dkey must be rejected with DER_INVAL (-1003)
            dkey = None
            akey = "this is the akey"

            container.write_an_obj(thedata, thedatasize, dkey, akey, None,
                                   None, 2)

            container.close()
            container.destroy()
            pool.disconnect()
            pool.destroy(1)
            self.plog.error("Didn't get expected return code.")
            self.fail("Test was expected to return a -1003 but it has not.\n")

        except DaosApiError as excep:
            if '-1003' not in str(excep):
                container.close()
                container.destroy()
                pool.disconnect()
                pool.destroy(1)
                self.plog.error("Didn't get expected return code.")
                print(excep)
                print(traceback.format_exc())
                self.fail("Test was expected to get -1003 but it has not.\n")

        try:
            # a null akey/io descriptor must also be rejected with -1003
            dkey = "this is the dkey"
            akey = None
            container.write_an_obj(thedata, thedatasize, dkey, akey, None,
                                   None, 2)
            # FIX: clean up the container and pool before failing, matching
            # the null-dkey case above; the original left them allocated
            container.close()
            container.destroy()
            pool.disconnect()
            pool.destroy(1)
            self.fail("Test was expected to return a -1003 but it has not.\n")

        except DaosApiError as excep:
            if '-1003' not in str(excep):
                # FIX: same cleanup-on-unexpected-error as the null-dkey
                # branch, which the original omitted here
                container.close()
                container.destroy()
                pool.disconnect()
                pool.destroy(1)
                self.plog.error("Didn't get expected return code.")
                print(excep)
                print(traceback.format_exc())
                self.fail("Test was expected to get -1003 but it has not.\n")

        try:
            # lastly try passing no data; a no-op update is expected to work
            thedata = None
            thedatasize = 0
            dkey = "this is the dkey"
            akey = "this is the akey"

            container.write_an_obj(thedata, thedatasize, dkey, akey, None,
                                   None, 2)
            self.plog.info("Update with no data worked")

        except DaosApiError as excep:
            container.close()
            container.destroy()
            pool.disconnect()
            pool.destroy(1)
            print(excep)
            print(traceback.format_exc())
            self.plog.error("Update with no data failed")
            self.fail("Update with no data failed.\n")

        container.close()
        container.destroy()
        pool.disconnect()
        pool.destroy(1)
        self.plog.info("Test Complete")
Ejemplo n.º 8
0
class ObjectDataValidation(avocado.Test):
    """
    Test Class Description:
        Tests that create Different length records,
        Disconnect the pool/container and reconnect,
        validate the data after reconnect.
    """

    # pylint: disable=too-many-instance-attributes
    def setUp(self):
        """Start agent/servers, then create the pool, container, object and
        IORequest shared by every test in this class.
        """
        self.agent_sessions = None
        self.pool = None
        self.container = None
        self.obj = None
        self.ioreq = None
        self.hostlist = None
        self.hostfile = None
        self.no_of_dkeys = None
        self.no_of_akeys = None
        self.array_size = None
        self.record_length = None

        # Locate the DAOS install prefix recorded at build time.
        with open('../../.build_vars.json') as json_f:
            build_paths = json.load(json_f)
        self.basepath = os.path.normpath(build_paths['PREFIX'] + "/../")
        server_group = self.params.get("name", '/server_config/',
                                       'daos_server')
        self.context = DaosContext(build_paths['PREFIX'] + '/lib64/')
        self.d_log = DaosLog(self.context)
        self.hostlist = self.params.get("test_machines", '/run/hosts/*')
        self.hostfile = write_host_file.write_host_file(
            self.hostlist, self.workdir)
        # Key counts and record sizes are driven by the test .yaml parameters.
        self.no_of_dkeys = self.params.get("no_of_dkeys", '/run/dkeys/*')[0]
        self.no_of_akeys = self.params.get("no_of_akeys", '/run/akeys/*')[0]
        self.array_size = self.params.get("size", '/array_size/')
        self.record_length = self.params.get("length", '/run/record/*')

        self.agent_sessions = agent_utils.run_agent(self.basepath,
                                                    self.hostlist)
        server_utils.run_server(self, self.hostfile, server_group)

        # Create/connect in dependency order: pool -> container -> object.
        self.pool = DaosPool(self.context)
        self.pool.create(self.params.get("mode", '/run/pool/createmode/*'),
                         os.geteuid(), os.getegid(),
                         self.params.get("size", '/run/pool/createsize/*'),
                         self.params.get("setname", '/run/pool/createset/*'),
                         None)
        self.pool.connect(2)

        self.container = DaosContainer(self.context)
        self.container.create(self.pool.handle)
        self.container.open()

        self.obj = DaosObj(self.context, self.container)
        self.obj.create(objcls=1)
        self.obj.open()
        self.ioreq = IORequest(self.context,
                               self.container,
                               self.obj,
                               objtype=4)

    def tearDown(self):
        """Destroy the container and pool, then stop the agent and servers."""
        try:
            if self.container:
                self.container.close()
                self.container.destroy()
            if self.pool:
                self.pool.disconnect()
                self.pool.destroy(1)
        finally:
            # Agent/server shutdown must run even if the cleanup above fails.
            if self.agent_sessions:
                agent_utils.stop_agent(self.agent_sessions)
            server_utils.stop_server(hosts=self.hostlist)

    def reconnect(self):
        '''
        Function to reconnect the pool/container and reopen the Object
        for read verification.
        '''
        # Close the object/container and disconnect the pool.
        self.obj.close()
        self.container.close()
        self.pool.disconnect()
        time.sleep(5)
        # Reconnect the pool, then reopen the container and object.
        self.pool.connect(2)
        self.container.open()
        self.obj.open()
        # A fresh IORequest is required against the reopened object.
        self.ioreq = IORequest(self.context,
                               self.container,
                               self.obj,
                               objtype=4)

    @avocado.fail_on(DaosApiError)
    def test_invalid_tx_commit_close(self):
        """
        Test ID:
            (1)DAOS-1346: Verify commit tx bad parameter behavior.
            (2)DAOS-1343: Verify tx_close bad parameter behavior.
            (3)DAOS-1342: Verify tx_close through daos_api.
            (4)DAOS-1338: Add and verify tx_abort through daos_api.
            (5)DAOS-1339: Verify tx_abort bad parameter behavior.
        Test Description:
            Write Avocado Test to verify commit tx and close tx
                          bad parameter behavior.
        :avocado: tags=all,object,full_regression,small,invalid_tx
        """
        self.d_log.info("==Writing the Single Dataset for negative test...")
        record_index = 0
        expected_error = "RC: -1002"
        dkey = 0
        akey = 0
        indata = ("{0}".format(str(akey)[0]) *
                  self.record_length[record_index])
        c_dkey = ctypes.create_string_buffer("dkey {0}".format(dkey))
        c_akey = ctypes.create_string_buffer("akey {0}".format(akey))
        c_value = ctypes.create_string_buffer(indata)
        c_size = ctypes.c_size_t(ctypes.sizeof(c_value))
        try:
            new_transaction = self.container.get_new_tx()
        except DaosApiError as excep:
            # Initial container get_new_tx failed; skip rest of the test.
            self.fail("##container get_new_tx failed: {}".format(excep))
        # Random offset large enough to not collide with a valid handle.
        invalid_transaction = new_transaction + random.randint(1000, 383838)
        self.log.info("==new_transaction=     %s", new_transaction)
        self.log.info("==invalid_transaction= %s", invalid_transaction)
        self.ioreq.single_insert(c_dkey, c_akey, c_value, c_size,
                                 new_transaction)
        # (1) commit_tx with an invalid handle must fail with RC: -1002.
        try:
            self.container.commit_tx(invalid_transaction)
            self.fail("##(1.1)Container.commit_tx passing with invalid handle")
        except DaosApiError as excep:
            self.log.info(str(excep))
            self.log.info(
                "==(1)Expecting failure: invalid Container.commit_tx.")
            if expected_error not in str(excep):
                self.fail(
                    "##(1.2)Expecting error RC: -1002, but got {}.".format(
                        str(excep)))
        # (2) close_tx with an invalid handle must fail with RC: -1002.
        try:
            self.container.close_tx(invalid_transaction)
            self.fail("##(2.1)Container.close_tx passing with invalid handle")
        except DaosApiError as excep:
            self.log.info(str(excep))
            self.log.info(
                "==(2)Expecting failure: invalid Container.commit_tx.")
            if expected_error not in str(excep):
                self.fail(
                    "##(2.2)Expecting error RC: -1002, but got {}.".format(
                        str(excep)))
        # (3) close_tx with the valid handle is expected to succeed.
        try:
            self.container.close_tx(new_transaction)
            self.log.info("==(3)container.close_tx test passed.")
        except DaosApiError as excep:
            self.log.info(str(excep))
            self.fail("##(3)Failed on close_tx.")

        # (4) abort_tx with an invalid handle must fail with RC: -1002.
        try:
            self.container.abort_tx(invalid_transaction)
            self.fail("##(4.1)Container.abort_tx passing with invalid handle")
        except DaosApiError as excep:
            self.log.info(str(excep))
            self.log.info(
                "==(4)Expecting failure: invalid Container.abort_tx.")
            if expected_error not in str(excep):
                self.fail(
                    "##(4.2)Expecting error RC: -1002, but got {}.".format(
                        str(excep)))

        # (5) Try to abort the transaction which was already closed in (3).
        try:
            self.container.abort_tx(new_transaction)
            self.fail("##(5.1)Container.abort_tx passing with a closed handle")
        except DaosApiError as excep:
            self.log.info(str(excep))
            self.log.info(
                "==(5)Expecting failure: Container.abort_tx closed handle.")
            if expected_error not in str(excep):
                self.fail(
                    "##(5.2)Expecting error RC: -1002, but got {}.".format(
                        str(excep)))

        # (6) Open another transaction and abort it while still open.
        try:
            new_transaction2 = self.container.get_new_tx()
        except DaosApiError as excep:
            self.fail("##(6.1)container get_new_tx failed: {}".format(excep))
        self.log.info("==new_transaction2=     %s", new_transaction2)
        self.ioreq.single_insert(c_dkey, c_akey, c_value, c_size,
                                 new_transaction2)
        try:
            self.container.abort_tx(new_transaction2)
            self.log.info("==(6)container.abort_tx test passed.")
        except DaosApiError as excep:
            self.log.info(str(excep))
            self.fail("##(6.2)Failed on abort_tx.")

    @avocado.fail_on(DaosApiError)
    @skipForTicket("DAOS-3208")
    def test_single_object_validation(self):
        """
        Test ID: DAOS-707
        Test Description: Write Avocado Test to verify single data after
                          pool/container disconnect/reconnect.
        :avocado: tags=all,object,full_regression,small,single_object
        """
        self.d_log.info("Writing the Single Dataset")
        record_index = 0
        # NOTE(review): committed transaction handles are collected here but
        # never read back during verification.
        transaction = []
        for dkey in range(self.no_of_dkeys):
            for akey in range(self.no_of_akeys):
                # Record data repeats the akey's first digit; the record size
                # cycles round-robin through self.record_length.
                indata = ("{0}".format(str(akey)[0]) *
                          self.record_length[record_index])
                c_dkey = ctypes.create_string_buffer("dkey {0}".format(dkey))
                c_akey = ctypes.create_string_buffer("akey {0}".format(akey))
                c_value = ctypes.create_string_buffer(indata)
                c_size = ctypes.c_size_t(ctypes.sizeof(c_value))

                new_transaction = self.container.get_new_tx()
                self.ioreq.single_insert(c_dkey, c_akey, c_value, c_size,
                                         new_transaction)
                self.container.commit_tx(new_transaction)
                transaction.append(new_transaction)
                record_index = record_index + 1
                if record_index == len(self.record_length):
                    record_index = 0

        # Drop and re-establish every handle before reading the data back.
        self.reconnect()

        self.d_log.info("Single Dataset Verification -- Started")
        record_index = 0
        transaction_index = 0
        for dkey in range(self.no_of_dkeys):
            for akey in range(self.no_of_akeys):
                indata = ("{0}".format(str(akey)[0]) *
                          self.record_length[record_index])
                c_dkey = ctypes.create_string_buffer("dkey {0}".format(dkey))
                c_akey = ctypes.create_string_buffer("akey {0}".format(akey))
                val = self.ioreq.single_fetch(c_dkey, c_akey, len(indata) + 1)
                # repr(...)[1:-1] strips the surrounding quotes before compare.
                if indata != (repr(val.value)[1:-1]):
                    self.d_log.error("ERROR:Data mismatch for "
                                     "dkey = {0}, "
                                     "akey = {1}".format(
                                         "dkey {0}".format(dkey),
                                         "akey {0}".format(akey)))
                    self.fail(
                        "ERROR: Data mismatch for dkey = {0}, akey={1}".format(
                            "dkey {0}".format(dkey), "akey {0}".format(akey)))

                transaction_index = transaction_index + 1
                record_index = record_index + 1
                if record_index == len(self.record_length):
                    record_index = 0

    @avocado.fail_on(DaosApiError)
    @skipForTicket("DAOS-3208")
    def test_array_object_validation(self):
        """
        Test ID: DAOS-707
        Test Description: Write Avocado Test to verify Array data after
                          pool/container disconnect/reconnect.
        :avocado: tags=all,object,full_regression,small,array_object
        """
        self.d_log.info("Writing the Array Dataset")
        record_index = 0
        # NOTE(review): committed transaction handles are collected here but
        # never read back during verification.
        transaction = []
        for dkey in range(self.no_of_dkeys):
            for akey in range(self.no_of_akeys):
                # Each array value is self.array_size copies of the same
                # (buffer, size) record; size cycles through record_length.
                c_values = []
                value = ("{0}".format(str(akey)[0]) *
                         self.record_length[record_index])
                for item in range(self.array_size):
                    c_values.append(
                        (ctypes.create_string_buffer(value), len(value) + 1))
                c_dkey = ctypes.create_string_buffer("dkey {0}".format(dkey))
                c_akey = ctypes.create_string_buffer("akey {0}".format(akey))

                new_transaction = self.container.get_new_tx()
                self.ioreq.insert_array(c_dkey, c_akey, c_values,
                                        new_transaction)
                self.container.commit_tx(new_transaction)
                transaction.append(new_transaction)

                record_index = record_index + 1
                if record_index == len(self.record_length):
                    record_index = 0

        # Drop and re-establish every handle before reading the data back.
        self.reconnect()

        self.d_log.info("Array Dataset Verification -- Started")
        record_index = 0
        transaction_index = 0
        for dkey in range(self.no_of_dkeys):
            for akey in range(self.no_of_akeys):
                indata = []
                value = ("{0}".format(str(akey)[0]) *
                         self.record_length[record_index])
                for item in range(self.array_size):
                    indata.append(value)
                c_dkey = ctypes.create_string_buffer("dkey {0}".format(dkey))
                c_akey = ctypes.create_string_buffer("akey {0}".format(akey))
                c_rec_count = ctypes.c_uint(len(indata))
                c_rec_size = ctypes.c_size_t(len(indata[0]) + 1)

                outdata = self.ioreq.fetch_array(c_dkey, c_akey, c_rec_count,
                                                 c_rec_size)

                # [:-1] drops the trailing NUL that the fetch includes.
                for item in enumerate(indata):
                    if indata[item[0]] != outdata[item[0]][:-1]:
                        self.d_log.error("ERROR:Data mismatch for "
                                         "dkey = {0}, "
                                         "akey = {1}".format(
                                             "dkey {0}".format(dkey),
                                             "akey {0}".format(akey)))
                        self.fail(
                            "ERROR:Data mismatch for dkey = {0}, akey={1}".
                            format("dkey {0}".format(dkey),
                                   "akey {0}".format(akey)))

                transaction_index = transaction_index + 1
                record_index = record_index + 1
                if record_index == len(self.record_length):
                    record_index = 0
# Ejemplo n.º 9
# 0
    def test_metadata_fillup(self):
        """JIRA ID: DAOS-1512.

        Test Description:
            Test to verify no IO happens after metadata is full.

        Use Cases:
            ?

        :avocado: tags=all,metadata,large,metadatafill,hw
        :avocado: tags=full_regression
        """
        # The test runs in three sequential phases:
        #   Phase 1: nearly fill the pool metadata with container creates;
        #            no DaosApiError is expected (otherwise fail the test).
        #   Phase 2: overload the metadata with another container-create
        #            loop; a DaosApiError IS expected (otherwise fail).
        #   Phase 3: destroy every container created, proving the "critical"
        #            destroy in rdb (and vos) works without cascading
        #            nospace errors.
        self.pool.pool.connect(2)

        self.log.info("Phase 1: Fillup Metadata (expected to work) ...")
        created_containers = []
        try:
            for _ in range(NO_OF_MAX_CONTAINER):
                cont = DaosContainer(self.context)
                cont.create(self.pool.pool.handle)
                created_containers.append(cont)
        except DaosApiError as error:
            print(error, traceback.format_exc())
            self.fail(
                "Phase 1: failure (container creates should have worked)")
        self.log.info("Phase 1: pass (all container creates successful)")

        self.log.info("Phase 2: Metadata Overload (expect to fail) ...")
        try:
            for _ in range(400):
                cont = DaosContainer(self.context)
                cont.create(self.pool.pool.handle)
                created_containers.append(cont)
        except DaosApiError:
            self.log.info("Phase 2: pass (container create failed as "
                          "expected)")
        else:
            # Every overload create succeeded -- metadata limit not enforced.
            self.fail("Phase 2: fail (expected container create failure)")

        self.log.info("Phase 3: Cleaning up containers after "
                      "DaosApiError (expected to work)")
        try:
            for cont in created_containers:
                cont.destroy()
        except DaosApiError as error:
            print(error, traceback.format_exc())
            self.fail("Phase 3: fail (container destroy error)")
        self.log.info("Phase 3: pass (containers destroyed "
                      "successfully)")
# Ejemplo n.º 10
# 0
class ContainerAsync(TestWithServers):
    """
    Tests DAOS container create/destroy/open/close/query in asynchronous
    mode: each operation is issued with the cb_func callback, which is
    expected to record the operation's return code in GLOB_RC and set
    GLOB_SIGNAL when it fires (cb_func is defined elsewhere in this file
    -- TODO confirm).

    :avocado: recursive
    """
    def __init__(self, *args, **kwargs):
        """Initialize a ContainerAsync object."""
        super(ContainerAsync, self).__init__(*args, **kwargs)
        self.container1 = None
        self.container2 = None
        self.pool = None

    def test_createasync(self):
        """
        Test container create for asynchronous mode.

        :avocado: tags=all,small,full_regression,container,createasync
        """

        global GLOB_SIGNAL
        global GLOB_RC

        # parameters used in pool create
        createmode = self.params.get("mode", '/run/createtests/createmode/*/')
        createsetid = self.params.get("setname", '/run/createtests/createset/')
        createsize = self.params.get("size", '/run/createtests/createsize/')
        createuid = os.geteuid()
        creategid = os.getegid()

        try:
            # initialize a python pool object then create the underlying
            # daos storage
            self.pool = DaosPool(self.context)

            self.pool.create(createmode, createuid, creategid, createsize,
                             createsetid, None)

            poh = self.pool.handle

            self.pool.connect(1 << 1)

            # Container initialization and creation
            self.container1 = DaosContainer(self.context)
            self.container2 = DaosContainer(self.context)

            # Async create: wait for cb_func to signal completion.
            GLOB_SIGNAL = threading.Event()
            self.container1.create(poh, None, cb_func)

            GLOB_SIGNAL.wait()
            if GLOB_RC != 0:
                self.fail("RC not as expected in async test")
            print("RC after successful container create: ", GLOB_RC)

            # Try to recreate container after destroying pool,
            # this should fail. Checking rc after failure.
            self.pool.disconnect()
            self.pool.destroy(1)
            GLOB_SIGNAL = threading.Event()
            # Sentinel value proves the callback actually updated GLOB_RC.
            GLOB_RC = -9900000
            self.container2.create(poh, None, cb_func)

            GLOB_SIGNAL.wait()
            if GLOB_RC == 0:
                self.fail("RC not as expected in async test")
            print("RC after unsuccessful container create: ", GLOB_RC)

            # cleanup the pool and container
            self.pool = None

        except DaosApiError as excep:
            # NOTE(review): the error is only printed; an unexpected
            # DaosApiError does not fail the test.
            print(excep)
            print(traceback.format_exc())

    def test_destroyasync(self):
        """
        Test container destroy for asynchronous mode.

        :avocado: tags=all,small,full_regression,container,contdestroyasync
        """

        global GLOB_SIGNAL
        global GLOB_RC

        # parameters used in pool create
        createmode = self.params.get("mode", '/run/createtests/createmode/*/')
        createsetid = self.params.get("setname", '/run/createtests/createset/')
        createsize = self.params.get("size", '/run/createtests/createsize/')
        createuid = os.geteuid()
        creategid = os.getegid()

        try:
            # initialize a python pool object then create the underlying
            # daos storage
            self.pool = DaosPool(self.context)

            self.pool.create(createmode, createuid, creategid, createsize,
                             createsetid, None)

            poh = self.pool.handle

            self.pool.connect(1 << 1)

            # Container initialization and creation
            self.container1 = DaosContainer(self.context)
            self.container2 = DaosContainer(self.context)

            self.container1.create(poh)

            # Async destroy of the just-created container should succeed.
            GLOB_SIGNAL = threading.Event()
            self.container1.destroy(1, poh, None, cb_func)

            GLOB_SIGNAL.wait()
            if GLOB_RC != 0:
                self.fail("RC not as expected in async test")
            print("RC after successful container create: ", GLOB_RC)

            # Try to destroy container again, this should fail, as non-existent.
            # Checking rc after failure.
            GLOB_SIGNAL = threading.Event()
            GLOB_RC = -9900000
            self.container2.destroy(1, poh, None, cb_func)

            GLOB_SIGNAL.wait()
            # -1003 is the rc expected for destroying a nonexistent container.
            if GLOB_RC != -1003:
                self.fail("RC not as expected in async test")
            print("RC after container destroy failed:", GLOB_RC)
        except DaosApiError as excep:
            # NOTE(review): the error is only printed; an unexpected
            # DaosApiError does not fail the test.
            print(excep)
            print(traceback.format_exc())

    def test_openasync(self):
        """
        Test container open for asynchronous mode.

        :avocado: tags=all,small,full_regression,container,openasync
        """

        global GLOB_SIGNAL
        global GLOB_RC

        # parameters used in pool create
        createmode = self.params.get("mode", '/run/createtests/createmode/*/')
        createsetid = self.params.get("setname", '/run/createtests/createset/')
        createsize = self.params.get("size", '/run/createtests/createsize/')
        createuid = os.geteuid()
        creategid = os.getegid()

        try:
            # initialize a python pool object then create the underlying
            # daos storage
            self.pool = DaosPool(self.context)

            self.pool.create(createmode, createuid, creategid, createsize,
                             createsetid, None)

            poh = self.pool.handle

            self.pool.connect(1 << 1)

            # Container initialization and creation
            self.container1 = DaosContainer(self.context)
            self.container2 = DaosContainer(self.context)

            self.container1.create(poh)

            str_cuuid = self.container1.get_uuid_str()
            cuuid = uuid.UUID(str_cuuid)

            # Async open of the created container should succeed.
            GLOB_SIGNAL = threading.Event()
            self.container1.open(poh, cuuid, 2, cb_func)

            GLOB_SIGNAL.wait()
            if GLOB_RC != 0:
                self.fail("RC not as expected in async test")
            print("RC after successful container create: ", GLOB_RC)

            # Try to open container2, this should fail, as non-existent.
            # Checking rc after failure.
            GLOB_SIGNAL = threading.Event()
            # Sentinel value proves the callback actually updated GLOB_RC.
            GLOB_RC = -9900000
            self.container2.open(None, None, None, cb_func)

            GLOB_SIGNAL.wait()
            if GLOB_RC == 0:
                self.fail("RC not as expected in async test")
            print("RC after container destroy failed:", GLOB_RC)

            # cleanup the container
            self.container1.close()
            self.container1.destroy()
        except DaosApiError as excep:
            # NOTE(review): the error is only printed; an unexpected
            # DaosApiError does not fail the test.
            print(excep)
            print(traceback.format_exc())

    def test_closeasync(self):
        """
        Test container close for asynchronous mode.

        :avocado: tags=all,small,full_regression,container,closeasync
        """

        global GLOB_SIGNAL
        global GLOB_RC

        # parameters used in pool create
        createmode = self.params.get("mode", '/run/createtests/createmode/*/')
        createsetid = self.params.get("setname", '/run/createtests/createset/')
        createsize = self.params.get("size", '/run/createtests/createsize/')
        createuid = os.geteuid()
        creategid = os.getegid()

        try:
            # initialize a python pool object then create the underlying
            # daos storage
            self.pool = DaosPool(self.context)

            self.pool.create(createmode, createuid, creategid, createsize,
                             createsetid, None)

            poh = self.pool.handle

            self.pool.connect(1 << 1)

            # Container initialization and creation
            self.container1 = DaosContainer(self.context)
            self.container2 = DaosContainer(self.context)

            self.container1.create(poh)

            str_cuuid = self.container1.get_uuid_str()
            cuuid = uuid.UUID(str_cuuid)

            self.container1.open(poh, cuuid, 2)

            # Async close of the opened container should succeed.
            GLOB_SIGNAL = threading.Event()
            self.container1.close(cb_func=cb_func)

            GLOB_SIGNAL.wait()
            if GLOB_RC != 0:
                self.fail("RC not as expected in async test: "
                          "{0}".format(GLOB_RC))
            print("RC after successful container create: ", GLOB_RC)

            # Try to open container2, this should fail, as non-existent.
            # Checking rc after failure.
            GLOB_SIGNAL = threading.Event()
            # Sentinel value proves the callback actually updated GLOB_RC.
            GLOB_RC = -9900000
            self.container2.close(cb_func=cb_func)

            GLOB_SIGNAL.wait()
            if GLOB_RC == 0:
                self.fail("RC not as expected in async test: "
                          "{0}".format(GLOB_RC))
            print("RC after container destroy failed:", GLOB_RC)

            # cleanup the container
            self.container1.destroy()
        except DaosApiError as excep:
            # NOTE(review): the error is only printed; an unexpected
            # DaosApiError does not fail the test.
            print(excep)
            print(traceback.format_exc())

    def test_queryasync(self):
        """
        Test container query for asynchronous mode.

        :avocado: tags=all,small,full_regression,container,queryasync
        """

        global GLOB_SIGNAL
        global GLOB_RC

        # parameters used in pool create
        createmode = self.params.get("mode", '/run/createtests/createmode/*/')
        createsetid = self.params.get("setname", '/run/createtests/createset/')
        createsize = self.params.get("size", '/run/createtests/createsize/')
        createuid = os.geteuid()
        creategid = os.getegid()

        try:
            # initialize a python pool object then create the underlying
            # daos storage
            self.pool = DaosPool(self.context)

            self.pool.create(createmode, createuid, creategid, createsize,
                             createsetid, None)

            poh = self.pool.handle

            self.pool.connect(1 << 1)

            # Container initialization and creation
            self.container1 = DaosContainer(self.context)
            self.container2 = DaosContainer(self.context)

            self.container1.create(poh)

            dummy_str_cuuid = self.container1.get_uuid_str()

            # Open container
            self.container1.open(poh, None, 2, None)

            # Async query of the opened container should succeed.
            GLOB_SIGNAL = threading.Event()
            self.container1.query(cb_func=cb_func)

            GLOB_SIGNAL.wait()
            if GLOB_RC != 0:
                self.fail("RC not as expected in async test: "
                          "{0}".format(GLOB_RC))
            print("RC after successful container create: ", GLOB_RC)

            # Close opened container
            self.container1.close()

            # Try to open container2, this should fail, as non-existent.
            # Checking rc after failure.
            GLOB_SIGNAL = threading.Event()
            # Sentinel value proves the callback actually updated GLOB_RC.
            GLOB_RC = -9900000
            self.container2.query(cb_func=cb_func)

            GLOB_SIGNAL.wait()
            if GLOB_RC == 0:
                self.fail("RC not as expected in async test: "
                          "{0}".format(GLOB_RC))
            print("RC after container destroy failed:", GLOB_RC)

            # cleanup the container
            self.container1.destroy()
        except DaosApiError as excep:
            # NOTE(review): the error is only printed; an unexpected
            # DaosApiError does not fail the test.
            print(excep)
            print(traceback.format_exc())
# Ejemplo n.º 11
# 0
class DeleteContainerTest(TestWithServers):
    """
    Tests DAOS container delete and close.
    :avocado: recursive
    """
    def test_container_delete(self):
        """
        Test basic container delete.

        Reads the uuid/poh/opened/force parameter variants from the yaml,
        derives the expected PASS/FAIL outcome, attempts the destroy and
        verifies the outcome matches the expectation.

        :avocado: tags=all,container,tiny,smoke,full_regression,contdelete
        """
        expected_for_param = []
        uuidlist = self.params.get("uuid",
                                   '/run/createtests/ContainerUUIDS/*/')
        cont_uuid = uuidlist[0]
        expected_for_param.append(uuidlist[1])

        pohlist = self.params.get("poh", '/run/createtests/PoolHandles/*/')
        poh = pohlist[0]
        expected_for_param.append(pohlist[1])

        openlist = self.params.get("opened",
                                   "/run/createtests/ConnectionOpened/*/")
        opened = openlist[0]
        expected_for_param.append(openlist[1])

        forcelist = self.params.get("force",
                                    "/run/createtests/ForceDestroy/*/")
        force = forcelist[0]

        # force=0 in .yaml file specifies FAIL, however:
        # if not opened and force=0 expect pass
        if force == 0 and not opened:
            expected_for_param.append('PASS')
        else:
            expected_for_param.append(forcelist[1])

        # opened=True in .yaml file specifies PASS, however
        # if it is also the case force=0, then FAIL is expected

        # any single FAIL expectation makes the overall expectation FAIL
        expected_result = 'PASS'
        for result in expected_for_param:
            if result == 'FAIL':
                expected_result = 'FAIL'
                break

        # initialize a python pool object then create the underlying
        # daos storage and connect to it
        self.prepare_pool()

        passed = False
        # real uuid of the created container, used by the recovery destroy in
        # the except path; stays None until create() actually succeeded so a
        # setup failure cannot raise NameError on an unbound name
        save_cont_uuid = None
        try:
            self.container = DaosContainer(self.context)

            # create should always work (testing destroy)
            if not cont_uuid == 'INVALID':
                cont_uuid = uuid.UUID(uuidlist[0])
                save_cont_uuid = cont_uuid
                self.container.create(self.pool.pool.handle, cont_uuid)
            else:
                self.container.create(self.pool.pool.handle)
                save_cont_uuid = uuid.UUID(self.container.get_uuid_str())

            # Opens the container if required
            if opened:
                self.container.open(self.pool.pool.handle)

            # wait a few seconds and then attempts to destroy container
            time.sleep(5)
            if poh == 'VALID':
                poh = self.pool.pool.handle

            # if container is INVALID, overwrite with non existing UUID
            if cont_uuid == 'INVALID':
                cont_uuid = uuid.uuid4()

            self.container.destroy(force=force, poh=poh, con_uuid=cont_uuid)

            passed = True

        except DaosApiError as excep:
            self.log.info(excep, traceback.format_exc())
            # recovery destroy of the real container, but only if it was
            # actually created; otherwise there is nothing to clean up
            if save_cont_uuid is not None:
                self.container.destroy(force=1,
                                       poh=self.pool.pool.handle,
                                       con_uuid=save_cont_uuid)

        finally:
            # close container handle, release a reference on pool in client lib
            # Otherwise test will ERROR in tearDown (pool disconnect -DER_BUSY)
            if opened:
                self.container.close()

            self.container = None

            if expected_result == 'PASS' and not passed:
                self.fail("Test was expected to pass but it failed.\n")
            if expected_result == 'FAIL' and passed:
                self.fail("Test was expected to fail but it passed.\n")
Ejemplo n.º 12
0
    def test_tgt_dkey_akey_punch(self):
        """Test active and latency for tgt punch.

        This case is the same as the metrics 7 and 8 in
        test_dkey_akey_enum_punch() except the metrics have tgt and we need to
        use OC_RP_2G1.

        Test Steps:
        1. Write 100 objects.
        2. Insert 2 dkeys per object. Insert 1 akey per dkey. Use OC_RP_2G1.
        3. Punch all akeys and dkeys.
        4. Verify the metrics below.

        --- Metrics tested ---
        1. tgt dkey punch active and latency.
        engine_io_ops_tgt_dkey_punch_active_max
        engine_io_ops_tgt_dkey_punch_active_mean
        engine_io_ops_tgt_dkey_punch_active_min
        engine_io_ops_tgt_dkey_punch_active_stddev
        engine_io_ops_tgt_dkey_punch_latency_max
        engine_io_ops_tgt_dkey_punch_latency_mean
        engine_io_ops_tgt_dkey_punch_latency_min
        engine_io_ops_tgt_dkey_punch_latency_stddev

        2. tgt akey punch active and latency.
        engine_io_ops_tgt_akey_punch_active_max
        engine_io_ops_tgt_akey_punch_active_mean
        engine_io_ops_tgt_akey_punch_active_min
        engine_io_ops_tgt_akey_punch_active_stddev
        engine_io_ops_tgt_akey_punch_latency_max
        engine_io_ops_tgt_akey_punch_latency_mean
        engine_io_ops_tgt_akey_punch_latency_min
        engine_io_ops_tgt_akey_punch_latency_stddev

        :avocado: tags=all,full_regression
        :avocado: tags=vm
        :avocado: tags=telemetry
        :avocado: tags=tgt_dkey_akey_punch
        """
        self.add_pool()

        self.set_num_targets()

        container = DaosContainer(self.context)
        container.create(self.pool.pool.handle)
        container.open()

        # Object type needs to be OC_RP_2G1 because we're testing tgt.
        self.write_objects_insert_keys(container=container,
                                       objtype=DaosObjClass.OC_RP_2G1)

        self.punch_all_keys()

        self.telemetry.dmg.verbose = False

        ### Verify active and latency; metrics 1 and 2. ###
        # Verify tgt dkey punch active.
        self.verify_active_latency(
            prefix="engine_io_ops_tgt_dkey_punch_active_", test_latency=False)

        # Verify dkey punch latency.
        self.verify_active_latency(
            prefix="engine_io_ops_tgt_dkey_punch_latency_", test_latency=True)

        # Verify akey punch active.
        self.verify_active_latency(
            prefix="engine_io_ops_tgt_akey_punch_active_", test_latency=False)

        # Verify akey punch latency.
        self.verify_active_latency(
            prefix="engine_io_ops_tgt_akey_punch_latency_", test_latency=True)

        # Clean up BEFORE checking for errors: self.fail() raises, so any
        # cleanup placed after it would be skipped when errors were detected,
        # leaking the container and leaving the pool behind.
        container.destroy()
        self.pool.destroy(disconnect=0)

        if self.errors:
            self.fail("\n----- Errors detected! -----\n{}".format("\n".join(
                self.errors)))
Ejemplo n.º 13
0
    def test_pool_tgt_dkey_akey_punch(self):
        """Test punch count for tgt values.

        tgt is related to replication, so the test step is similar to above,
        but we use the replication object class OC_RP_2G1.

        Test Steps:
        1. Write 100 objects.
        2. Insert 2 dkeys per object. Insert 1 akey per dkey. Use OC_RP_2G1.
        3. Punch all akeys and dkeys.
        4. Verify the metrics below.

        --- Metrics Tested ---
        1. engine_pool_ops_tgt_dkey_punch
        Number of dkeys punched. There are 200 dkeys total and we punch them one
        at a time, so we expect this value to sum up to 200.

        2. engine_pool_ops_akey_punch
        Number of akeys punched. There are 200 akeys total and we punch them one
        at a time, so we expect this value to sum up to 200.

        :avocado: tags=all,full_regression
        :avocado: tags=vm
        :avocado: tags=telemetry
        :avocado: tags=pool_tgt_dkey_akey_punch
        """
        self.add_pool()

        self.set_num_targets()

        container = DaosContainer(self.context)
        container.create(self.pool.pool.handle)
        container.open()

        # Create objects and dkey/akey in it. Use RP_2G1 for tgt metrics.
        self.write_objects_insert_keys(container=container,
                                       objtype=DaosObjClass.OC_RP_2G1)

        # Punch the akeys and the dkeys in the objects.
        self.punch_all_keys()

        self.telemetry.dmg.verbose = False

        ### Obtain and verify the pool metrics 1 and 2 ###
        pool_tgt_dkey_punch = self.telemetry.ENGINE_POOL_METRICS[21]
        pool_tgt_akey_punch = self.telemetry.ENGINE_POOL_METRICS[20]
        specific_metrics = [pool_tgt_dkey_punch, pool_tgt_akey_punch]
        pool_out = self.telemetry.get_pool_metrics(
            specific_metrics=specific_metrics)

        # Verify tgt_dkey_punch total is 200.
        tgt_dkey_punch_total = self.sum_values(
            metric_out=pool_out[pool_tgt_dkey_punch])
        if tgt_dkey_punch_total != 200:
            msg = "tgt dkey punch total is not 200! Actual = {}".format(
                tgt_dkey_punch_total)
            self.errors.append(msg)

        # Verify tgt_akey_punch total is 200.
        tgt_akey_punch_total = self.sum_values(
            metric_out=pool_out[pool_tgt_akey_punch])
        if tgt_akey_punch_total != 200:
            msg = "tgt akey punch total is not 200! Actual = {}".format(
                tgt_akey_punch_total)
            self.errors.append(msg)

        # Clean up BEFORE checking for errors: self.fail() raises, so any
        # cleanup placed after it would be skipped when errors were detected,
        # leaking the container and leaving the pool behind.
        container.destroy()
        self.pool.destroy(disconnect=0)

        if self.errors:
            self.fail("\n----- Errors detected! -----\n{}".format("\n".join(
                self.errors)))
Ejemplo n.º 14
0
    def test_array_obj(self):
        """
        Test ID: DAOS-961

        Test Description: Writes an array to an object and then reads it
        back and verifies it.

        :avocado: tags=all,smoke,pr,object,tiny,basicobject
        """
        self.prepare_pool()

        try:
            # create a container
            container = DaosContainer(self.context)
            container.create(self.pool.pool.handle)
            self.plog.info("Container %s created.", container.get_uuid_str())

            # now open it
            container.open()

            # do a query and compare the UUID returned from create with
            # that returned by query
            container.query()

            if container.get_uuid_str() != c_uuid_to_str(
                    container.info.ci_uuid):
                self.fail("Container UUID did not match the one in info\n")

            # create an object and write some data into it
            thedata = ["data string one",
                       "data string two",
                       "data string tre"]
            dkey = "this is the dkey"
            akey = "this is the akey"

            self.plog.info("writing array to dkey >%s< akey >%s<.", dkey, akey)
            oid = container.write_an_array_value(thedata,
                                                 dkey,
                                                 akey,
                                                 obj_cls=3)

            # read the data back and make sure its correct
            length = len(thedata[0])
            thedata2 = container.read_an_array(len(thedata), length + 1, dkey,
                                               akey, oid)

            # verify EVERY element round-trips; the original check only
            # compared elements 0 and 2 and silently skipped element 1
            for idx, expected in enumerate(thedata):
                if expected[0:length - 1] != thedata2[idx][0:length - 1]:
                    self.plog.error("Data mismatch")
                    self.plog.error("Wrote: >%s<", expected)
                    self.plog.error("Read: >%s<", thedata2[idx])
                    self.fail("Write data, read it back, didn't match\n")

            container.close()

            # wait a few seconds and then destroy
            time.sleep(5)
            container.destroy()

            self.plog.info("Test Complete")

        except DaosApiError as excep:
            self.plog.error("Test Failed, exception was thrown.")
            print(excep)
            print(traceback.format_exc())
            self.fail("Test was expected to pass but it failed.\n")
Ejemplo n.º 15
0
    def test_bad_handle(self):
        """
        Test ID: DAOS-1376

        Test Description: Pass a bogus object handle, should return bad handle.

        :avocado: tags=all,object,full_regression,small,objbadhand
        """
        # initialize up front so the except path can tell what actually got
        # created; the original referenced these names unconditionally and
        # would raise NameError if the failure happened during setup
        pool = None
        container = None
        saved_oh = None
        try:
            # parameters used in pool create
            createmode = self.params.get("mode", '/run/conttests/createmode/')
            createsetid = self.params.get("setname",
                                          '/run/conttests/createset/')
            createsize = self.params.get("size", '/run/conttests/createsize/')
            createuid = os.geteuid()
            creategid = os.getegid()

            # initialize a python pool object then create the underlying
            # daos storage
            pool = DaosPool(self.context)
            pool.create(createmode, createuid, creategid, createsize,
                        createsetid, None)
            self.plog.info("Pool %s created.", pool.get_uuid_str())

            # need a connection to create container
            pool.connect(1 << 1)

            # create a container
            container = DaosContainer(self.context)
            container.create(pool.handle)
            self.plog.info("Container %s created.", container.get_uuid_str())

            # now open it
            container.open()

            # create an object and write some data into it
            thedata = "a string that I want to stuff into an object"
            thedatasize = len(thedata) + 1
            dkey = "this is the dkey"
            akey = "this is the akey"
            obj, dummy_tx = container.write_an_obj(thedata, thedatasize, dkey,
                                                   akey, None, None, 2)

            # trash the object handle; the next write must fail with -1002
            saved_oh = obj.obj_handle
            obj.obj_handle = 99999

            obj, dummy_tx = container.write_an_obj(thedata, thedatasize, dkey,
                                                   akey, obj, None, 2)

            container.oh = saved_oh
            container.close()
            container.destroy()
            pool.disconnect()
            pool.destroy(1)
            self.fail("Test was expected to return a -1002 but it has not.\n")

        except DaosApiError as excep:
            # clean up only what exists; skip container teardown when the
            # failure happened before the handle swap
            if container is not None and saved_oh is not None:
                container.oh = saved_oh
                container.close()
                container.destroy()
            if pool is not None:
                pool.disconnect()
                pool.destroy(1)
            self.plog.info("Test Complete")
            if '-1002' not in str(excep):
                print(excep)
                print(traceback.format_exc())
                self.fail("Test was expected to get -1002 but it has not.\n")
Ejemplo n.º 16
0
    def test_tx_basics(self):
        """
        Perform I/O to an object in a container in 2 different transactions,
        verifying basic I/O and transactions in particular.

        NOTE: this was an epoch test and all I did was get it working with tx
        Not a good test at this point, need to redesign when tx is fully
        working.

        :avocado: tags=all,container,tx,small,smoke,pr,basictx
        """
        self.pool = None

        try:
            # parameters used in pool create
            createmode = self.params.get("mode", '/run/poolparams/createmode/')
            createuid = os.geteuid()
            creategid = os.getegid()
            createsetid = self.params.get("setname",
                                          '/run/poolparams/createset/')
            createsize = self.params.get("size", '/run/poolparams/createsize/')

            # initialize a python pool object then create the underlying
            # daos storage
            self.pool = DaosPool(self.context)
            self.pool.create(createmode, createuid, creategid,
                             createsize, createsetid, None)

            # need a connection to create container
            self.pool.connect(1 << 1)

            # create a container
            container = DaosContainer(self.context)
            container.create(self.pool.handle)

            # now open it
            container.open()

            # do a query and compare the UUID returned from create with
            # that returned by query
            container.query()

            if container.get_uuid_str() != c_uuid_to_str(
                    container.info.ci_uuid):
                self.fail("Container UUID did not match the one in info\n")

            # create an object and write some data into it
            thedata = "a string that I want to stuff into an object"
            # derive the size from the string (was a hard-coded 45) so the
            # two cannot drift apart; +1 matches the len+1 convention used
            # by the other write_an_obj callers in this file
            thedatasize = len(thedata) + 1
            dkey = "this is the dkey"
            akey = "this is the akey"

            oid, txn = container.write_an_obj(thedata, thedatasize,
                                              dkey, akey, None, None, 2)

            # read the data back and make sure its correct
            thedata2 = container.read_an_obj(thedatasize, dkey, akey,
                                             oid, txn)
            if thedata != thedata2.value:
                print("thedata>" + thedata)
                print("thedata2>" + thedata2.value)
                self.fail("Write data 1, read it back, didn't match\n")

            # repeat above, but know that the write_an_obj call is advancing
            # the epoch so the original copy remains and the new copy is in
            # a new epoch.
            thedata3 = "a different string"
            thedatasize2 = len(thedata3) + 1  # was a hard-coded 19
            # note using the same keys so writing to the same spot
            dkey = "this is the dkey"
            akey = "this is the akey"

            oid, tx2 = container.write_an_obj(thedata3, thedatasize2,
                                              dkey, akey, oid, None, 2)

            # read the data back and make sure its correct
            thedata4 = container.read_an_obj(thedatasize2, dkey, akey,
                                             oid, tx2)
            if thedata3 != thedata4.value:
                self.fail("Write data 2, read it back, didn't match\n")

            # transactions generally don't work this way but need to explore
            # an alternative to below code once model is complete, maybe
            # read from a snapshot or read from TX_NONE etc.

            # the original data should still be there too
            #thedata5 = container.read_an_obj(thedatasize, dkey, akey,
            #                                 oid, transaction)
            #if thedata != thedata5.value:
            #    self.fail("Write data 3, read it back, didn't match\n")

            container.close()

            # wait a few seconds and then destroy
            time.sleep(5)
            container.destroy()

        except DaosApiError as excep:
            print(excep)
            print(traceback.format_exc())
            self.fail("Test was expected to pass but it failed.\n")
Ejemplo n.º 17
0
class ObjFetchBadParam(TestWithServers):
    """
    Test Class Description:
    Pass an assortment of bad parameters to the daos_obj_fetch function.
    :avocado: recursive
    """
    def setUp(self):
        """Create a pool/container and write a known object for the tests.

        Populates self.container, self.obj, self.datasize, self.dkey and
        self.akey, which the test methods below rely on.
        """
        super(ObjFetchBadParam, self).setUp()
        time.sleep(5)

        self.prepare_pool()

        try:
            # create a container
            self.container = DaosContainer(self.context)
            self.container.create(self.pool.pool.handle)

            # now open it
            self.container.open()

            # create an object and write some data into it
            thedata = "a string that I want to stuff into an object"
            self.datasize = len(thedata) + 1
            self.dkey = "this is the dkey"
            self.akey = "this is the akey"
            self.obj = self.container.write_an_obj(thedata, self.datasize,
                                                   self.dkey, self.akey, None,
                                                   None, 2)

            # read the data back to prove the object is valid before any
            # bad-parameter test runs against it
            thedata2 = self.container.read_an_obj(self.datasize, self.dkey,
                                                  self.akey, self.obj)
            if thedata not in thedata2.value:
                print(thedata)
                print(thedata2.value)
                self.fail("Error reading back data, test failed during"\
                         " the initial setup.\n")

        except DaosApiError as excep:
            # a failure here is a broken environment, not a test scenario
            print(excep)
            print(traceback.format_exc())
            self.fail("Test failed during the initial setup.\n")

    def test_bad_handle(self):
        """
        Test ID: DAOS-1377

        Test Description: Pass a bogus object handle, should return bad handle.

        :avocado: tags=all,object,full_regression,small,objbadhandle
        """

        try:
            # trash the handle and read again
            saved_oh = self.obj.obj_handle
            self.obj.obj_handle = 99999

            # expecting this to fail with -1002
            dummy_thedata2 = self.container.read_an_obj(
                self.datasize, self.dkey, self.akey, self.obj)

            self.container.oh = saved_oh
            self.fail("Test was expected to return a -1002 but it has not.\n")

        except DaosApiError as excep:
            # restore the real handle so teardown can still clean up
            self.container.oh = saved_oh
            if '-1002' not in str(excep):
                print(excep)
                print(traceback.format_exc())
                self.fail("Test was expected to get -1002 but it has not.\n")

    def test_null_ptrs(self):
        """
        Test ID: DAOS-1377

        Test Description: Pass null pointers for various fetch parameters.

        Three sub-cases: null dkey (-1003 expected), null sgl (-2013
        expected), and a null iod case that is currently disabled pending
        DAOS-1449.

        :avocado: tags=all,object,full_regression,small,objfetchnull
        """
        try:
            # now try it with a bad dkey, expecting this to fail with -1003
            dummy_thedata2 = self.container.read_an_obj(
                self.datasize, None, self.akey, self.obj)

            # the read unexpectedly succeeded: tear down and fail
            self.container.close()
            self.container.destroy()
            self.pool.destroy(1)
            self.fail("Test was expected to return a -1003 but it has not.\n")

        except DaosApiError as excep:
            if '-1003' not in str(excep):
                print(excep)
                print(traceback.format_exc())
                self.fail("Test was expected to get -1003 but it has not.\n")

        try:
            # now try it with a null sgl (iod_size is not set)
            # expecting this to fail with -2013
            test_hints = ['sglnull']
            dummy_thedata2 = self.container.read_an_obj(
                self.datasize, self.dkey, self.akey, self.obj, test_hints)

            # behavior not as expect so commented out for now
            # when DAOS-1448 is complete, uncomment and retest

            self.fail("Test was expected to return a -2013 but it has not.\n")

        except DaosApiError as excep:
            if '-2013' not in str(excep):
                print(excep)
                print(traceback.format_exc())
                self.fail("Test was expected to get -2013 but it has not.\n")

        try:
            # when DAOS-1449 is complete, uncomment and retest
            # now try it with a null iod, expecting this to fail with -1003
            #test_hints = ['iodnull']
            #thedata2 = self.container.read_an_obj(self.datasize, dkey, akey,
            #                                 self.obj, test_hints)
            pass
            #self.fail("Test was expected to return a -1003 but it has not.\n")

        except DaosApiError as excep:
            if '-1003' not in str(excep):
                print(excep)
                print(traceback.format_exc())
                self.fail("Test was expected to get -1003 but it has not.\n")
Ejemplo n.º 18
0
    def test_metadata_fillup(self):
        """JIRA ID: DAOS-1512.

        Test Description:
            Test to verify no IO happens after metadata is full.

        Use Cases:
            ?

        :avocado: tags=all,metadata,large,metadatafill,hw
        :avocado: tags=full_regression
        """

        # 3 Phases in nested try/except blocks below
        # Phase 1: overload pool metadata with a container create loop
        #          DaosApiError expected here (otherwise fail test)
        #
        # Phase 2: if Phase 1 passed:
        #          clean up all containers created (prove "critical" destroy
        #          in rdb (and vos) works without cascading nospace errors
        #
        # Phase 3: if Phase 2 passed:
        #          Sustained container create loop, eventually encountering
        #          -DER_NOSPACE, perhaps some successful creates (due to
        #          rdb log compaction, eventually settling into continuous
        #          -DER_NOSPACE. Make sure service keeps running.
        #
        # NOTE(review): connect flag 2 — presumably read-write; confirm
        # against the daos_api connect-flag definitions
        self.pool.pool.connect(2)

        self.log.info("Phase 1: Fillup Metadata (expected to fail) ...")
        container_array = []
        try:
            # Phase 1 container creates
            for _cont in range(NO_OF_MAX_CONTAINER + 1000):
                container = DaosContainer(self.context)
                container.create(self.pool.pool.handle)
                container_array.append(container)

        # Phase 1 got DaosApiError (expected - proceed to Phase 2)
        except DaosApiError:
            # _cont (the loop variable) is deliberately read after the loop
            # to report how many creates succeeded before metadata filled up
            self.log.info("Phase 1: passed (container create %d failed after "
                          "metadata full)", _cont)

            # Phase 2 clean up containers (expected to succeed)
            try:
                self.log.info("Phase 2: Cleaning up containers after "
                              "DaosApiError (expected to work)")
                for container in container_array:
                    container.destroy()
                self.log.info("Phase 2: pass (containers destroyed "
                              "successfully)")

                # Phase 3 sustained container creates even after nospace error
                # Due to rdb log compaction after initial nospace errors,
                # Some brief periods of available space will occur, allowing
                # a few container creates to succeed in the interim.
                self.log.info("Phase 3: sustained container creates: "
                              "to nospace and beyond")
                big_array = []
                in_failure = False
                for _cont in range(30000):
                    try:
                        container = DaosContainer(self.context)
                        container.create(self.pool.pool.handle)
                        big_array.append(container)
                        if in_failure:
                            # log only on the nospace->available edge, not
                            # on every successful create
                            self.log.info("Phase 3: nospace -> available "
                                          "transition, cont %d", _cont)
                            in_failure = False
                    except DaosApiError:
                        if not in_failure:
                            self.log.info("Phase 3: available -> nospace "
                                          "transition, cont %d", _cont)
                        in_failure = True

                self.log.info("Phase 3: passed (created %d / %d containers)",
                              len(big_array), 30000)
                return

            except DaosApiError as exe2:
                print(exe2, traceback.format_exc())
                self.fail("Phase 2: fail (unexpected container destroy error)")

        # Phase 1 failure
        # reached only when the Phase 1 loop completed without DaosApiError
        self.fail("Phase 1: failed (metadata full error did not occur)")
Ejemplo n.º 19
0
class CreateManyDkeys(Test):
    """
    Test Class Description:
        Tests that create large numbers of keys in objects/containers and then
        destroy the containers and verify the space has been reclaimed.

    :avocado: recursive
    """
    def setUp(self):
        """Create a pool from the yaml parameters and connect to it."""
        super(CreateManyDkeys, self).setUp()
        create_mode = self.params.get("mode", '/run/pool/createmode/*')
        create_size = self.params.get("size", '/run/pool/createsize/*')
        create_set = self.params.get("setname", '/run/pool/createset/*')
        self.pool = DaosPool(self.context)
        self.pool.create(create_mode, os.geteuid(), os.getegid(),
                         create_size, create_set, None)
        # 1 << 1 — presumably the read-write connect flag; confirm against
        # the daos_api flag definitions
        self.pool.connect(1 << 1)

    def tearDown(self):
        """Disconnect and destroy the pool, then run base-class teardown."""
        try:
            if self.pool:
                self.pool.disconnect()
                # force destroy (1) so leftovers do not block teardown
                self.pool.destroy(1)
        finally:
            # always run the parent teardown, even if pool cleanup raised
            super(CreateManyDkeys, self).tearDown()

    def write_a_bunch_of_values(self, how_many):
        """
        Write data to an object, each with a dkey and akey.  The how_many
        parameter determines how many key:value pairs are written.

        Writes everything under one transaction, commits, verifies every
        value round-trips, then closes and destroys the container.
        """

        self.container = DaosContainer(self.context)
        self.container.create(self.pool.handle)
        self.container.open()

        ioreq = IORequest(self.context, self.container, None)
        epoch = self.container.get_new_tx()
        c_epoch = ctypes.c_uint64(epoch)

        print("Started Writing the Dataset-----------\n")
        inc = 50000  # progress-report interval
        last_key = inc
        for key in range(how_many):
            # build the key strings once per iteration so the write path,
            # verify path and failure message all agree on the format
            dkey = "dkey {0}".format(key)
            akey = "akey {0}".format(key)
            c_dkey = ctypes.create_string_buffer(dkey)
            c_akey = ctypes.create_string_buffer(akey)
            c_value = ctypes.create_string_buffer(
                "some data that gets stored with the key {0}".format(key))
            c_size = ctypes.c_size_t(ctypes.sizeof(c_value))
            ioreq.single_insert(c_dkey, c_akey, c_value, c_size, c_epoch)

            if key > last_key:
                print("written: {}".format(key))
                sys.stdout.flush()
                last_key = key + inc

        self.container.commit_tx(c_epoch)

        print("Started Verification of the Dataset-----------\n")
        last_key = inc
        for key in range(how_many):
            dkey = "dkey {0}".format(key)
            akey = "akey {0}".format(key)
            c_dkey = ctypes.create_string_buffer(dkey)
            c_akey = ctypes.create_string_buffer(akey)
            the_data = "some data that gets stored with the key {0}".format(
                key)
            val = ioreq.single_fetch(c_dkey, c_akey,
                                     len(the_data) + 1, c_epoch)

            # repr(...)[1:-1] strips the quotes repr adds around the value
            if the_data != (repr(val.value)[1:-1]):
                self.fail("ERROR: Data mismatch for dkey = {0}, akey={1}, "
                          "Expected Value={2} and Received Value={3}\n".format(
                              dkey, akey, the_data, repr(val.value)[1:-1]))

            if key > last_key:
                # progress message; original said "veried" (typo)
                print("verified: {}".format(key))
                sys.stdout.flush()
                last_key = key + inc

        print("starting destroy")
        self.container.close()
        self.container.destroy()
        print("destroy complete")

    @avocado.fail_on(DaosApiError)
    @skipForTicket("DAOS-1721")
    def test_many_dkeys(self):
        """
        Test ID: DAOS-1701
        Test Description: Test many of dkeys in same object.
        Use Cases: 1. large key counts
                   2. space reclaimation after destroy
        :avocado: tags=all,full,small,object,many_dkeys

        """

        no_of_dkeys = self.params.get("number_of_dkeys", '/run/dkeys/')

        # write a lot of individual data items, verify them, then destroy
        self.write_a_bunch_of_values(no_of_dkeys)

        # do it again, which should verify the first container
        # was truly destroyed because a second round won't fit otherwise
        self.write_a_bunch_of_values(no_of_dkeys)