Example #1
    def setUp(self):
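        # load the install paths recorded by the build so the test can find
        # the DAOS library and a scratch directory for the hostfile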
        with open('../../../.build_vars.json') as json_f:
            build_paths = json.load(json_f)
        basepath = os.path.normpath(build_paths['PREFIX'] + "/../")
        tmp = build_paths['PREFIX'] + '/tmp'
        server_group = self.params.get("server_group", '/server/',
                                       'daos_server')
        self.context = DaosContext(build_paths['PREFIX'] + '/lib/')

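        # write the test machines to a hostfile and start the DAOS servers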
        self.hostlist = self.params.get("test_machines", '/run/hosts/*')
        self.hostfile = WriteHostFile.WriteHostFile(self.hostlist, tmp)

        ServerUtils.runServer(self.hostfile, server_group, basepath)

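        # create a pool from the yaml parameters, connect read-write, and
        # create/open a container for the tests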
        self.pool = DaosPool(self.context)
        self.pool.create(self.params.get("mode", '/run/pool/createmode/*'),
                         os.geteuid(), os.getegid(),
                         self.params.get("size", '/run/pool/createsize/*'),
                         self.params.get("setname", '/run/pool/createset/*'),
                         None)
        self.pool.connect(1 << 1)
        poh = self.pool.handle
        self.container = DaosContainer(self.context)
        self.container.create(poh)
        self.container.open()
Example #2
    def test_createasync(self):
        """
        Test container create for asynchronous mode.

        :avocado: tags=container,containerasync,createasync
        """

        global GLOB_SIGNAL
        global GLOB_RC

        # parameters used in pool create
        createmode = self.params.get("mode", '/run/createtests/createmode/*/')
        createsetid = self.params.get("setname", '/run/createtests/createset/')
        createsize = self.params.get("size", '/run/createtests/createsize/')
        createuid = os.geteuid()
        creategid = os.getegid()

        try:
            # initialize a python pool object then create the underlying
            # daos storage
            self.pool = DaosPool(self.context)

            self.pool.create(createmode, createuid, creategid,
                             createsize, createsetid, None)

            poh = self.pool.handle

            self.pool.connect(1 << 1)

            # Container initialization and creation
            self.container1 = DaosContainer(self.context)
            self.container2 = DaosContainer(self.context)


            GLOB_SIGNAL = threading.Event()
            self.container1.create(poh, None, cb_func)

            GLOB_SIGNAL.wait()
            if GLOB_RC != 0:
                self.fail("RC not as expected in async test")
            print("RC after successful container create: ", GLOB_RC)

            # Try to recreate container after destroying pool,
            # this should fail. Checking rc after failure.
            self.pool.destroy(1)
            GLOB_SIGNAL = threading.Event()
            GLOB_RC = -9900000
            self.container2.create(poh, None, cb_func)

            GLOB_SIGNAL.wait()
            if GLOB_RC == 0:
                self.fail("RC not as expected in async test")
            print("RC after unsuccessful container create: ", GLOB_RC)

            # cleanup the pool and container
            self.pool = None

        except DaosApiError as excep:
            print(excep)
            print(traceback.format_exc())
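This asynchronous example (and Example #6 below) calls container.create() with a cb_func callback and synchronizes on the module-level GLOB_SIGNAL/GLOB_RC globals, which are not shown here. A minimal sketch of what they might look like, assuming the DAOS Python API invokes the callback with an event object whose return code is exposed as event.event.ev_error (an assumption; check the daos_api event definition):

GLOB_SIGNAL = None
GLOB_RC = -99000000

def cb_func(event):
    """Copy the async return code into GLOB_RC and wake the waiting test."""
    global GLOB_RC
    GLOB_RC = event.event.ev_error  # assumed attribute path on the event
    GLOB_SIGNAL.set()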
Example #3
def check_handle(buf_len, iov_len, buf, uuidstr, rank):
    """
    This gets run in a child process and verifies the global
    handle can be turned into a local handle in another process.
    """

    try:
        # get paths from the build_vars generated by build
        with open('../../../.build_vars.json') as build_file:
            build_paths = json.load(build_file)

        # setup the DAOS python API in this process
        context = DaosContext(build_paths['PREFIX'] + '/lib/')

        pool = DaosPool(context)
        pool.set_uuid_str(uuidstr)
        pool.set_svc(rank)
        pool.group = "daos_server"

        # note that the handle is stored inside the pool as well
        dummy_local_handle = pool.global2local(context, iov_len, buf_len, buf)

        # perform some operations that will use the new handle
        pool.pool_query()
        container = DaosContainer(context)
        container.create(pool.handle)

    except DaosApiError as excep:
        print(excep)
        print(traceback.format_exc())
        raise

    return
Example #4
def check_handle(buf_len, iov_len, buf, uuidstr, rank):
    """
    This gets run in a child process and verifies the global
    handle can be turned into a local handle in another process.
    """

    try:
        # get paths from the build_vars generated by build
        with open('../../../.build_vars.json') as build_file:
            build_paths = json.load(build_file)

        # setup the DAOS python API in this process
        context = DaosContext(build_paths['PREFIX'] + '/lib/')

        pool = DaosPool(context)
        pool.set_uuid_str(uuidstr)
        pool.set_svc(rank)
        pool.group = "daos_server"

        # note that the handle is stored inside the pool as well
        dummy_local_handle = pool.global2local(context, iov_len, buf_len, buf)

        # perform some operations that will use the new handle
        pool.pool_query()
        container = DaosContainer(context)
        container.create(pool.handle)

    except DaosApiError as excep:
        print(excep)
        print(traceback.format_exc())
        raise

    return
Example #5
    def setUp(self):
        try:
            super(PunchTest, self).setUp()

            # parameters used in pool create
            createmode = self.params.get("mode", '/run/pool/createmode/')
            createsetid = self.params.get("setname", '/run/pool/createset/')
            createsize = self.params.get("size", '/run/pool/createsize/')

            createuid = os.geteuid()
            creategid = os.getegid()

            # initialize a python pool object then create the underlying
            # daos storage
            self.pool = DaosPool(self.context)
            self.pool.create(createmode, createuid, creategid, createsize,
                             createsetid, None)
            self.pool.connect(1 << 1)

            # create a container
            self.container = DaosContainer(self.context)
            self.container.create(self.pool.handle)

            # now open it
            self.container.open()
        except DaosApiError as excpn:
            print(excpn)
            print(traceback.format_exc())
            self.fail("Test failed during setup.\n")
Example #6
    def test_createasync(self):
        """
        Test container create for asynchronous mode.

        :avocado: tags=container,containerasync,createasync
        """

        global GLOB_SIGNAL
        global GLOB_RC

        # parameters used in pool create
        createmode = self.params.get("mode", '/run/createtests/createmode/*/')
        createsetid = self.params.get("setname", '/run/createtests/createset/')
        createsize = self.params.get("size", '/run/createtests/createsize/')
        createuid = os.geteuid()
        creategid = os.getegid()

        try:
            # initialize a python pool object then create the underlying
            # daos storage
            self.POOL = DaosPool(self.Context)

            self.POOL.create(createmode, createuid, creategid,
                             createsize, createsetid, None)

            poh = self.POOL.handle

            self.POOL.connect(1 << 1)

            # Container initialization and creation
            self.Container1 = DaosContainer(self.Context)
            self.Container2 = DaosContainer(self.Context)


            GLOB_SIGNAL = threading.Event()
            self.Container1.create(poh, None, cb_func)

            GLOB_SIGNAL.wait()
            if GLOB_RC != 0:
                self.fail("RC not as expected in async test")
            print("RC after successful Container create: ", GLOB_RC)

            # Try to recreate container after destroying pool,
            # this should fail. Checking rc after failure.
            self.POOL.destroy(1)
            GLOB_SIGNAL = threading.Event()
            GLOB_RC = -9900000
            self.Container2.create(poh, None, cb_func)

            GLOB_SIGNAL.wait()
            if GLOB_RC != -1005:
                self.fail("RC not as expected in async test")
            print("RC after Container create failed:", GLOB_RC)

            # cleanup the Pool and Container
            self.POOL = None

        except ValueError as e:
            print(e)
            print(traceback.format_exc())
Example #7
    def test_filemodification(self):
        """
        Test whether file modification happens as expected under different
        permission levels.

        :avocado: tags=pool,permission,filemodification
        """
        # parameters used in pool create
        createmode = self.params.get("mode", '/run/createtests/createmode/*/')
        createuid = self.params.get("uid", '/run/createtests/createuid/')
        creategid = self.params.get("gid", '/run/createtests/creategid/')
        createsetid = self.params.get("setname", '/run/createtests/createset/')
        createsize = self.params.get("size", '/run/createtests/createsize/')

        if createmode == 73:
            expected_result = 'FAIL'
        elif createmode in [146, 511]:
            permissions = 1
            expected_result = 'PASS'
        elif createmode == 292:
            permissions = 2
            expected_result = 'PASS'

        try:
            # initialize a python pool object then create the underlying
            # daos storage
            self.pool = DaosPool(self.Context)
            self.d_log.debug("Pool initialisation successful")
            self.pool.create(createmode, createuid, creategid, createsize,
                             createsetid, None)
            self.d_log.debug("Pool Creation successful")

            self.pool.connect(1 << permissions)
            self.d_log.debug("Pool Connect successful")

            self.container = DaosContainer(self.Context)
            self.d_log.debug("Container initialisation successful")

            self.container.create(self.pool.handle)
            self.d_log.debug("Container create successful")

            # now open it
            self.container.open()
            self.d_log.debug("Container open successful")

            thedata = "a string that I want to stuff into an object"
            size = 45
            dkey = "this is the dkey"
            akey = "this is the akey"

            self.container.write_an_obj(thedata, size, dkey, akey)
            self.d_log.debug("Container write successful")
            if expected_result in ['FAIL']:
                self.fail("Test was expected to fail but it passed.\n")

        except DaosApiError as e:
            print(e)
            if expected_result == 'PASS':
                self.fail("Test was expected to pass but it failed.\n")
Example #8
    def write_a_bunch_of_values(self, how_many):
        """
        Write data to an object, each with a dkey and akey.  The how_many
        parameter determines how many key:value pairs are written.
        """

        self.container = DaosContainer(self.context)
        self.container.create(self.pool.handle)
        self.container.open()

        ioreq = IORequest(self.context, self.container, None)
        epoch = self.container.get_new_tx()
        c_epoch = ctypes.c_uint64(epoch)

        print("Started Writing the Dataset-----------\n")
        inc = 50000
        last_key = inc
        for key in range(how_many):
            c_dkey = ctypes.create_string_buffer("dkey {0}".format(key))
            c_akey = ctypes.create_string_buffer("akey {0}".format(key))
            c_value = ctypes.create_string_buffer(
                "some data that gets stored with the key {0}".format(key))
            c_size = ctypes.c_size_t(ctypes.sizeof(c_value))
            ioreq.single_insert(c_dkey, c_akey, c_value, c_size, c_epoch)

            if key > last_key:
                print("written: {}".format(key))
                sys.stdout.flush()
                last_key = key + inc

        self.container.commit_tx(c_epoch)

        print("Started Verification of the Dataset-----------\n")
        last_key = inc
        for key in range(how_many):
            c_dkey = ctypes.create_string_buffer("dkey {0}".format(key))
            c_akey = ctypes.create_string_buffer("akey {0}".format(key))
            the_data = "some data that gets stored with the key {0}".format(
                key)
            val = ioreq.single_fetch(c_dkey, c_akey,
                                     len(the_data) + 1, c_epoch)

            if the_data != (repr(val.value)[1:-1]):
                self.fail("ERROR: Data mismatch for dkey = {0}, akey={1}, "
                          "Expected Value={2} and Received Value={3}\n".format(
                              "dkey {0}".format(key), "akey {0}".format(key),
                              the_data,
                              repr(val.value)[1:-1]))

            if key > last_key:
                print("verified: {}".format(key))
                sys.stdout.flush()
                last_key = key + inc

        print("starting destroy")
        self.container.close()
        self.container.destroy()
        print("destroy complete")
Example #9
    def setUp(self):
        super(ObjOpenBadParam, self).setUp()
        try:
            # parameters used in pool create
            createmode = self.params.get("mode", '/run/pool/createmode/')
            createsetid = self.params.get("setname", '/run/pool/createset/')
            createsize = self.params.get("size", '/run/pool/createsize/')
            createuid = os.geteuid()
            creategid = os.getegid()

            # initialize a python pool object then create the underlying
            # daos storage
            self.pool = DaosPool(self.context)
            self.pool.create(createmode, createuid, creategid,
                             createsize, createsetid, None)

            # need a connection to create container
            self.pool.connect(1 << 1)

            # create a container
            self.container = DaosContainer(self.context)
            self.container.create(self.pool.handle)

            # now open it
            self.container.open()

            # create an object and write some data into it
            thedata = "a string that I want to stuff into an object"
            self.datasize = len(thedata) + 1
            self.dkey = "this is the dkey"
            self.akey = "this is the akey"
            self.obj, self.epoch = self.container.write_an_obj(thedata,
                                                               self.datasize,
                                                               self.dkey,
                                                               self.akey,
                                                               obj_cls=1)

            thedata2 = self.container.read_an_obj(self.datasize, self.dkey,
                                                  self.akey, self.obj,
                                                  self.epoch)
            if thedata not in thedata2.value:
                print(thedata)
                print(thedata2.value)
                err_str = "Error reading back data, test failed during the " \
                          "initial setup."
                self.d_log.error(err_str)
                self.fail(err_str)

            # setup leaves object in open state, so closing to start clean
            self.obj.close()

        except DaosApiError as excep:
            print(excep)
            print(traceback.format_exc())
            self.fail("Test failed during the initial setup.")
Example #10
def check_handle(pool_glob_handle, uuidstr, cont_glob_handle, rank):
    """
    This gets run in a child process and verifies the global
    handles can be turned into local handles in another process.
    """

    try:
        # get paths from the build_vars generated by build
        with open('../../../.build_vars.json') as build_file:
            build_paths = json.load(build_file)

        # setup the DAOS python API in this process
        context = DaosContext(build_paths['PREFIX'] + '/lib/')


        # setup the pool and connect using global handle
        pool = DaosPool(context)
        pool.uuid = uuidstr
        pool.set_svc(rank)
        pool.group = "daos_server"
        buf = ctypes.cast(pool_glob_handle.iov_buf,
                          ctypes.POINTER(ctypes.c_byte *
                                         pool_glob_handle.iov_buf_len))
        buf2 = bytearray()
        buf2.extend(buf.contents)
        pool_handle = pool.global2local(context,
                                        pool_glob_handle.iov_len,
                                        pool_glob_handle.iov_buf_len,
                                        buf2)

        # perform an operation that will use the new handle, if it
        # doesn't throw an exception, then all is well.
        pool.pool_query()

        # setup the container and then connect using the global handle
        container = DaosContainer(context)
        container.poh = pool_handle
        buf = ctypes.cast(cont_glob_handle.iov_buf,
                          ctypes.POINTER(ctypes.c_byte *
                                         cont_glob_handle.iov_buf_len))
        buf2 = bytearray()
        buf2.extend(buf.contents)
        dummy_cont_handle = container.global2local(context,
                                                   cont_glob_handle.iov_len,
                                                   cont_glob_handle.iov_buf_len,
                                                   buf2)
        # just try one thing to make sure handle is good
        container.query()

    except DaosApiError as excep:
        print(excep)
        print(traceback.format_exc())
        raise

    return
Example #11
    def create(self, uuid=None):
        """Create a container.

        Args:
            uuid (str, optional): container uuid. Defaults to None.
        """
        self.destroy()
        self.log.info("Creating a container")
        self.container = DaosContainer(self.pool.context)
        self.container.create(self.pool.pool.handle, uuid)
        self.uuid = self.container.get_uuid_str()
Example #12
    def setUp(self):

        try:
            # get paths from the build_vars generated by build
            with open('../../../.build_vars.json') as f:
                build_paths = json.load(f)
                self.basepath = os.path.normpath(build_paths['PREFIX'] +
                                                 "/../")
                self.tmp = build_paths['PREFIX'] + '/tmp'

                self.server_group = self.params.get("server_group", '/server/',
                                                    'daos_server')

                # setup the DAOS python API
                self.Context = DaosContext(build_paths['PREFIX'] + '/lib/')

                self.hostlist = self.params.get("test_machines",
                                                '/run/hosts/*')
                self.hostfile = WriteHostFile.WriteHostFile(
                    self.hostlist, self.tmp)

                ServerUtils.runServer(self.hostfile, self.server_group,
                                      self.basepath)

                # parameters used in pool create
                createmode = self.params.get("mode", '/run/pool/createmode/')
                createsetid = self.params.get("setname",
                                              '/run/pool/createset/')
                createsize = self.params.get("size", '/run/pool/createsize/')

                createuid = os.geteuid()
                creategid = os.getegid()

                # initialize a python pool object then create the underlying
                # daos storage
                self.pool = DaosPool(self.Context)
                self.pool.create(createmode, createuid, creategid, createsize,
                                 createsetid, None)
                self.pool.connect(1 << 1)

                # create a container
                self.container = DaosContainer(self.Context)
                self.container.create(self.pool.handle)

                # now open it
                self.container.open()

        except DaosApiError as e:
            print(e)
            print(traceback.format_exc())
            self.fail("Test failed during setup.\n")
Example #13
    def setUp(self):
        self.agent_sessions = None
        self.pool = None
        self.container = None
        self.obj = None
        self.ioreq = None
        self.hostlist = None
        self.hostfile = None
        self.no_of_dkeys = None
        self.no_of_akeys = None
        self.array_size = None
        self.record_length = None

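        # load the install paths recorded by the build and set up the DAOS
        # API context, logging, and test parameters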
        with open('../../../.build_vars.json') as json_f:
            build_paths = json.load(json_f)
        basepath = os.path.normpath(build_paths['PREFIX'] + "/../")
        server_group = self.params.get("name", '/server_config/',
                                       'daos_server')
        self.context = DaosContext(build_paths['PREFIX'] + '/lib/')
        self.d_log = DaosLog(self.context)
        self.hostlist = self.params.get("test_machines", '/run/hosts/*')
        self.hostfile = write_host_file.write_host_file(
            self.hostlist, self.workdir)
        self.no_of_dkeys = self.params.get("no_of_dkeys", '/run/dkeys/*')[0]
        self.no_of_akeys = self.params.get("no_of_akeys", '/run/akeys/*')[0]
        self.array_size = self.params.get("size", '/array_size/')
        self.record_length = self.params.get("length", '/run/record/*')

        self.agent_sessions = agent_utils.run_agent(basepath, self.hostlist)
        server_utils.run_server(self.hostfile, server_group, basepath)

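        # create a pool from the yaml parameters and connect read-write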
        self.pool = DaosPool(self.context)
        self.pool.create(self.params.get("mode", '/run/pool/createmode/*'),
                         os.geteuid(), os.getegid(),
                         self.params.get("size", '/run/pool/createsize/*'),
                         self.params.get("setname", '/run/pool/createset/*'),
                         None)
        self.pool.connect(2)

        self.container = DaosContainer(self.context)
        self.container.create(self.pool.handle)
        self.container.open()

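        # create and open an object, then wrap it in an IORequest for the
        # key/value I/O performed by the tests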
        self.obj = DaosObj(self.context, self.container)
        self.obj.create(objcls=1)
        self.obj.open()
        self.ioreq = IORequest(self.context,
                               self.container,
                               self.obj,
                               objtype=4)
Example #14
    def test_global_handle(self):
        """
        Test ID: DAO

        Test Description: Use a pool handle in another process.

        :avocado: tags=pool,poolhandle,vm,small,regression
        """

        try:

            # use the uid/gid of the user running the test, these should
            # be perfectly valid
            createuid = os.geteuid()
            creategid = os.getegid()

            # parameters used in pool create that are in yaml
            createmode = self.params.get("mode", '/run/testparams/createmode/')
            createsetid = self.params.get("setname",
                                          '/run/testparams/createset/')
            createsize = self.params.get("size", '/run/testparams/createsize/')

            # initialize a python pool object then create the underlying
            # daos storage
            pool = DaosPool(self.context)
            pool.create(createmode, createuid, creategid,
                        createsize, createsetid, None)
            pool.connect(1 << 1)

            # create a container just to make sure handle is good
            container = DaosContainer(self.context)
            container.create(pool.handle)

            # create a global handle
            iov_len, buf_len, buf = pool.local2global()

            # this should work in the future but needs on-line server addition
            #arg_list = (buf_len, iov_len, buf, pool.get_uuid_str(), 0)
            #p = Process(target=check_handle, args=arg_list)
            #p.start()
            #p.join()
            # for now verifying global handle in the same process which is not
            # the intended use case
            check_handle(buf_len, iov_len, buf, pool.get_uuid_str(), 0)

        except DaosApiError as excep:
            print(excep)
            print(traceback.format_exc())
            self.fail("Expecting to pass but test has failed.\n")
Example #15
    def test_global_handle(self):
        """
        Test ID: DAO

        Test Description: Use a pool handle in another process.

        :avocado: tags=all,pool,pr,tiny,poolglobalhandle
        """

        try:

            # use the uid/gid of the user running the test, these should
            # be perfectly valid
            createuid = os.geteuid()
            creategid = os.getegid()

            # parameters used in pool create that are in yaml
            createmode = self.params.get("mode", '/run/testparams/createmode/')
            createsetid = self.params.get("setname",
                                          '/run/testparams/createset/')
            createsize = self.params.get("size", '/run/testparams/createsize/')

            # initialize a python pool object then create the underlying
            # daos storage
            pool = DaosPool(self.context)
            pool.create(createmode, createuid, creategid,
                        createsize, createsetid, None)
            pool.connect(1 << 1)

            # create a container just to make sure handle is good
            container = DaosContainer(self.context)
            container.create(pool.handle)

            # create a global handle
            iov_len, buf_len, buf = pool.local2global()

            # this should work in the future but needs on-line server addition
            #arg_list = (buf_len, iov_len, buf, pool.get_uuid_str(), 0)
            #p = Process(target=check_handle, args=arg_list)
            #p.start()
            #p.join()
            # for now verifying global handle in the same process which is not
            # the intended use case
            check_handle(buf_len, iov_len, buf, pool.get_uuid_str(), 0)

        except DaosApiError as excep:
            print(excep)
            print(traceback.format_exc())
            self.fail("Expecting to pass but test has failed.\n")
Example #16
    def setUp(self):

        # get paths from the build_vars generated by build
        with open('../../../.build_vars.json') as finput:
            build_paths = json.load(finput)
        basepath = os.path.normpath(build_paths['PREFIX'] + "/../")

        server_group = self.params.get("name", '/server_config/',
                                       'daos_server')

        # setup the DAOS python API
        self.context = DaosContext(build_paths['PREFIX'] + '/lib/')
        self.d_log = DaosLog(self.context)

        self.hostlist = self.params.get("test_machines", '/run/hosts/*')
        hostfile = write_host_file.write_host_file(self.hostlist, self.workdir)

        server_utils.run_server(hostfile, server_group, basepath)

        # Set up the pool and container.
        try:
            # parameters used in pool create
            createmode = self.params.get("mode", '/run/pool/createmode/')
            createsetid = self.params.get("setname", '/run/pool/createset/')
            createsize = self.params.get("size", '/run/pool/createsize/*')
            createuid = os.geteuid()
            creategid = os.getegid()

            # initialize a pool object then create the underlying
            # daos storage
            self.pool = DaosPool(self.context)
            self.pool.create(createmode, createuid, creategid, createsize,
                             createsetid, None)

            # need a connection to create container
            self.pool.connect(1 << 1)

            # create a container
            self.container = DaosContainer(self.context)
            self.container.create(self.pool.handle)

            # now open it
            self.container.open()

        except DaosApiError as error:
            print(error)
            print(traceback.format_exc())
            self.fail("Test failed before snapshot taken")
Example #17
    def create(self, uuid=None):
        """Create a container.

        Args:
            uuid (str, optional): container uuid. Defaults to None.
        """
        self.destroy()
        self.log.info("Creating a container with pool handle %s",
                      self.pool.pool.handle.value)
        self.container = DaosContainer(self.pool.context)
        kwargs = {"poh": self.pool.pool.handle}
        if uuid is not None:
            kwargs["con_uuid"] = uuid
        self._call_method(self.container.create, kwargs)
        self.uuid = self.container.get_uuid_str()
        self.log.info("  Container created with uuid {}".format(self.uuid))
Example #18
    def setUp(self):
        self.agent_sessions = None
        self.pool = None
        self.container = None
        self.hostlist = None
        self.large_data_set = {}

        with open('../../../.build_vars.json') as build_file:
            build_paths = json.load(build_file)
        basepath = os.path.normpath(build_paths['PREFIX']  + "/../")
        server_group = self.params.get("name",
                                       '/server_config/',
                                       'daos_server')
        self.context = DaosContext(build_paths['PREFIX'] + '/lib/')

        self.hostlist = self.params.get("test_machines", '/run/hosts/*')
        self.hostfile = write_host_file.write_host_file(self.hostlist,
                                                        self.workdir)

        self.agent_sessions = AgentUtils.run_agent(basepath, self.hostlist)
        server_utils.run_server(self.hostfile, server_group, basepath)

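        # create a pool from the attribute-test parameters, then create and
        # open a container to hold the attributes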
        self.pool = DaosPool(self.context)
        self.pool.create(self.params.get("mode", '/run/attrtests/createmode/*'),
                         os.geteuid(),
                         os.getegid(),
                         self.params.get("size", '/run/attrtests/createsize/*'),
                         self.params.get("setname",
                                         '/run/attrtests/createset/*'),
                         None)
        self.pool.connect(1 << 1)
        poh = self.pool.handle
        self.container = DaosContainer(self.context)
        self.container.create(poh)
        self.container.open()
Example #19
    def setUp(self):
        super(ContainerAttributeTest, self).setUp()

        self.large_data_set = {}

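        # create a pool, connect read-write, and open a container for the
        # attribute tests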
        self.pool = DaosPool(self.context)
        self.pool.create(
            self.params.get("mode", '/run/attrtests/createmode/*'),
            os.geteuid(), os.getegid(),
            self.params.get("size", '/run/attrtests/createsize/*'),
            self.params.get("setname", '/run/attrtests/createset/*'), None)
        self.pool.connect(1 << 1)
        poh = self.pool.handle
        self.container = DaosContainer(self.context)
        self.container.create(poh)
        self.container.open()
Example #20
    def setUp(self):
        """
        set up method
        """
        super(Snapshot, self).setUp()
        # get parameters from yaml file
        createmode = self.params.get("mode", '/run/poolparams/createmode/')
        createuid = os.geteuid()
        creategid = os.getegid()
        createsetid = self.params.get("setname", '/run/poolparams/createset/')
        createsize = self.params.get("size", '/run/poolparams/createsize/')
        self.log.info("==In setUp, self.context= %s", self.context)

        try:

            # initialize a python pool object then create the underlying
            # daos storage
            self.pool = DaosPool(self.context)
            self.pool.create(createmode, createuid, creategid, createsize,
                             createsetid, None)

            # need a connection to the pool with rw permission
            #    DAOS_PC_RO = int(1 << 0)
            #    DAOS_PC_RW = int(1 << 1)
            #    DAOS_PC_EX = int(1 << 2)
            self.pool.connect(1 << 1)

            # create a container
            self.container = DaosContainer(self.context)
            self.container.create(self.pool.handle)

        except DaosApiError as error:
            self.log.info("Error detected in DAOS pool container setup: %s",
                          str(error))
            self.log.info(traceback.format_exc())
            self.fail("##Test failed on setUp, before snapshot taken")

        # now open it
        self.container.open()

        # do a query and compare the UUID returned from create with
        # that returned by query
        self.container.query()

        if self.container.get_uuid_str() != c_uuid_to_str(
                self.container.info.ci_uuid):
            self.fail("##Container UUID did not match the one in info.")
Example #21
    def test_metadata_addremove(self):
        """
        Test ID: DAOS-1512
        Test Description: Verify that metadata space is released
                          after container delete.
        :avocado: tags=metadata,metadata_free_space,nvme,small
        """
        self.pool.connect(2)
        for k in range(10):
            container_array = []
            self.d_log.debug("Container Create Iteration {}".format(k))
            for _cont in range(NO_OF_MAX_CONTAINER):
                container = DaosContainer(self.context)
                container.create(self.pool.handle)
                container_array.append(container)
            self.d_log.debug("Container Remove Iteration {} ".format(k))
            for cont in container_array:
                cont.destroy()
Example #22
    def test_metadata_addremove(self):
        """
        Test ID: DAOS-1512
        Test Description: Verify that metadata space is released
                          after container delete.
        :avocado: tags=metadata,metadata_free_space,nvme,small
        """
        self.pool.connect(2)
        for k in range(10):
            container_array = []
            self.d_log.debug("Container Create Iteration {}".format(k))
            for _cont in range(NO_OF_MAX_CONTAINER):
                container = DaosContainer(self.context)
                container.create(self.pool.handle)
                container_array.append(container)
            self.d_log.debug("Container Remove Iteration {} ".format(k))
            for cont in container_array:
                cont.destroy()
Example #23
    def test_metadata_fillup(self):
        """
        Test ID: DAOS-1512
        Test Description: Test to verify no IO happens after metadata is full.
        :avocado: tags=metadata,metadata_fill,nvme,small
        """
        self.pool.connect(2)
        container = DaosContainer(self.context)
        self.d_log.debug("Fillup Metadata....")
        for _cont in range(NO_OF_MAX_CONTAINER):
            container.create(self.pool.handle)
        self.d_log.debug("Metadata Overload...")
        # This should fail with a no-metadata-space error.
        try:
            for _cont in range(250):
                container.create(self.pool.handle)
        except DaosApiError as exe:
            print (exe, traceback.format_exc())
            return

        self.fail("Test was expected to fail but it passed.\n")
Example #24
    def test_metadata_fillup(self):
        """JIRA ID: DAOS-1512.

        Test Description:
            Test to verify no IO happens after metadata is full.

        Use Cases:
            ?

        :avocado: tags=metadata,metadata_fill,nvme,small
        """
        self.pool.connect(2)
        container = DaosContainer(self.context)

        self.d_log.debug("Fillup Metadata....")
        for _cont in range(NO_OF_MAX_CONTAINER):
            container.create(self.pool.handle)

        # This should fail with no Metadata space Error.
        self.d_log.debug("Metadata Overload...")
        try:
            for _cont in range(250):
                container.create(self.pool.handle)
            self.fail("Test expected to fail with a no metadata space error")

        except DaosApiError as exe:
            print(exe, traceback.format_exc())
            return

        self.fail("Test was expected to fail but it passed.\n")
Example #25
    def setUp(self):
        self.agent_sessions = None
        self.pool = None
        self.container = None
        self.obj = None
        self.ioreq = None
        self.hostlist = None
        self.hostfile = None
        self.no_of_dkeys = None
        self.no_of_akeys = None
        self.array_size = None
        self.record_length = None

        with open('../../../.build_vars.json') as json_f:
            build_paths = json.load(json_f)
        basepath = os.path.normpath(build_paths['PREFIX']  + "/../")
        server_group = self.params.get("name",
                                       '/server_config/',
                                       'daos_server')
        self.context = DaosContext(build_paths['PREFIX'] + '/lib/')
        self.d_log = DaosLog(self.context)
        self.hostlist = self.params.get("test_machines", '/run/hosts/*')
        self.hostfile = write_host_file.write_host_file(self.hostlist,
                                                        self.workdir)
        self.no_of_dkeys = self.params.get("no_of_dkeys", '/run/dkeys/*')[0]
        self.no_of_akeys = self.params.get("no_of_akeys", '/run/akeys/*')[0]
        self.array_size = self.params.get("size", '/array_size/')
        self.record_length = self.params.get("length", '/run/record/*')

        self.agent_sessions = AgentUtils.run_agent(basepath, self.hostlist)
        server_utils.run_server(self.hostfile, server_group, basepath)

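        # create a pool from the yaml parameters and connect read-write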
        self.pool = DaosPool(self.context)
        self.pool.create(self.params.get("mode", '/run/pool/createmode/*'),
                         os.geteuid(),
                         os.getegid(),
                         self.params.get("size", '/run/pool/createsize/*'),
                         self.params.get("setname", '/run/pool/createset/*'),
                         None)
        self.pool.connect(2)

        self.container = DaosContainer(self.context)
        self.container.create(self.pool.handle)
        self.container.open()

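        # create and open an object, then build an IORequest for the tests' I/O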
        self.obj = DaosObj(self.context, self.container)
        self.obj.create(objcls=1)
        self.obj.open()
        self.ioreq = IORequest(self.context,
                               self.container,
                               self.obj, objtype=4)
Example #26
    def setUp(self):
        self.agent_sessions = None
        try:
            # get paths from the build_vars generated by build
            with open('../../../.build_vars.json') as build_file:
                build_paths = json.load(build_file)
                self.basepath = os.path.normpath(build_paths['PREFIX'] + "/../")

                self.server_group = self.params.get("name", '/server_config/',
                                                    'daos_server')

                # setup the DAOS python API
                self.context = DaosContext(build_paths['PREFIX'] + '/lib/')

                self.hostlist = self.params.get("test_machines", '/run/hosts/*')
                self.hostfile = write_host_file.write_host_file(self.hostlist,
                                                                self.workdir)

                self.agent_sessions = AgentUtils.run_agent(self.basepath,
                                                           self.hostlist)
                server_utils.run_server(self.hostfile, self.server_group,
                                        self.basepath)

                # parameters used in pool create
                createmode = self.params.get("mode", '/run/pool/createmode/')
                createsetid = self.params.get("setname", '/run/pool/createset/')
                createsize = self.params.get("size", '/run/pool/createsize/')

                createuid = os.geteuid()
                creategid = os.getegid()

                # initialize a python pool object then create the underlying
                # daos storage
                self.pool = DaosPool(self.context)
                self.pool.create(createmode, createuid, creategid,
                                 createsize, createsetid, None)
                self.pool.connect(1 << 1)

                # create a container
                self.container = DaosContainer(self.context)
                self.container.create(self.pool.handle)

                # now open it
                self.container.open()

        except DaosApiError as excep:
            print(excep)
            print(traceback.format_exc())
            self.fail("Test failed during setup.\n")
Example #27
    def setUp(self):

        # get paths from the build_vars generated by build
        with open('../../../.build_vars.json') as finput:
            build_paths = json.load(finput)
        basepath = os.path.normpath(build_paths['PREFIX'] + "/../")

        server_group = self.params.get("name", '/server_config/',
                                       'daos_server')

        # setup the DAOS python API
        self.context = DaosContext(build_paths['PREFIX'] + '/lib/')
        self.d_log = DaosLog(self.context)

        self.hostlist = self.params.get("test_machines", '/run/hosts/*')
        hostfile = write_host_file.write_host_file(self.hostlist, self.workdir)

        server_utils.run_server(hostfile, server_group, basepath)

        # Set up the pool and container.
        try:
            # parameters used in pool create
            createmode = self.params.get("mode", '/run/pool/createmode/')
            createsetid = self.params.get("setname", '/run/pool/createset/')
            createsize = self.params.get("size", '/run/pool/createsize/*')
            createuid = os.geteuid()
            creategid = os.getegid()

            # initialize a pool object then create the underlying
            # daos storage
            self.pool = DaosPool(self.context)
            self.pool.create(createmode, createuid, creategid,
                             createsize, createsetid, None)

            # need a connection to create container
            self.pool.connect(1 << 1)

            # create a container
            self.container = DaosContainer(self.context)
            self.container.create(self.pool.handle)

            # now open it
            self.container.open()

        except DaosApiError as error:
            print(error)
            print(traceback.format_exc())
            self.fail("Test failed before snapshot taken")
Example #28
def check_handle(pool_glob_handle, uuidstr, cont_glob_handle, rank):
    """
    This gets run in a child process and verifies the global
    handles can be turned into local handles in another process.
    """

    try:
        # get paths from the build_vars generated by build
        with open('../../../.build_vars.json') as build_file:
            build_paths = json.load(build_file)

        # setup the DAOS python API in this process
        context = DaosContext(build_paths['PREFIX'] + '/lib/')


        # setup the pool and connect using global handle
        pool = DaosPool(context)
        pool.uuid = uuidstr
        pool.set_svc(rank)
        pool.group = "daos_server"
        buf = ctypes.cast(pool_glob_handle.iov_buf,
                          ctypes.POINTER(ctypes.c_byte *
                                         pool_glob_handle.iov_buf_len))
        buf2 = bytearray()
        buf2.extend(buf.contents)
        pool_handle = pool.global2local(context,
                                        pool_glob_handle.iov_len,
                                        pool_glob_handle.iov_buf_len,
                                        buf2)

        # perform an operation that will use the new handle, if it
        # doesn't throw an exception, then all is well.
        pool.pool_query()

        # setup the container and then connect using the global handle
        container = DaosContainer(context)
        container.poh = pool_handle
        buf = ctypes.cast(cont_glob_handle.iov_buf,
                          ctypes.POINTER(ctypes.c_byte *
                                         cont_glob_handle.iov_buf_len))
        buf2 = bytearray()
        buf2.extend(buf.contents)
        dummy_cont_handle = container.global2local(context,
                                                   cont_glob_handle.iov_len,
                                                   cont_glob_handle.iov_buf_len,
                                                   buf2)
        # just try one thing to make sure handle is good
        container.query()

    except DaosApiError as excep:
        print(excep)
        print(traceback.format_exc())
        raise

    return
Example #29
def get_container(context, pool, log=None):
    """Return a DAOS container that has been created and opened.

    Args:
        context (DaosContext): the context to use to create the container
        pool (DaosPool): pool in which to create the container
        log (DaosLog|None): object for logging messages

    Returns:
        DaosContainer: an object representing a DAOS container

    """
    if log:
        log.info("Creating a container")
    container = DaosContainer(context)
    container.create(pool.handle)
    if log:
        log.info("Opening a container")
    container.open()
    return container
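A hypothetical call site for this helper, assuming a context, pool, and DaosLog already exist as in the surrounding examples (the variable names are illustrative, not from the original source):

# create and open a container via the helper, then clean it up when done
container = get_container(context, pool, log=d_log)
try:
    container.query()
finally:
    container.close()
    container.destroy()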
Example #30
    def test_container_create(self):
        """
        Test ID: DAOS-689

        Test Description: valid and invalid container creation and close.

        :avocado: tags=regression,cont,contcreate
        """

        pool = None
        contuuid = None
        expected_results = []

        try:
            # initialize a python pool object then create the underlying
            # daos storage
            createmode = self.params.get("mode", '/run/poolparams/')
            createuid = os.geteuid()
            creategid = os.getegid()
            createsetid = self.params.get("setname", '/run/poolparams/')
            createsize = self.params.get("size", '/run/poolparams/')

            # setup the pool
            pool = DaosPool(self.context)
            pool.create(createmode, createuid, creategid,
                        createsize, createsetid)
            pool.connect(1 << 1)

            # maybe use the good handle, maybe not
            handleparam = self.params.get("handle", '/run/poolhandle/*')
            if handleparam == 'VALID':
                poh = pool.handle
            else:
                poh = handleparam
                expected_results.append('FAIL')

            # maybe use a good UUID, maybe not
            uuidparam = self.params.get("uuid", "/uuids/*")
            expected_results.append(uuidparam[1])
            if uuidparam[0] == 'NULLPTR':
                self.cancel("skipping this test until DAOS-2043 is fixed")
                contuuid = 'NULLPTR'
            else:
                contuuid = uuid.UUID(uuidparam[0])

            should_fail = False
            for result in expected_results:
                if result == 'FAIL':
                    should_fail = True
                    break

            self.container = DaosContainer(self.context)
            self.container.create(poh, contuuid)

            # check UUID is the specified one
            if (uuidparam[0]).upper() != self.container.get_uuid_str().upper():
                print("uuidparam[0] is {}, uuid_str is {}".format(
                    uuidparam[0], self.container.get_uuid_str()))
                self.fail("Container UUID differs from specified at create\n")

            if should_fail:
                self.fail("Test was expected to fail but it passed.\n")

        except DaosApiError as excep:
            print(excep)
            print(traceback.format_exc())
            if not should_fail:
                self.fail("Test was expected to pass but it failed.\n")
        finally:
            if pool is not None:
                pool.disconnect()
                pool.destroy(1)
Example #31
    def test_array_obj(self):
        """
        Test ID: DAOS-961

        Test Description: Writes an array to an object and then reads it
        back and verifies it.

        :avocado: tags=object,arrayobj,regression,vm,small
        """
        try:
            # parameters used in pool create
            createmode = self.params.get("mode", '/run/conttests/createmode/')
            createsetid = self.params.get("setname",
                                          '/run/conttests/createset/')
            createsize = self.params.get("size", '/run/conttests/createsize/')
            createuid = os.geteuid()
            creategid = os.getegid()
            print("uid is {} gid is {}".format(createuid, creategid))

            # initialize a python pool object then create the underlying
            # daos storage
            pool = DaosPool(self.Context)
            pool.create(createmode, createuid, creategid, createsize,
                        createsetid, None)
            self.pl.info("Pool %s created.", pool.get_uuid_str())

            # need a connection to create container
            pool.connect(1 << 1)

            # create a container
            container = DaosContainer(self.Context)
            container.create(pool.handle)
            self.pl.info("Container %s created.", container.get_uuid_str())

            # now open it
            container.open()

            # do a query and compare the UUID returned from create with
            # that returned by query
            container.query()

            if container.get_uuid_str() != c_uuid_to_str(
                    container.info.ci_uuid):
                self.fail("Container UUID did not match the one in info\n")

            # create an object and write some data into it
            thedata = []
            thedata.append("data string one")
            thedata.append("data string two")
            thedata.append("data string tre")
            dkey = "this is the dkey"
            akey = "this is the akey"

            self.pl.info("writing array to dkey >%s< akey >%s<.", dkey, akey)
            oid, epoch = container.write_an_array_value(thedata, dkey, akey)

            # read the data back and make sure its correct
            length = len(thedata[0])
            thedata2 = container.read_an_array(len(thedata), length + 1, dkey,
                                               akey, oid, epoch)
            if thedata[0][0:length - 1] != thedata2[0][0:length - 1]:
                self.pl.error("Data mismatch")
                self.pl.error("Wrote: >%s<", thedata[0])
                self.pl.error("Read: >%s<", thedata2[0])
                self.fail("Write data, read it back, didn't match\n")

            if thedata[2][0:length - 1] != thedata2[2][0:length - 1]:
                self.pl.error("Data mismatch")
                self.pl.error("Wrote: >%s<", thedata[2])
                self.pl.error("Read: >%s<", thedata2[2])
                self.fail("Write data, read it back, didn't match\n")

            container.close()

            # wait a few seconds and then destroy
            time.sleep(5)
            container.destroy()

            # cleanup the pool
            pool.disconnect()
            pool.destroy(1)
            self.pl.info("Test Complete")

        except ValueError as e:
            self.pl.error("Test Failed, exception was thrown.")
            print(e)
            print(traceback.format_exc())
            self.fail("Test was expected to pass but it failed.\n")
Example #32
    def test_simple_rebuild(self):
        """
        Test ID: Rebuild-001

        Test Description: The most basic rebuild test.

        Use Cases:
          -- single pool rebuild, single client, various record/object
             counts

        :avocado: tags=pool,rebuild,rebuildsimple
        """
        try:

            # initialize a python pool object then create the underlying
            # daos storage
            pool = DaosPool(self.context)
            pool.create(self.createmode, self.createuid, self.creategid,
                        self.createsize, self.createsetid)

            # want an open connection during rebuild
            pool.connect(1 << 1)

            # get pool status we want to test later
            pool.pool_query()
            if pool.pool_info.pi_ndisabled != 0:
                self.fail("Number of disabled targets reporting incorrectly.\n")
            if pool.pool_info.pi_rebuild_st.rs_errno != 0:
                self.fail("Rebuild error but rebuild hasn't run.\n")
            if pool.pool_info.pi_rebuild_st.rs_done != 1:
                self.fail("Rebuild is running but device hasn't failed yet.\n")
            if pool.pool_info.pi_rebuild_st.rs_obj_nr != 0:
                self.fail("Rebuilt objs not zero.\n")
            if pool.pool_info.pi_rebuild_st.rs_rec_nr != 0:
                self.fail("Rebuilt recs not zero.\n")

            # create a container
            container = DaosContainer(self.context)
            container.create(pool.handle)

            # now open it
            container.open()

            saved_data = []
            for _objc in range(self.objcount):
                obj = None
                for _recc in range(self.reccount):
                    # make some stuff up and write
                    dkey = (
                        ''.join(random.choice(string.ascii_uppercase +
                                              string.digits) for _ in range(5)))
                    akey = (
                        ''.join(random.choice(string.ascii_uppercase +
                                              string.digits) for _ in range(5)))
                    data = (''.join(random.choice(string.ascii_uppercase +
                                                  string.digits)
                                    for _ in range(self.size)))

                    obj, txn = container.write_an_obj(data, len(data), dkey,
                                                      akey, obj, self.rank,
                                                      obj_cls=16)

                    saved_data.append((obj, dkey, akey, data, txn))

                    # read the data back and make sure its correct
                    data2 = container.read_an_obj(self.size, dkey, akey, obj,
                                                  txn)
                    if data != data2.value:
                        self.fail("Write data 1, read it back, didn't match\n")

            # kill a server that has data on it
            server = DaosServer(self.context, self.server_group, self.rank)
            server.kill(1)

            # temporarily, the exclude of a failed target must be done manually
            pool.exclude([self.rank])

            while True:
                # get the pool/rebuild status again
                pool.pool_query()
                if pool.pool_info.pi_rebuild_st.rs_done == 1:
                    break
                else:
                    time.sleep(2)

            if pool.pool_info.pi_ndisabled != 1:
                self.fail("Number of disabled targets reporting incorrectly: {}"
                          .format(pool.pool_info.pi_ndisabled))
            if pool.pool_info.pi_rebuild_st.rs_errno != 0:
                self.fail("Rebuild error reported: {}"
                          .format(pool.pool_info.pi_rebuild_st.rs_errno))
            if pool.pool_info.pi_rebuild_st.rs_obj_nr != self.objcount:
                self.fail("Rebuilt objs not as expected: {0} {1}"
                          .format(pool.pool_info.pi_rebuild_st.rs_obj_nr,
                                  self.objcount))
            if (pool.pool_info.pi_rebuild_st.rs_rec_nr !=
                    (self.reccount*self.objcount)):
                self.fail("Rebuilt recs not as expected: {0} {1}"
                          .format(pool.pool_info.pi_rebuild_st.rs_rec_nr,
                                  self.reccount*self.objcount))

            # now that the rebuild finished verify the records are correct
            for tup in saved_data:
                data2 = container.read_an_obj(len(tup[3]), tup[1], tup[2],
                                              tup[0], tup[4])
                if tup[3] != data2.value:
                    self.fail("after rebuild data didn't check out")

        except DaosApiError as excp:
            print (excp)
            print (traceback.format_exc())
            self.fail("Expecting to pass but test has failed.\n")
Example #33
class ContainerAttributeTest(Test):
    """
    Tests DAOS container attribute get/set/list.
    """
    def setUp(self):
        self.agent_sessions = None
        self.pool = None
        self.container = None
        self.hostlist = None
        self.large_data_set = {}

        with open('../../../.build_vars.json') as build_file:
            build_paths = json.load(build_file)
        basepath = os.path.normpath(build_paths['PREFIX'] + "/../")
        server_group = self.params.get("name",
                                       '/server_config/',
                                       'daos_server')
        self.context = DaosContext(build_paths['PREFIX'] + '/lib/')

        self.hostlist = self.params.get("test_machines", '/run/hosts/*')
        self.hostfile = write_host_file.write_host_file(self.hostlist,
                                                        self.workdir)

        self.agent_sessions = AgentUtils.run_agent(basepath, self.hostlist)
        server_utils.run_server(self.hostfile, server_group, basepath)

        self.pool = DaosPool(self.context)
        self.pool.create(self.params.get("mode", '/run/attrtests/createmode/*'),
                         os.geteuid(),
                         os.getegid(),
                         self.params.get("size", '/run/attrtests/createsize/*'),
                         self.params.get("setname",
                                         '/run/attrtests/createset/*'),
                         None)
        self.pool.connect(1 << 1)
        poh = self.pool.handle
        self.container = DaosContainer(self.context)
        self.container.create(poh)
        self.container.open()

    def tearDown(self):
        try:
            if self.container:
                self.container.close()
        finally:
            if self.agent_sessions:
                AgentUtils.stop_agent(self.hostlist, self.agent_sessions)
            server_utils.stop_server(hosts=self.hostlist)

    def create_data_set(self):
        """
        To create the large attribute dictionary
        """
        allchar = string.ascii_letters + string.digits
        for i in range(1024):
            self.large_data_set[str(i)] = (
                "".join(random.choice(allchar)
                        for x in range(random.randint(1, 100))))

    def test_container_attribute(self):
        """
        Test basic container attribute operations (set/list/get).

        :avocado: tags=container,container_attr,attribute,sync_conattribute
        """
        expected_for_param = []
        name = self.params.get("name", '/run/attrtests/name_handles/*/')
        expected_for_param.append(name[1])
        value = self.params.get("value", '/run/attrtests/value_handles/*/')
        expected_for_param.append(value[1])

        attr_dict = {name[0]:value[0]}
        if name[0] is not None:
            if "largenumberofattr" in name[0]:
                self.create_data_set()
                attr_dict = self.large_data_set
                attr_dict[name[0]] = value[0]

        expected_result = 'PASS'
        for result in expected_for_param:
            if result == 'FAIL':
                expected_result = 'FAIL'
                break
        try:
            self.container.set_attr(data=attr_dict)
            size, buf = self.container.list_attr()

            verify_list_attr(attr_dict, size, buf)

            # Request something that doesn't exist
            if name[0] is not None and "Negative" in name[0]:
                name[0] = "rubbish"

            results = self.container.get_attr([name[0]])

            # for this test the dictionary has been altered, need to just
            # set it to what we are expecting to get back
            if name[0] is not None:
                if "largenumberofattr" in name[0]:
                    attr_dict.clear()
                    attr_dict[name[0]] = value[0]

            verify_get_attr(attr_dict, results)

            if expected_result in ['FAIL']:
                self.fail("Test was expected to fail but it passed.\n")

        except (DaosApiError, DaosTestError) as excep:
            print(excep)
            print(traceback.format_exc())
            if expected_result == 'PASS':
                self.fail("Test was expected to pass but it failed.\n")

    def test_container_attribute_asyn(self):
        """
        Test basic container attribute operations asynchronously.

        :avocado: tags=container,container_attr,attribute,async_conattribute
        """
        global GLOB_SIGNAL
        global GLOB_RC

        expected_for_param = []
        name = self.params.get("name", '/run/attrtests/name_handles/*/')
        expected_for_param.append(name[1])
        value = self.params.get("value", '/run/attrtests/value_handles/*/')
        expected_for_param.append(value[1])

        attr_dict = {name[0]:value[0]}
        if name[0] is not None:
            if "largenumberofattr" in name[0]:
                self.create_data_set()
                attr_dict = self.large_data_set
                attr_dict[name[0]] = value[0]

        expected_result = 'PASS'
        for result in expected_for_param:
            if result == 'FAIL':
                expected_result = 'FAIL'
                break
        try:
            GLOB_SIGNAL = threading.Event()
            self.container.set_attr(data=attr_dict, cb_func=cb_func)
            GLOB_SIGNAL.wait()
            if GLOB_RC != 0 and expected_result in ['PASS']:
                self.fail("RC not as expected after set_attr First {0}"
                          .format(GLOB_RC))

            GLOB_SIGNAL = threading.Event()
            size, buf = self.container.list_attr(cb_func=cb_func)
            GLOB_SIGNAL.wait()
            if GLOB_RC != 0 and expected_result in ['PASS']:
                self.fail("RC not as expected after list_attr First {0}"
                          .format(GLOB_RC))
            if expected_result in ['PASS']:
                verify_list_attr(attr_dict, size, buf, mode="async")

            # Request something that doesn't exist
            if name[0] is not None and "Negative" in name[0]:
                name[0] = "rubbish"

            GLOB_SIGNAL = threading.Event()
            self.container.get_attr([name[0]], cb_func=cb_func)

            GLOB_SIGNAL.wait()

            if GLOB_RC != 0 and expected_result in ['PASS']:
                self.fail("RC not as expected after get_attr {0}"
                          .format(GLOB_RC))

            # not verifying the get_attr since it's not available asynchronously

            if value[0] is not None:
                if GLOB_RC == 0 and expected_result in ['FAIL']:
                    self.fail("Test was expected to fail but it passed.\n")

        except DaosApiError as excep:
            print(excep)
            print(traceback.format_exc())
            if expected_result == 'PASS':
                self.fail("Test was expected to pass but it failed.\n")

    def test_container_basics(self):
        """
        Test basic container create/destroy/open/close/query.  Nothing fancy,
        just making sure they work at a rudimentary level.

        :avocado: tags=container,containercreate,containerdestroy,basecont
        """

        pool = None
        hostlist = None

        try:
            hostlist = self.params.get("test_machines", '/run/hosts/*')
            hostfile = write_host_file.write_host_file(hostlist,
                                                       self.workdir)

            self.agent_sessions = agent_utils.run_agent(self.basepath, hostlist)
            server_utils.run_server(hostfile, self.server_group, self.basepath)

            # give it time to start
            time.sleep(2)

            # parameters used in pool create
            createmode = self.params.get("mode", '/run/conttests/createmode/')
            createuid = self.params.get("uid", '/run/conttests/createuid/')
            creategid = self.params.get("gid", '/run/conttests/creategid/')
            createsetid = self.params.get("setname",
                                          '/run/conttests/createset/')
            createsize = self.params.get("size", '/run/conttests/createsize/')

            # initialize a python pool object then create the underlying
            # daos storage
            pool = DaosPool(self.context)
            pool.create(createmode, createuid, creategid,
                        createsize, createsetid, None)

            # need a connection to create container
            pool.connect(1 << 1)

            # create a container
            container = DaosContainer(self.context)
            container.create(pool.handle)

            # now open it
            container.open()

            # do a query and compare the UUID returned from create with
            # that returned by query
            container.query()

            if container.get_uuid_str() != c_uuid_to_str(
                    container.info.ci_uuid):
                self.fail("Container UUID did not match the one in info'n")

            container.close()

            # wait a few seconds and then destroy
            time.sleep(5)
            container.destroy()

        except DaosApiError as excep:
            print(excep)
            print(traceback.format_exc())
            self.fail("Test was expected to pass but it failed.\n")
        except Exception as excep:
            self.fail("Daos code segfaulted most likely, error: %s" % excep)
        finally:
            # cleanup the pool
            if pool is not None:
                pool.disconnect()
                pool.destroy(1)
            if self.agent_sessions:
                agent_utils.stop_agent(self.agent_sessions)
            server_utils.stop_server(hosts=hostlist)
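
The attribute tests above call verify_list_attr() and verify_get_attr(), which live in the shared test utilities and are not shown in this excerpt. A hedged sketch of what such helpers might look like, assuming list_attr() returns the buffer size plus a NUL-separated buffer of names and get_attr() returns a name:value mapping (both assumptions are read off the calls above, not taken from the real helpers); DaosTestError is the exception the sync test's except clause already imports:

def verify_list_attr(indata, size, buf, mode="sync"):
    """Sketch: check that every expected attribute name appears in the listed buffer."""
    # assumption: buf holds the NUL-separated attribute names reported by list_attr()
    listed = [name for name in buf.split('\x00') if name]
    for attr_name in indata:
        if attr_name not in listed:
            raise DaosTestError("attribute {0} missing from list_attr ({1})"
                                .format(attr_name, mode))

def verify_get_attr(indata, outdata):
    """Sketch: compare the values returned by get_attr() against the expected dict."""
    # assumption: outdata maps attribute names to their values
    for attr_name, value in indata.items():
        if outdata.get(attr_name) != value:
            raise DaosTestError("value mismatch for attribute {0}".format(attr_name))
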
Beispiel #35
0
    def test_queryasync(self):
        """
        Test container query for asynchronous mode.

        :avocado: tags=container,containerasync,queryasync
        """

        global GLOB_SIGNAL
        global GLOB_RC

        # parameters used in pool create
        createmode = self.params.get("mode", '/run/createtests/createmode/*/')
        createsetid = self.params.get("setname", '/run/createtests/createset/')
        createsize = self.params.get("size", '/run/createtests/createsize/')
        createuid = os.geteuid()
        creategid = os.getegid()

        try:
            # initialize a python pool object then create the underlying
            # daos storage
            self.pool = DaosPool(self.context)

            self.pool.create(createmode, createuid, creategid,
                             createsize, createsetid, None)

            poh = self.pool.handle

            self.pool.connect(1 << 1)

            # Container initialization and creation
            self.container1 = DaosContainer(self.context)
            self.container2 = DaosContainer(self.context)

            self.container1.create(poh)

            dummy_str_cuuid = self.container1.get_uuid_str()

            # Open container
            self.container1.open(poh, None, 2, None)

            GLOB_SIGNAL = threading.Event()
            self.container1.query(cb_func=cb_func)

            GLOB_SIGNAL.wait()
            if GLOB_RC != 0:
                self.fail("RC not as expected in async test: "
                          "{0}".format(GLOB_RC))
            print("RC after successful container create: ", GLOB_RC)

            # Close opened container
            self.container1.close()

            # Try to query container2; this should fail since it was never
            # created. Checking rc after failure.
            GLOB_SIGNAL = threading.Event()
            GLOB_RC = -9900000
            self.container2.query(cb_func=cb_func)

            GLOB_SIGNAL.wait()
            if GLOB_RC == 0:
                self.fail("RC not as expected in async test: "
                          "{0}".format(GLOB_RC))
            print("RC after container destroy failed:", GLOB_RC)

            # cleanup the pool and container
            self.container1.destroy()
            self.pool.disconnect()
            self.pool.destroy(1)
            self.pool = None

        except DaosApiError as excep:
            print(excep)
            print(traceback.format_exc())
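
These asynchronous examples pass cb_func as the completion callback and block on the module-level GLOB_SIGNAL / GLOB_RC pair, none of which is shown in this excerpt. A minimal sketch, assuming the DAOS python API hands the completed event to the callback and exposes the return code on it (the ev_error attribute name is an assumption):

GLOB_SIGNAL = None   # threading.Event the test creates before each async call
GLOB_RC = -9900000   # return code reported by the completed event

def cb_func(event):
    """Sketch: record the async call's return code and wake the waiting test."""
    global GLOB_SIGNAL
    global GLOB_RC
    GLOB_RC = event.event.ev_error   # assumption: the DAOS return code lives here
    GLOB_SIGNAL.set()
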
Beispiel #36
0
    def test_closehandle(self):
        """
        Test container close function with container handle parameter.

        :avocado: tags=container,openclose,closehandle
        """
        saved_coh = None

        # parameters used in pool create
        createmode = self.params.get("mode", '/run/pool/createmode/')
        createuid = os.geteuid()
        creategid = os.getegid()
        createsetid = self.params.get("setname", '/run/pool/createset/')
        createsize = self.params.get("size", '/run/pool/createsize/')
        coh_params = self.params.get("coh",
                                     '/run/container/container_handle/*/')

        expected_result = coh_params[1]

        try:
            # initialize a python pool object then create the underlying
            # daos storage
            self.pool = DaosPool(self.Context)
            self.pool.create(createmode, createuid, creategid, createsize,
                             createsetid, None)

            poh = self.pool.handle
            self.pool.connect(1 << 1)

            # Container initialization and creation
            self.Container1 = DaosContainer(self.Context)
            self.Container1.create(poh)
            str_cuuid = self.Container1.get_uuid_str()
            cuuid = uuid.UUID(str_cuuid)
            self.Container1.open(poh, cuuid, 2, None)

            # Defining 'good' and 'bad' container handles
            saved_coh = self.Container1.coh
            if coh_params[0] == 'GOOD':
                coh = self.Container1.coh
            else:
                # create a second container, open to get a handle
                # then close & destroy so handle is invalid
                self.Container2 = DaosContainer(self.Context)
                self.Container2.create(poh)
                self.Container2.open(poh, cuuid, 2, None)
                coh = self.Container2.coh
                self.Container2.close()
                self.Container2.destroy()

            # close container with either good or bad handle
            self.Container1.close(coh)

            if expected_result in ['FAIL']:
                self.fail("Test was expected to fail but it passed.\n")

        except DaosApiError as e:
            if expected_result == 'PASS':
                print(e)
                print(traceback.format_exc())
                self.fail("Test was expected to pass but it failed.\n")

            # close above failed so close for real with the right coh
            if saved_coh is not None:
                self.Container1.close(saved_coh)

        finally:
            self.Container1.destroy(1)
            self.pool.disconnect()
            self.pool.destroy(1)
            self.pool = None
Beispiel #37
0
class OpenClose(Test):
    """
    Tests DAOS container open/close function with handle parameter.
    """
    def __init__(self, *args, **kwargs):
        super(OpenClose, self).__init__(*args, **kwargs)
        self.container1 = None
        self.container2 = None

    def setUp(self):
        # these are first since they are referenced in teardown
        self.pool = None
        self.hostlist = None
        self.hostlist = self.params.get("test_servers", '/run/hosts/')

        # get paths from the build_vars generated by build
        with open(os.path.join(os.path.dirname(os.path.realpath(__file__)),
                               '../../../../.build_vars.json')) as build_file:
            build_paths = json.load(build_file)
        self.basepath = os.path.normpath(build_paths['PREFIX'] + "/../")
        self.server_group = self.params.get("name", '/server_config/',
                                            'daos_server')

        # setup the DAOS python API
        self.context = DaosContext(build_paths['PREFIX'] + '/lib/')

        self.hostfile = write_host_file.write_host_file(self.hostlist,
                                                        self.workdir)

        self.agent_sessions = AgentUtils.run_agent(self.basepath, self.hostlist)
        server_utils.run_server(self.hostfile, self.server_group, self.basepath)

    def tearDown(self):
        try:
            if self.pool is not None and self.pool.attached:
                self.pool.destroy(1)
        finally:
            try:
                if self.agent_sessions:
                    AgentUtils.stop_agent(self.hostlist, self.agent_sessions)
                server_utils.stop_server(hosts=self.hostlist)
            except server_utils.ServerFailed:
                pass

    def test_closehandle(self):
        """
        Test container close function with container handle parameter.

        :avocado: tags=container,openclose,closehandle
        """
        saved_coh = None

        # parameters used in pool create
        createmode = self.params.get("mode", '/run/pool/createmode/')
        createuid = os.geteuid()
        creategid = os.getegid()
        createsetid = self.params.get("setname", '/run/pool/createset/')
        createsize = self.params.get("size", '/run/pool/createsize/')
        coh_params = self.params.get("coh",
                                     '/run/container/container_handle/*/')

        expected_result = coh_params[1]

        try:
            # initialize a python pool object then create the underlying
            # daos storage
            self.pool = DaosPool(self.context)
            self.pool.create(createmode, createuid, creategid,
                             createsize, createsetid, None)

            poh = self.pool.handle
            self.pool.connect(1 << 1)

            # Container initialization and creation
            self.container1 = DaosContainer(self.context)
            self.container1.create(poh)
            str_cuuid = self.container1.get_uuid_str()
            cuuid = uuid.UUID(str_cuuid)
            self.container1.open(poh, cuuid, 2, None)

            # Defining 'good' and 'bad' container handles
            saved_coh = self.container1.coh
            if coh_params[0] == 'GOOD':
                coh = self.container1.coh
            else:
                # create a second container, open to get a handle
                # then close & destroy so handle is invalid
                self.container2 = DaosContainer(self.context)
                self.container2.create(poh)
                self.container2.open(poh, cuuid, 2, None)
                coh = self.container2.coh
                self.container2.close()
                self.container2.destroy()

            # close container with either good or bad handle
            self.container1.close(coh)

            if expected_result in ['FAIL']:
                self.fail("Test was expected to fail but it passed.\n")

        except DaosApiError as excep:
            if expected_result == 'PASS':
                print(excep)
                print(traceback.format_exc())
                self.fail("Test was expected to pass but it failed.\n")

            # close above failed so close for real with the right coh
            if saved_coh is not None:
                self.container1.close(saved_coh)

        finally:
            self.container1.destroy(1)
            self.pool.disconnect()
            self.pool.destroy(1)
            self.pool = None
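
Every example here connects with the raw flag pool.connect(1 << 1). A small illustrative sketch naming the bits, on the assumption that they mirror the usual DAOS pool-connect flags (read-only, read-write, exclusive); the constant names are hypothetical, not taken from the API:

# Hypothetical names for the connect-flag bits used as pool.connect(1 << 1) above.
PC_RO = 1 << 0   # assumed: read-only connection
PC_RW = 1 << 1   # assumed: read-write connection (what these tests pass)
PC_EX = 1 << 2   # assumed: exclusive connection

# e.g. self.pool.connect(PC_RW) reads better than the bare 1 << 1
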
Beispiel #38
0
    def test_rebuild_with_io(self):
        """
        Test ID: Rebuild-003

        Test Description: Trigger a rebuild while I/O is ongoing.

        Use Cases:
          -- single pool, single client performing continuous read/write/verify
             sequence while failure/rebuild is triggered in another process

        :avocado: tags=pool,rebuild,rebuildwithio
        """

        # the rebuild tests need to redo this setup each time, so it is not
        # done in setUp() as it usually would be
        server_group = self.params.get("name", '/server_config/',
                                       'daos_server')

        basepath = os.path.normpath(self.build_paths['PREFIX'] + "/../")

        self.hostlist = self.params.get("test_machines", '/run/hosts/')
        hostfile = write_host_file.write_host_file(self.hostlist, self.workdir)

        try:
            self.agent_sessions = AgentUtils.run_agent(basepath, self.hostlist)
            server_utils.run_server(hostfile, server_group, basepath)

            # use the uid/gid of the user running the test, these should
            # be perfectly valid
            createuid = os.geteuid()
            creategid = os.getegid()

            # parameters used in pool create that are in yaml
            createmode = self.params.get("mode", '/run/testparams/createmode/')
            createsetid = self.params.get("setname",
                                          '/run/testparams/createset/')
            createsize = self.params.get("size", '/run/testparams/createsize/')

            # initialize a python pool object then create the underlying
            # daos storage
            pool = DaosPool(self.context)
            pool.create(createmode, createuid, creategid,
                        createsize, createsetid, None)
            pool.connect(1 << 1)
            container = DaosContainer(self.context)
            container.create(pool.handle)
            container.open()

            # get pool status and make sure it all looks good before we start
            pool.pool_query()
            if pool.pool_info.pi_ndisabled != 0:
                self.fail("Number of disabled targets reporting incorrectly.\n")
            if pool.pool_info.pi_rebuild_st.rs_errno != 0:
                self.fail("Rebuild error but rebuild hasn't run.\n")
            if pool.pool_info.pi_rebuild_st.rs_done != 1:
                self.fail("Rebuild is running but device hasn't failed yet.\n")
            if pool.pool_info.pi_rebuild_st.rs_obj_nr != 0:
                self.fail("Rebuilt objs not zero.\n")
            if pool.pool_info.pi_rebuild_st.rs_rec_nr != 0:
                self.fail("Rebuilt recs not zero.\n")
            dummy_pool_version = pool.pool_info.pi_rebuild_st.rs_version

            # do I/O for 30 seconds
            dummy_bw = io_utilities.continuous_io(container, 30)

            # trigger the rebuild
            rank = self.params.get("rank", '/run/testparams/ranks/*')
            server = DaosServer(self.context, server_group, rank)
            server.kill(1)
            pool.exclude([rank])

            # do another 30 seconds of I/O,
            # waiting for some improvements in server bootstrap
            # at which point we can move the I/O to a separate client and
            # really pound it with I/O
            dummy_bw = io_utilities.continuous_io(container, 30)

            # wait for the rebuild to finish
            while True:
                pool.pool_query()
                if pool.pool_info.pi_rebuild_st.rs_done == 1:
                    break
                else:
                    time.sleep(2)

            # check rebuild statistics
            if pool.pool_info.pi_ndisabled != 1:
                self.fail("Number of disabled targets reporting incorrectly: {}"
                          .format(pool.pool_info.pi_ndisabled))
            if pool.pool_info.pi_rebuild_st.rs_errno != 0:
                self.fail("Rebuild error reported: {}".format(
                    pool.pool_info.pi_rebuild_st.rs_errno))
            if pool.pool_info.pi_rebuild_st.rs_obj_nr <= 0:
                self.fail("No objects have been rebuilt.")
            if pool.pool_info.pi_rebuild_st.rs_rec_nr <= 0:
                self.fail("No records have been rebuilt.")

        except (ValueError, DaosApiError) as excep:
            print(excep)
            print(traceback.format_exc())
            self.fail("Expecting to pass but test has failed.\n")

        finally:
            # wait for the I/O process to finish
            try:
                server_utils.stop_server(hosts=self.hostlist)
                os.remove(hostfile)
                # really make sure everything is gone
                check_for_pool.cleanup_pools(self.hostlist)
            finally:
                if self.agent_sessions:
                    AgentUtils.stop_agent(self.hostlist, self.agent_sessions)
                server_utils.kill_server(self.hostlist)
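
The io_utilities.continuous_io() helper used above to keep I/O flowing while the rebuild is triggered is not shown in this excerpt. A rough sketch of what such a helper might do, assuming it loops write/read/verify over random keys until the time budget is spent and returns an approximate bytes-per-second figure (the signature and return value are assumptions):

import random
import string
import time

def continuous_io(container, seconds):
    """Sketch: write/read/verify random records against container for `seconds`."""
    total_bytes = 0
    obj = None
    start = time.time()
    while time.time() - start < seconds:
        dkey = ''.join(random.choice(string.ascii_uppercase) for _ in range(5))
        akey = ''.join(random.choice(string.ascii_uppercase) for _ in range(5))
        data = ''.join(random.choice(string.ascii_uppercase) for _ in range(500))
        obj, txn = container.write_an_obj(data, len(data), dkey, akey, obj,
                                          obj_cls=16)
        read_back = container.read_an_obj(len(data), dkey, akey, obj, txn)
        if data != read_back.value:
            # the caller catches ValueError, so report verification failures that way
            raise ValueError("continuous_io: data verification failed")
        total_bytes += len(data)
    return total_bytes / max(time.time() - start, 1)   # rough bytes per second
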
Beispiel #39
0
    def test_container_delete(self):
        """
        Test basic container delete
        :avocado: tags=regression,cont,vm,contdelete
        """
        expected_for_param = []
        uuidlist = self.params.get("uuid",
                                   '/run/createtests/ContainerUUIDS/*/')
        cont_uuid = uuidlist[0]
        expected_for_param.append(uuidlist[1])

        pohlist = self.params.get("poh", '/run/createtests/PoolHandles/*/')
        poh = pohlist[0]
        expected_for_param.append(pohlist[1])

        openlist = self.params.get("opened",
                                   "/run/createtests/ConnectionOpened/*/")
        opened = openlist[0]
        expected_for_param.append(openlist[1])

        forcelist = self.params.get("force", "/run/createtests/ForceDestroy/*/")
        force = forcelist[0]
        expected_for_param.append(forcelist[1])

        if force >= 1:
            self.cancel("Force >= 1 blocked by issue described in "
                        "https://jira.hpdd.intel.com/browse/DAOS-689")

        if force == 0:
            self.cancel("Force = 0 blocked by "
                        "https://jira.hpdd.intel.com/browse/DAOS-1935")

        expected_result = 'PASS'
        for result in expected_for_param:
            if result == 'FAIL':
                expected_result = 'FAIL'
                break

        try:
            # initialize a python pool object then create the underlying
            # daos storage
            self.pool = DaosPool(self.context)
            self.pool.create(self.createmode, self.createuid, self.creategid,
                             self.createsize, self.createsetid, None)

            # need a connection to create container
            self.pool.connect(1 << 1)
            self.container = DaosContainer(self.context)

            # create should always work (testing destroy)
            if not cont_uuid == 'INVALID':
                cont_uuid = uuid.UUID(uuidlist[0])
                self.container.create(self.pool.handle, cont_uuid)
            else:
                self.container.create(self.pool.handle)

            # Opens the container if required
            if opened:
                self.container.open(self.pool.handle)

            # wait a few seconds and then attempt to destroy the container
            time.sleep(5)
            if poh == 'VALID':
                poh = self.pool.handle

            # if container is INVALID, overwrite with non existing UUID
            if cont_uuid == 'INVALID':
                cont_uuid = uuid.uuid4()

            self.container.destroy(force=force, poh=poh, con_uuid=cont_uuid)
            self.container = None

            if expected_result in ['FAIL']:
                self.fail("Test was expected to fail but it passed.\n")

        except DaosApiError as excep:
            self.d_log.error(excep)
            self.d_log.error(traceback.format_exc())
            if expected_result == 'PASS':
                self.fail("Test was expected to pass but it failed.\n")

        finally:
            # clean up the pool
            if self.pool is not None:
                self.pool.destroy(1)
                self.pool = None
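
Several of these parameterized tests repeat the same loop that folds the per-parameter PASS/FAIL markers from the avocado yaml into one expected result. A small hedged helper capturing that convention (each params.get() entry is assumed to be a [value, 'PASS'|'FAIL'] pair, as the indexing above implies; the helper name is hypothetical):

def expected_outcome(param_entries):
    """Sketch: collapse per-parameter PASS/FAIL markers into a single result."""
    # assumption: each entry is a [value, 'PASS' or 'FAIL'] pair from the yaml
    for entry in param_entries:
        if entry[1] == 'FAIL':
            return 'FAIL'
    return 'PASS'

# e.g. expected_result = expected_outcome([uuidlist, pohlist, openlist, forcelist])
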
Beispiel #40
0
    def test_destroy_withdata(self):
        """
        Test destroying a pool that still holds container data
        (written via daosctl write-pattern).

        :avocado: tags=pool,pooldestroy,destroydata
        """
        try:
            # write out a hostfile and start the servers with it
            self.hostlist = self.params.get("test_machines1", '/run/hosts/')
            hostfile = write_host_file.write_host_file(self.hostlist, self.tmp)

            self.agent_sessions = AgentUtils.run_agent(self.basepath,
                                                       self.hostlist)
            server_utils.run_server(hostfile, self.server_group, self.basepath)

            # parameters used in pool create
            createmode = self.params.get("mode", '/run/poolparams/createmode/')
            createuid = self.params.get("uid", '/run/poolparams/createuid/')
            creategid = self.params.get("gid", '/run/poolparams/creategid/')
            createsetid = self.params.get("setname",
                                          '/run/poolparams/createset/')
            createsize = self.params.get("size", '/run/poolparams/createsize/')

            # initialize a python pool object then create the underlying
            # daos storage
            pool = DaosPool(self.context)
            pool.create(createmode, createuid, creategid,
                        createsize, createsetid, None)

            # need a connection to create container
            pool.connect(1 << 1)

            # create a container
            container = DaosContainer(self.context)
            container.create(pool.handle)

            pool.disconnect()

            daosctl = self.basepath + '/install/bin/daosctl'

            write_cmd = ('{0} write-pattern -i {1} -l 0 -c {2} -p sequential'.
                         format(daosctl, c_uuid_to_str(pool.uuid),
                                c_uuid_to_str(container.uuid)))

            process.system_output(write_cmd)

            # blow it away
            pool.destroy(1)

        except DaosApiError as excep:
            print(excep)
            print(traceback.format_exc())
            self.fail("create/destroy/create/destroy test failed.\n")

        except Exception as excep:
            self.fail("Daos code segfaulted most likely.  Error: %s" % excep)

        # no matter what happens cleanup
        finally:
            if self.agent_sessions:
                AgentUtils.stop_agent(self.hostlist, self.agent_sessions)
            server_utils.stop_server(hosts=self.hostlist)
            os.remove(hostfile)
Beispiel #41
0
    def write_a_bunch_of_values(self, how_many):
        """
        Write a number of key:value pairs to an object, each under its own
        dkey and akey.  The how_many parameter determines how many pairs
        are written.
        """

        self.container = DaosContainer(self.context)
        self.container.create(self.pool.handle)
        self.container.open()

        ioreq = IORequest(self.context, self.container, None)
        epoch = self.container.get_new_epoch()
        c_epoch = ctypes.c_uint64(epoch)

        print("Started Writing the Dataset-----------\n")
        inc = 50000
        last_key = inc
        for key in range(how_many):
            c_dkey = ctypes.create_string_buffer("dkey {0}".format(key))
            c_akey = ctypes.create_string_buffer("akey {0}".format(key))
            c_value = ctypes.create_string_buffer(
                "some data that gets stored with the key {0}".format(key))
            c_size = ctypes.c_size_t(ctypes.sizeof(c_value))
            ioreq.single_insert(c_dkey,
                                c_akey,
                                c_value,
                                c_size,
                                c_epoch)

            if key > last_key:
                print("written: {}".format(key))
                sys.stdout.flush()
                last_key = key + inc

        self.container.commit_epoch(c_epoch)

        print("Started Verification of the Dataset-----------\n")
        last_key = inc
        for key in range(how_many):
            c_dkey = ctypes.create_string_buffer("dkey {0}".format(key))
            c_akey = ctypes.create_string_buffer("akey {0}".format(key))
            the_data = "some data that gets stored with the key {0}".format(key)
            val = ioreq.single_fetch(c_dkey,
                                     c_akey,
                                     len(the_data)+1,
                                     c_epoch)

            if the_data != (repr(val.value)[1:-1]):
                self.fail("ERROR: Data mismatch for dkey = {0}, akey={1}, "
                          "Expected Value={2} and Received Value={3}\n"
                          .format("dkey {0}".format(key),
                                  "akey {0}".format(key),
                                  the_data,
                                  repr(val.value)[1:-1]))

            if key > last_key:
                print("veried: {}".format(key))
                sys.stdout.flush()
                last_key = key + inc

        print("starting destroy")
        self.container.close()
        self.container.destroy()
        print("destroy complete")
Beispiel #42
0
class CreateManyDkeys(Test):
    """
    Test Class Description:
        Tests that create large numbers of keys in objects/containers and then
        destroy the containers and verify the space has been reclaimed.

    """
    def setUp(self):
        self.agent_sessions = None
        with open('../../../.build_vars.json') as json_f:
            build_paths = json.load(json_f)
        basepath = os.path.normpath(build_paths['PREFIX'] + "/../")
        server_group = self.params.get("name",
                                       '/server_config/',
                                       'daos_server')
        self.context = DaosContext(build_paths['PREFIX'] + '/lib/')
        self.container = None
        self.hostlist = self.params.get("test_machines", '/run/hosts/*')
        self.hostfile = write_host_file.write_host_file(self.hostlist,
                                                        self.workdir)

        self.agent_sessions = AgentUtils.run_agent(basepath, self.hostlist)
        server_utils.run_server(self.hostfile, server_group, basepath)

        self.pool = DaosPool(self.context)
        self.pool.create(self.params.get("mode", '/run/pool/createmode/*'),
                         os.geteuid(),
                         os.getegid(),
                         self.params.get("size", '/run/pool/createsize/*'),
                         self.params.get("setname", '/run/pool/createset/*'),
                         None)
        self.pool.connect(1 << 1)

    def tearDown(self):
        try:
            if self.hostfile is not None:
                os.remove(self.hostfile)
            if self.pool:
                self.pool.destroy(1)
        finally:
            if self.agent_sessions:
                AgentUtils.stop_agent(self.hostlist, self.agent_sessions)
            server_utils.stop_server(hosts=self.hostlist)

    def write_a_bunch_of_values(self, how_many):
        """
        Write a number of key:value pairs to an object, each under its own
        dkey and akey.  The how_many parameter determines how many pairs
        are written.
        """

        self.container = DaosContainer(self.context)
        self.container.create(self.pool.handle)
        self.container.open()

        ioreq = IORequest(self.context, self.container, None)
        epoch = self.container.get_new_epoch()
        c_epoch = ctypes.c_uint64(epoch)

        print("Started Writing the Dataset-----------\n")
        inc = 50000
        last_key = inc
        for key in range(how_many):
            c_dkey = ctypes.create_string_buffer("dkey {0}".format(key))
            c_akey = ctypes.create_string_buffer("akey {0}".format(key))
            c_value = ctypes.create_string_buffer(
                "some data that gets stored with the key {0}".format(key))
            c_size = ctypes.c_size_t(ctypes.sizeof(c_value))
            ioreq.single_insert(c_dkey,
                                c_akey,
                                c_value,
                                c_size,
                                c_epoch)

            if key > last_key:
                print("written: {}".format(key))
                sys.stdout.flush()
                last_key = key + inc

        self.container.commit_epoch(c_epoch)

        print("Started Verification of the Dataset-----------\n")
        last_key = inc
        for key in range(how_many):
            c_dkey = ctypes.create_string_buffer("dkey {0}".format(key))
            c_akey = ctypes.create_string_buffer("akey {0}".format(key))
            the_data = "some data that gets stored with the key {0}".format(key)
            val = ioreq.single_fetch(c_dkey,
                                     c_akey,
                                     len(the_data)+1,
                                     c_epoch)

            if the_data != (repr(val.value)[1:-1]):
                self.fail("ERROR: Data mismatch for dkey = {0}, akey={1}, "
                          "Expected Value={2} and Received Value={3}\n"
                          .format("dkey {0}".format(key),
                                  "akey {0}".format(key),
                                  the_data,
                                  repr(val.value)[1:-1]))

            if key > last_key:
                print("veried: {}".format(key))
                sys.stdout.flush()
                last_key = key + inc

        print("starting destroy")
        self.container.close()
        self.container.destroy()
        print("destroy complete")

    @avocado.fail_on(DaosApiError)
    @avocado.skip("Skipping until DAOS-1721 is fixed.")
    def test_many_dkeys(self):
        """
        Test ID: DAOS-1701
        Test Description: Test many dkeys in the same object.
        Use Cases: 1. large key counts
                   2. space reclamation after destroy
        :avocado: tags=object,vm,many_dkeys

        """

        no_of_dkeys = self.params.get("number_of_dkeys", '/run/dkeys/')

        # write a lot of individual data items, verify them, then destroy
        self.write_a_bunch_of_values(no_of_dkeys)


        # do it again, which should verify the first container
        # was truly destroyed because a second round won't fit otherwise
        self.write_a_bunch_of_values(no_of_dkeys)
Beispiel #43
0
class ContainerAsync(Test):
    """
    Tests DAOS container create/destroy/open/close/query in asynchronous mode.
    """

    def __init__(self, *args, **kwargs):
        super(ContainerAsync, self).__init__(*args, **kwargs)
        self.container1 = None
        self.container2 = None

    def setUp(self):
        self.agent_sessions = None
        self.hostlist = None
        self.pool = None

        # get paths from the build_vars generated by build
        with open('../../../.build_vars.json') as build_file:
            build_paths = json.load(build_file)
        self.basepath = os.path.normpath(build_paths['PREFIX'] + "/../")

        self.server_group = self.params.get("name", '/server_config/',
                                            'daos_server')

        # setup the DAOS python API
        self.context = DaosContext(build_paths['PREFIX'] + '/lib/')
        self.pool = None

        self.hostlist = self.params.get("test_machines", '/run/hosts/*')
        self.hostfile = write_host_file.write_host_file(self.hostlist,
                                                        self.workdir)
        print("Host file is: {}".format(self.hostfile))

        self.agent_sessions = AgentUtils.run_agent(self.basepath, self.hostlist)
        server_utils.run_server(self.hostfile, self.server_group, self.basepath)
        time.sleep(10)

    def tearDown(self):
        try:
            if self.pool is not None and self.pool.attached:
                self.pool.destroy(1)
        finally:
            if self.agent_sessions:
                AgentUtils.stop_agent(self.hostlist, self.agent_sessions)
            time.sleep(5)
            server_utils.stop_server(hosts=self.hostlist)

    def test_createasync(self):
        """
        Test container create for asynchronous mode.

        :avocado: tags=container,containerasync,createasync
        """

        global GLOB_SIGNAL
        global GLOB_RC

        # parameters used in pool create
        createmode = self.params.get("mode", '/run/createtests/createmode/*/')
        createsetid = self.params.get("setname", '/run/createtests/createset/')
        createsize = self.params.get("size", '/run/createtests/createsize/')
        createuid = os.geteuid()
        creategid = os.getegid()

        try:
            # initialize a python pool object then create the underlying
            # daos storage
            self.pool = DaosPool(self.context)

            self.pool.create(createmode, createuid, creategid,
                             createsize, createsetid, None)

            poh = self.pool.handle

            self.pool.connect(1 << 1)

            # Container initialization and creation
            self.container1 = DaosContainer(self.context)
            self.container2 = DaosContainer(self.context)


            GLOB_SIGNAL = threading.Event()
            self.container1.create(poh, None, cb_func)

            GLOB_SIGNAL.wait()
            if GLOB_RC != 0:
                self.fail("RC not as expected in async test")
            print("RC after successful container create: ", GLOB_RC)

            # Try to recreate container after destroying pool,
            # this should fail. Checking rc after failure.
            self.pool.destroy(1)
            GLOB_SIGNAL = threading.Event()
            GLOB_RC = -9900000
            self.container2.create(poh, None, cb_func)

            GLOB_SIGNAL.wait()
            if GLOB_RC == 0:
                self.fail("RC not as expected in async test")
            print("RC after unsuccessful container create: ", GLOB_RC)

            # cleanup the pool and container
            self.pool = None

        except DaosApiError as excep:
            print(excep)
            print(traceback.format_exc())

    def test_destroyasync(self):
        """
        Test container destroy for asynchronous mode.

        :avocado: tags=container,containerasync,contdestroyasync
        """

        global GLOB_SIGNAL
        global GLOB_RC

        # parameters used in pool create
        createmode = self.params.get("mode", '/run/createtests/createmode/*/')
        createsetid = self.params.get("setname", '/run/createtests/createset/')
        createsize = self.params.get("size", '/run/createtests/createsize/')
        createuid = os.geteuid()
        creategid = os.getegid()

        try:
            # initialize a python pool object then create the underlying
            # daos storage
            self.pool = DaosPool(self.context)

            self.pool.create(createmode, createuid, creategid,
                             createsize, createsetid, None)

            poh = self.pool.handle

            self.pool.connect(1 << 1)

            # Container initialization and creation
            self.container1 = DaosContainer(self.context)
            self.container2 = DaosContainer(self.context)

            self.container1.create(poh)

            GLOB_SIGNAL = threading.Event()
            self.container1.destroy(1, poh, None, cb_func)

            GLOB_SIGNAL.wait()
            if GLOB_RC != 0:
                self.fail("RC not as expected in async test")
            print("RC after successful container create: ", GLOB_RC)

            # Try to destroy container2; this should fail since it was never
            # created. Checking rc after failure.
            GLOB_SIGNAL = threading.Event()
            GLOB_RC = -9900000
            self.container2.destroy(1, poh, None, cb_func)

            GLOB_SIGNAL.wait()
            if GLOB_RC != -1003:
                self.fail("RC not as expected in async test")
            print("RC after container destroy failed:", GLOB_RC)

            # cleanup the pool and container
            self.pool.disconnect()
            self.pool.destroy(1)
            self.pool = None

        except DaosApiError as excep:
            print(excep)
            print(traceback.format_exc())

    def test_openasync(self):
        """
        Test container open for asynchronous mode.

        :avocado: tags=container,containerasync,openasync
        """

        global GLOB_SIGNAL
        global GLOB_RC

        # parameters used in pool create
        createmode = self.params.get("mode", '/run/createtests/createmode/*/')
        createsetid = self.params.get("setname", '/run/createtests/createset/')
        createsize = self.params.get("size", '/run/createtests/createsize/')
        createuid = os.geteuid()
        creategid = os.getegid()

        try:
            # initialize a python pool object then create the underlying
            # daos storage
            self.pool = DaosPool(self.context)

            self.pool.create(createmode, createuid, creategid,
                             createsize, createsetid, None)

            poh = self.pool.handle

            self.pool.connect(1 << 1)

            # Container initialization and creation
            self.container1 = DaosContainer(self.context)
            self.container2 = DaosContainer(self.context)

            self.container1.create(poh)

            str_cuuid = self.container1.get_uuid_str()
            cuuid = uuid.UUID(str_cuuid)

            GLOB_SIGNAL = threading.Event()
            self.container1.open(poh, cuuid, 2, cb_func)

            GLOB_SIGNAL.wait()
            if GLOB_RC != 0:
                self.fail("RC not as expected in async test")
            print("RC after successful container create: ", GLOB_RC)

            # Try to open container2, this should fail, as non-existent.
            # Checking rc after failure.
            GLOB_SIGNAL = threading.Event()
            GLOB_RC = -9900000
            self.container2.open(None, None, None, cb_func)

            GLOB_SIGNAL.wait()
            if GLOB_RC == 0:
                self.fail("RC not as expected in async test")
            print("RC after container destroy failed:", GLOB_RC)

            # cleanup the pool and container
            self.container1.close()
            self.container1.destroy()
            self.pool.disconnect()
            self.pool.destroy(1)
            self.pool = None

        except DaosApiError as excep:
            print(excep)
            print(traceback.format_exc())

    def test_closeasync(self):
        """
        Test container close for asynchronous mode.

        :avocado: tags=container,containerasync,closeasync
        """

        global GLOB_SIGNAL
        global GLOB_RC

        # parameters used in pool create
        createmode = self.params.get("mode", '/run/createtests/createmode/*/')
        createsetid = self.params.get("setname", '/run/createtests/createset/')
        createsize = self.params.get("size", '/run/createtests/createsize/')
        createuid = os.geteuid()
        creategid = os.getegid()

        try:
            # initialize a python pool object then create the underlying
            # daos storage
            self.pool = DaosPool(self.context)

            self.pool.create(createmode, createuid, creategid,
                             createsize, createsetid, None)


            poh = self.pool.handle

            self.pool.connect(1 << 1)

            # Container initialization and creation
            self.container1 = DaosContainer(self.context)
            self.container2 = DaosContainer(self.context)

            self.container1.create(poh)

            str_cuuid = self.container1.get_uuid_str()
            cuuid = uuid.UUID(str_cuuid)

            self.container1.open(poh, cuuid, 2)

            GLOB_SIGNAL = threading.Event()
            self.container1.close(cb_func=cb_func)

            GLOB_SIGNAL.wait()
            if GLOB_RC != 0:
                self.fail("RC not as expected in async test: "
                          "{0}".format(GLOB_RC))
            print("RC after successful container create: ", GLOB_RC)

            # Try to close container2; this should fail since it was never
            # created or opened. Checking rc after failure.
            GLOB_SIGNAL = threading.Event()
            GLOB_RC = -9900000
            self.container2.close(cb_func=cb_func)

            GLOB_SIGNAL.wait()
            if GLOB_RC == 0:
                self.fail("RC not as expected in async test: "
                          "{0}".format(GLOB_RC))
            print("RC after container destroy failed:", GLOB_RC)

            # cleanup the pool and container
            self.container1.destroy()
            self.pool.disconnect()
            self.pool.destroy(1)
            self.pool = None

        except DaosApiError as excep:
            print(excep)
            print(traceback.format_exc())

    def test_queryasync(self):
        """
        Test container query for asynchronous mode.

        :avocado: tags=container,containerasync,queryasync
        """

        global GLOB_SIGNAL
        global GLOB_RC

        # parameters used in pool create
        createmode = self.params.get("mode", '/run/createtests/createmode/*/')
        createsetid = self.params.get("setname", '/run/createtests/createset/')
        createsize = self.params.get("size", '/run/createtests/createsize/')
        createuid = os.geteuid()
        creategid = os.getegid()

        try:
            # initialize a python pool object then create the underlying
            # daos storage
            self.pool = DaosPool(self.context)

            self.pool.create(createmode, createuid, creategid,
                             createsize, createsetid, None)

            poh = self.pool.handle

            self.pool.connect(1 << 1)

            # Container initialization and creation
            self.container1 = DaosContainer(self.context)
            self.container2 = DaosContainer(self.context)

            self.container1.create(poh)

            dummy_str_cuuid = self.container1.get_uuid_str()

            # Open container
            self.container1.open(poh, None, 2, None)

            GLOB_SIGNAL = threading.Event()
            self.container1.query(cb_func=cb_func)

            GLOB_SIGNAL.wait()
            if GLOB_RC != 0:
                self.fail("RC not as expected in async test: "
                          "{0}".format(GLOB_RC))
            print("RC after successful container create: ", GLOB_RC)

            # Close opened container
            self.container1.close()

            # Try to query container2; this should fail since it was never
            # created. Checking rc after failure.
            GLOB_SIGNAL = threading.Event()
            GLOB_RC = -9900000
            self.container2.query(cb_func=cb_func)

            GLOB_SIGNAL.wait()
            if GLOB_RC == 0:
                self.fail("RC not as expected in async test: "
                          "{0}".format(GLOB_RC))
            print("RC after container destroy failed:", GLOB_RC)

            # cleanup the pool and container
            self.container1.destroy()
            self.pool.disconnect()
            self.pool.destroy(1)
            self.pool = None

        except DaosApiError as excep:
            print(excep)
            print(traceback.format_exc())
Beispiel #44
0
    def test_closehandle(self):
        """
        Test container close function with container handle parameter.

        :avocado: tags=container,openclose,closehandle
        """
        saved_coh = None

        # parameters used in pool create
        createmode = self.params.get("mode", '/run/pool/createmode/')
        createuid = os.geteuid()
        creategid = os.getegid()
        createsetid = self.params.get("setname", '/run/pool/createset/')
        createsize = self.params.get("size", '/run/pool/createsize/')
        coh_params = self.params.get("coh",
                                     '/run/container/container_handle/*/')

        expected_result = coh_params[1]

        try:
            # initialize a python pool object then create the underlying
            # daos storage
            self.pool = DaosPool(self.context)
            self.pool.create(createmode, createuid, creategid,
                             createsize, createsetid, None)

            poh = self.pool.handle
            self.pool.connect(1 << 1)

            # Container initialization and creation
            self.container1 = DaosContainer(self.context)
            self.container1.create(poh)
            str_cuuid = self.container1.get_uuid_str()
            cuuid = uuid.UUID(str_cuuid)
            self.container1.open(poh, cuuid, 2, None)

            # Defining 'good' and 'bad' container handles
            saved_coh = self.container1.coh
            if coh_params[0] == 'GOOD':
                coh = self.container1.coh
            else:
                # create a second container, open to get a handle
                # then close & destroy so handle is invalid
                self.container2 = DaosContainer(self.context)
                self.container2.create(poh)
                self.container2.open(poh, cuuid, 2, None)
                coh = self.container2.coh
                self.container2.close()
                self.container2.destroy()

            # close container with either good or bad handle
            self.container1.close(coh)

            if expected_result in ['FAIL']:
                self.fail("Test was expected to fail but it passed.\n")

        except DaosApiError as excep:
            if expected_result == 'PASS':
                print(excep)
                print(traceback.format_exc())
                self.fail("Test was expected to pass but it failed.\n")

            # close above failed so close for real with the right coh
            if saved_coh is not None:
                self.container1.close(saved_coh)

        finally:
            self.container1.destroy(1)
            self.pool.disconnect()
            self.pool.destroy(1)
            self.pool = None
Beispiel #45
0
class OpenClose(Test):
    """
    Tests DAOS container open/close function with handle parameter.
    """
    def setUp(self):
        # these are first since they are referenced in teardown
        self.pool = None
        self.hostlist = None
        self.hostlist = self.params.get("test_servers", '/run/hosts/')

        # get paths from the build_vars generated by build
        with open(
                os.path.join(os.path.dirname(os.path.realpath(__file__)),
                             '../../../../.build_vars.json')) as f:
            build_paths = json.load(f)
        self.basepath = os.path.normpath(build_paths['PREFIX'] + "/../")
        self.server_group = self.params.get("server_group", '/server/',
                                            'daos_server')

        # setup the DAOS python API
        self.Context = DaosContext(build_paths['PREFIX'] + '/lib/')

        self.hostfile = WriteHostFile.WriteHostFile(self.hostlist,
                                                    self.workdir)

        ServerUtils.runServer(self.hostfile, self.server_group, self.basepath)

        time.sleep(5)

    def tearDown(self):
        try:
            if self.pool is not None and self.pool.attached:
                self.pool.destroy(1)
        finally:
            try:
                ServerUtils.stopServer(hosts=self.hostlist)
            except ServerFailed:
                pass

    def test_closehandle(self):
        """
        Test container close function with container handle parameter.

        :avocado: tags=container,openclose,closehandle
        """
        saved_coh = None

        # parameters used in pool create
        createmode = self.params.get("mode", '/run/pool/createmode/')
        createuid = os.geteuid()
        creategid = os.getegid()
        createsetid = self.params.get("setname", '/run/pool/createset/')
        createsize = self.params.get("size", '/run/pool/createsize/')
        coh_params = self.params.get("coh",
                                     '/run/container/container_handle/*/')

        expected_result = coh_params[1]

        try:
            # initialize a python pool object then create the underlying
            # daos storage
            self.pool = DaosPool(self.Context)
            self.pool.create(createmode, createuid, creategid, createsize,
                             createsetid, None)

            poh = self.pool.handle
            self.pool.connect(1 << 1)

            # Container initialization and creation
            self.Container1 = DaosContainer(self.Context)
            self.Container1.create(poh)
            str_cuuid = self.Container1.get_uuid_str()
            cuuid = uuid.UUID(str_cuuid)
            self.Container1.open(poh, cuuid, 2, None)

            # Defining 'good' and 'bad' container handles
            saved_coh = self.Container1.coh
            if coh_params[0] == 'GOOD':
                coh = self.Container1.coh
            else:
                # create a second container, open to get a handle
                # then close & destroy so handle is invalid
                self.Container2 = DaosContainer(self.Context)
                self.Container2.create(poh)
                self.Container2.open(poh, cuuid, 2, None)
                coh = self.Container2.coh
                self.Container2.close()
                self.Container2.destroy()

            # close container with either good or bad handle
            self.Container1.close(coh)

            if expected_result in ['FAIL']:
                self.fail("Test was expected to fail but it passed.\n")

        except DaosApiError as e:
            if expected_result == 'PASS':
                print(e)
                print(traceback.format_exc())
                self.fail("Test was expected to pass but it failed.\n")

            # close above failed so close for real with the right coh
            if saved_coh is not None:
                self.Container1.close(saved_coh)

        finally:
            self.Container1.destroy(1)
            self.pool.disconnect()
            self.pool.destroy(1)
            self.pool = None
Beispiel #46
0
class DeleteContainerTest(Test):
    """
    Tests DAOS container delete and close.
    """
    def setUp(self):
        self.agent_sessions = None
        # get paths from the build_vars generated by build
        self.hostlist = None
        with open('../../../.build_vars.json') as build_file:
            build_paths = json.load(build_file)
        self.basepath = os.path.normpath(build_paths['PREFIX'] + "/../")
        self.server_group = self.params.get("name", '/server_config/',
                                            'daos_server')

        # parameters used in pool create
        self.createmode = self.params.get("mode",
                                          '/run/createtests/createmode/')
        self.createuid = os.geteuid()
        self.creategid = os.getegid()
        self.createsetid = self.params.get("setname",
                                           '/run/createtests/createset/')
        self.createsize = self.params.get("size",
                                          '/run/createtests/createsize/')

        # setup the DAOS python API
        self.context = DaosContext(build_paths['PREFIX'] + '/lib/')
        self.pool = None
        self.container = None

        # hostlist and logging
        self.d_log = DaosLog(self.context)

        self.hostlist = self.params.get("test_machines", '/run/hosts/*')
        self.hostfile = write_host_file.write_host_file(self.hostlist,
                                                        self.workdir)
        self.agent_sessions = AgentUtils.run_agent(self.basepath, self.hostlist)
        server_utils.run_server(self.hostfile, self.server_group, self.basepath)

    def tearDown(self):
        if self.agent_sessions:
            AgentUtils.stop_agent(self.hostlist, self.agent_sessions)
        server_utils.stop_server(hosts=self.hostlist)

    def test_container_delete(self):
        """
        Test basic container delete
        :avocado: tags=regression,cont,vm,contdelete
        """
        expected_for_param = []
        uuidlist = self.params.get("uuid",
                                   '/run/createtests/ContainerUUIDS/*/')
        cont_uuid = uuidlist[0]
        expected_for_param.append(uuidlist[1])

        pohlist = self.params.get("poh", '/run/createtests/PoolHandles/*/')
        poh = pohlist[0]
        expected_for_param.append(pohlist[1])

        openlist = self.params.get("opened",
                                   "/run/createtests/ConnectionOpened/*/")
        opened = openlist[0]
        expected_for_param.append(openlist[1])

        forcelist = self.params.get("force", "/run/createtests/ForceDestroy/*/")
        force = forcelist[0]
        expected_for_param.append(forcelist[1])

        if force >= 1:
            self.cancel("Force >= 1 blocked by issue described in "
                        "https://jira.hpdd.intel.com/browse/DAOS-689")

        if force == 0:
            self.cancel("Force = 0 blocked by "
                        "https://jira.hpdd.intel.com/browse/DAOS-1935")

        expected_result = 'PASS'
        for result in expected_for_param:
            if result == 'FAIL':
                expected_result = 'FAIL'
                break

        try:
            # initialize a python pool object then create the underlying
            # daos storage
            self.pool = DaosPool(self.context)
            self.pool.create(self.createmode, self.createuid, self.creategid,
                             self.createsize, self.createsetid, None)

            # need a connection to create container
            self.pool.connect(1 << 1)
            self.container = DaosContainer(self.context)

            # create should always work (testing destroy)
            if cont_uuid != 'INVALID':
                cont_uuid = uuid.UUID(uuidlist[0])
                self.container.create(self.pool.handle, cont_uuid)
            else:
                self.container.create(self.pool.handle)

            # Opens the container if required
            if opened:
                self.container.open(self.pool.handle)

            # wait a few seconds and then attempt to destroy the container
            time.sleep(5)
            if poh == 'VALID':
                poh = self.pool.handle

            # if the container UUID is INVALID, overwrite it with a non-existent UUID
            if cont_uuid == 'INVALID':
                cont_uuid = uuid.uuid4()

            self.container.destroy(force=force, poh=poh, con_uuid=cont_uuid)
            self.container = None

            if expected_result in ['FAIL']:
                self.fail("Test was expected to fail but it passed.\n")

        except DaosApiError as excep:
            self.d_log.error(excep)
            self.d_log.error(traceback.format_exc())
            if expected_result == 'PASS':
                self.fail("Test was expected to pass but it failed.\n")

        finally:
            # clean up the pool
            if self.pool is not None:
                self.pool.destroy(1)
                self.pool = None
Beispiel #47
0
class SameKeyDifferentValue(TestWithServers):
    """
    Test Description: Test to verify different types of values
    passed to the same akey and dkey.
    :avocado: recursive
    """
    def setUp(self):
        try:
            super(SameKeyDifferentValue, self).setUp()

            # parameters used in pool create
            createmode = self.params.get("mode", '/run/pool/createmode/')
            createsetid = self.params.get("setname", '/run/pool/createset/')
            createsize = self.params.get("size", '/run/pool/createsize/')
            createuid = os.geteuid()
            creategid = os.getegid()

            # initialize a python pool object then create the underlying
            # daos storage
            self.pool = DaosPool(self.context)
            self.pool.create(createmode, createuid, creategid, createsize,
                             createsetid, None)
            self.pool.connect(1 << 1)

            # create a container
            self.container = DaosContainer(self.context)
            self.container.create(self.pool.handle)

            # now open it
            self.container.open()

        except DaosApiError as excpn:
            print(excpn)
            print(traceback.format_exc())
            self.fail("Test failed during setup.\n")

    def tearDown(self):

        try:
            if self.container:
                self.container.close()
                # wait a few seconds and then destroy
                time.sleep(5)
                self.container.destroy()

            # cleanup the pool
            if self.pool:
                self.pool.disconnect()
                self.pool.destroy(1)

        except DaosApiError as excpn:
            print(excpn)
            print(traceback.format_exc())
            self.fail("Test failed during teardown.\n")

        finally:
            super(SameKeyDifferentValue, self).tearDown()

    def test_single_to_array_value(self):
        """
        Jira ID: DAOS-2218
        Test Description: Test to verify different types of
        values passed (i.e. single to array value) to the same akey and dkey.
        Case1: Insert akey,dkey with single value
               Insert same akey,dkey with array value
               Result: should return -1001 ERR.
        Case2: Insert akey,dkey with single value
               Punch the keys
               Insert same akey,dkey under same object with array value
               Result: should either pass or return -1001 ERR
        Case3: Insert akey,dkey with single value
               Punch the keys
               Trigger aggregation
               Insert same akey,dkey under same object with array value
               Result: should either pass or return -1001 ERR
        :avocado: tags=object,samekeydifferentvalue,singletoarray,vm,small
        """

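        # the loop below drives the three cases in order: i == 0 exercises
        # Case1, i == 1 exercises Case2 (punch only) and i == 2 exercises
        # Case3 (punch followed by aggregation)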
        # define akey,dkey, single value data and array value data
        single_value_data = "a string that I want to stuff into an object"
        array_value_data = []
        array_value_data.append("data string one")
        array_value_data.append("data string two")
        array_value_data.append("data string tre")

        dkey = "this is the dkey"
        akey = "this is the akey"

        aggregation = False

        for i in range(3):
            try:
                # create an object and write single value data into it
                obj, txn = self.container.write_an_obj(single_value_data,
                                                       len(single_value_data) +
                                                       1,
                                                       dkey,
                                                       akey,
                                                       obj_cls=1)

                # read the data back and make sure it's correct
                read_back_data = self.container.read_an_obj(
                    len(single_value_data) + 1, dkey, akey, obj, txn)
                if single_value_data != read_back_data.value:
                    print("data I wrote:" + single_value_data)
                    print("data I read back" + read_back_data.value)
                    self.fail("Write data, read it back, didn't match\n")

                # test case 1
                if i == 0:
                    try:
                        # write array value data to same keys, expected to fail
                        self.container.write_an_array_value(array_value_data,
                                                            dkey,
                                                            akey,
                                                            obj,
                                                            obj_cls=1)

                        # above line is expected to return an error,
                        # if not fail the test
                        self.fail(
                            "Array value write to existing single value" +
                            " key should have failed\n")

                    # should fail with -1001 ERR
                    except DaosApiError as excp:
                        if "-1001" not in str(excp):
                            print(excp)
                            self.fail("Should have failed with -1001 error" +
                                      " message, but it did not\n")

                # test case 2 and 3
                elif i in (1, 2):
                    try:
                        # punch the keys
                        obj.punch_akeys(0, dkey, [akey])
                        obj.punch_dkeys(0, [dkey])

                        if aggregation is True:
                            # trigger aggregation
                            self.container.aggregate(self.container.coh, 0)

                        # write to the same set of keys under same object
                        # with array value type
                        self.container.write_an_array_value(array_value_data,
                                                            dkey,
                                                            akey,
                                                            obj,
                                                            obj_cls=1)

                    # above write of array value should either succeed
                    # or fail with -1001 ERR
                    except DaosApiError as excp:
                        if "-1001" not in str(excp):
                            print(excp)
                            self.fail("Should have failed with -1001 error" +
                                      " message or the write should have" +
                                      " been successful, but it did not\n")

                    # change the value of aggregation to test Test Case 3
                    aggregation = True

                # close the object after each iteration
                obj.close()

            # catch the exception if test fails to write to an object
            # or fails to punch the written object
            except DaosApiError as excp:
                print(excp)
                self.fail("Failed to write to akey/dkey or punch the object")

    def test_array_to_single_value(self):
        """
        Jira ID: DAOS-2218
        Test Description: Test to verify different types of
        values passed (i.e. array to single value) to the same akey and dkey.
        Case1: Insert akey,dkey with array value
               Insert same akey,dkey with single value
               Result: should return -1001 ERR.
        Case2: Insert akey,dkey with array value
               Punch the keys
               Insert same akey,dkey under same object with single value
               Result: should either pass or return -1001 ERR
        Case3: Insert akey,dkey with array value
               Punch the keys
               Trigger aggregation
               Insert same akey,dkey under same object with single value
               Result: should either pass or return -1001 ERR
        :avocado: tags=object,samekeydifferentvalue,arraytosingle,vm,small
        """

        # define akey,dkey, single value data and array value data
        single_value_data = "a string that I want to stuff into an object"
        array_value_data = []
        array_value_data.append("data string one")
        array_value_data.append("data string two")
        array_value_data.append("data string tre")

        dkey = "this is the dkey"
        akey = "this is the akey"

        aggregation = False

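        # same three-case structure as the single-to-array test above, only
        # with the value types reversed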
        for i in range(3):
            try:
                # create an object and write array value data into it
                obj, txn = self.container.write_an_array_value(
                    array_value_data, dkey, akey, obj_cls=1)
                # read the data back and make sure it's correct
                length = len(array_value_data[0])
                read_back_data = self.container.read_an_array(
                    len(array_value_data), length + 1, dkey, akey, obj, txn)

                for j in range(3):
                    if (array_value_data[j][0:length - 1] !=
                            read_back_data[j][0:length - 1]):
                        print("Written Data: {}".format(array_value_data[j]))
                        print("Read Data: {}".format(read_back_data[j]))
                        self.fail("Data mismatch\n")
                # test case 1
                if i == 0:
                    try:
                        # write single value data to same keys, expected to fail
                        self.container.write_an_obj(single_value_data,
                                                    len(single_value_data) + 1,
                                                    dkey,
                                                    akey,
                                                    obj,
                                                    obj_cls=1)
                        # above line is expected to return an error,
                        # if not fail the test
                        self.fail(
                            "Single value write to existing array value" +
                            " key should have failed\n")

                    # should fail with -1001 ERR
                    except DaosApiError as excp:
                        if "-1001" not in str(excp):
                            print(excp)
                            self.fail("Should have failed with -1001 error" +
                                      " message, but it did not\n")

                # test case 2 and 3
                elif i in (1, 2):
                    try:
                        # punch the keys
                        obj.punch_akeys(0, dkey, [akey])
                        obj.punch_dkeys(0, [dkey])

                        if aggregation is True:
                            # trigger aggregation
                            self.container.aggregate(self.container.coh, 0)

                        # write to the same set of keys under same object
                        # with single value type
                        self.container.write_an_obj(single_value_data,
                                                    len(single_value_data) + 1,
                                                    dkey,
                                                    akey,
                                                    obj,
                                                    obj_cls=1)
                    # above write of array value should either succeed
                    # or fail with -1001 ERR
                    except DaosApiError as excp:
                        if "-1001" not in str(excp):
                            print(excp)
                            self.fail("Should have failed with -1001 error" +
                                      " message or the write should have" +
                                      " been successful, but it did not\n")

                    # change the value of aggregation to test Test Case 3
                    aggregation = True

                # close the object after each iteration
                obj.close()

            # catch the exception if test fails to write to an object
            # or fails to punch the written object
            except DaosApiError as excp:
                print(excp)
                self.fail("Failed to write to akey/dkey or punch the object")
Beispiel #48
0
class OpenContainerTest(Test):
    """
    Tests DAOS container bad create (non existing pool handle, bad uuid)
    and close.

    :avocado: tags=container,containeropen
    """
    def setUp(self):

        # get paths from the build_vars generated by build
        with open('../../../.build_vars.json') as build_file:
            build_paths = json.load(build_file)
        self.basepath = os.path.normpath(build_paths['PREFIX'] + "/../")

        self.server_group = self.params.get("name", '/server_config/',
                                            'daos_server')

        # setup the DAOS python API
        self.context = DaosContext(build_paths['PREFIX'] + '/lib/')
        self.pool1 = None
        self.pool2 = None
        self.container1 = None
        self.container2 = None

        self.hostfile = None
        self.hostlist = self.params.get("test_machines", '/run/hosts/*')
        self.hostfile = write_host_file.write_host_file(self.hostlist,
                                                        self.workdir)

        # common parameters used in pool create
        self.createmode = self.params.get("mode",
                                          '/run/createtests/createmode/')
        self.createsetid = self.params.get("setname",
                                           '/run/createtests/createset/')
        self.createsize = self.params.get("size",
                                          '/run/createtests/createsize/')

        # pool 1 UID GID
        self.createuid1 = self.params.get("uid", '/run/createtests/createuid1/')
        self.creategid1 = self.params.get("gid", '/run/createtests/creategid1/')

        # pool 2 UID GID
        self.createuid2 = self.params.get("uid", '/run/createtests/createuid2/')
        self.creategid2 = self.params.get("gid", '/run/createtests/creategid2/')

        self.agent_sessions = AgentUtils.run_agent(self.basepath, self.hostlist)
        server_utils.run_server(self.hostfile, self.server_group, self.basepath)

    def tearDown(self):
        try:
            if self.container1 is not None:
                self.container1.destroy()
            if self.container2 is not None:
                self.container2.destroy()
            if self.pool1 is not None and self.pool1.attached:
                self.pool1.destroy(1)
            if self.pool2 is not None and self.pool2.attached:
                self.pool2.destroy(1)
        finally:
            if self.agent_sessions:
                AgentUtils.stop_agent(self.hostlist, self.agent_sessions)
            server_utils.stop_server(hosts=self.hostlist)

    def test_container_open(self):
        """
        Test basic container bad create.

        :avocado: tags=container,containeropen
        """
        container_uuid = None
        expected_for_param = []
        uuidlist = self.params.get("uuid", '/run/createtests/uuids/*/')
        container_uuid = uuidlist[0]
        expected_for_param.append(uuidlist[1])

        pohlist = self.params.get("poh", '/run/createtests/handles/*/')
        poh = pohlist[0]
        expected_for_param.append(pohlist[1])

        expected_result = 'PASS'
        for result in expected_for_param:
            if result == 'FAIL':
                expected_result = 'FAIL'
                break

        try:
            # create two pools and try to create containers in these pools
            self.pool1 = DaosPool(self.context)
            self.pool1.create(self.createmode, self.createuid1, self.creategid1,
                              self.createsize, self.createsetid, None)

            self.pool2 = DaosPool(self.context)
            self.pool2.create(self.createmode, self.createuid2, self.creategid2,
                              self.createsize, None, None)

            # Connect to the pools
            self.pool1.connect(1 << 1)
            self.pool2.connect(1 << 1)

            # defines pool handle for container open
            if pohlist[0] == 'pool1':
                poh = self.pool1.handle
            else:
                poh = self.pool2.handle

            # Create a container in pool1
            self.container1 = DaosContainer(self.context)
            self.container1.create(self.pool1.handle)

            # defines test UUID for container open
            if uuidlist[0] == 'pool1':
                struuid = self.container1.get_uuid_str()
                container_uuid = uuid.UUID(struuid)
            else:
                if uuidlist[0] == 'MFUUID':
                    container_uuid = "misformed-uuid-0000"
                else:
                    container_uuid = uuid.uuid4() # random uuid

            # tries to open the container1
            # open should be ok only if poh = pool1.handle &&
            #                           containerUUID = container1.uuid
            self.container1.open(poh, container_uuid)

            # wait a few seconds and then destroy containers
            time.sleep(5)
            self.container1.close()
            self.container1.destroy()
            self.container1 = None

            # cleanup the pools
            self.pool1.disconnect()
            self.pool1.destroy(1)
            self.pool1 = None
            self.pool2.disconnect()
            self.pool2.destroy(1)
            self.pool2 = None

            if expected_result in ['FAIL']:
                self.fail("Test was expected to fail but it passed.\n")

        except DaosApiError as excep:
            print(excep)
            print(traceback.format_exc())
            if expected_result == 'PASS':
                self.fail("Test was expected to pass but it failed.\n")
        finally:
            if self.hostfile is not None:
                os.remove(self.hostfile)

    def test_no_space_cont_create(self):
        """
        :avocado: tags=all,container,tiny,full_regression,fullpoolcontcreate
        """
        # full storage rc
        err = "-1007"
        # probably should be -1007, revisit later
        err2 = "-1009"

        # create pool
        self.pool = DaosPool(self.context)
        mode = self.params.get("mode", '/conttests/createmode/')
        self.d_log.debug("mode is {0}".format(mode))
        uid = os.geteuid()
        gid = os.getegid()
        # 16 mb pool, minimum size currently possible
        size = 16777216

        self.d_log.debug("creating pool")
        self.pool.create(mode, uid, gid, size, self.server_group, None)
        self.d_log.debug("created pool")

        # connect to the pool
        self.d_log.debug("connecting to pool")
        self.pool.connect(1 << 1)
        self.d_log.debug("connected to pool")

        # query the pool
        self.d_log.debug("querying pool info")
        dummy_pool_info = self.pool.pool_query()
        self.d_log.debug("queried pool info")

        # create a container
        try:
            self.d_log.debug("creating container")
            self.cont = DaosContainer(self.context)
            self.cont.create(self.pool.handle)
            self.d_log.debug("created container")
        except DaosApiError as excep:
            self.d_log.error("caught exception creating container: "
                             "{0}".format(excep))
            self.fail("caught exception creating container: {0}".format(excep))

        self.d_log.debug("opening container")
        self.cont.open()
        self.d_log.debug("opened container")

        # generate a random dkey and akey each time
        # write 1MB objects until no space, then 10KB, then 10B, then 1B, to
        # fill the pool quickly
        for obj_sz in [1048576, 10240, 10, 1]:
            write_count = 0
            while True:
                self.d_log.debug("writing obj {0}, sz {1} to "
                                 "container".format(write_count, obj_sz))
                my_str = "a" * obj_sz
                my_str_sz = obj_sz
                dkey = (''.join(
                    random.choice(string.ascii_lowercase) for i in range(5)))
                akey = (''.join(
                    random.choice(string.ascii_lowercase) for i in range(5)))
                try:
                    dummy_oid, dummy_tx = self.cont.write_an_obj(
                        my_str, my_str_sz, dkey, akey, obj_cls="OC_SX")
                    self.d_log.debug("wrote obj {0}, sz {1}".format(
                        write_count, obj_sz))
                    write_count += 1
                except DaosApiError as excep:
                    if not (err in repr(excep) or err2 in repr(excep)):
                        self.d_log.error("caught exception while writing "
                                         "object: {0}".format(repr(excep)))
                        self.fail("caught exception while writing object: {0}".
                                  format(repr(excep)))
                    else:
                        self.d_log.debug("pool is too full for {0} byte "
                                         "objects".format(obj_sz))
                        break

        self.d_log.debug("closing container")
        self.cont.close()
        self.d_log.debug("closed container")
        # create a 2nd container now that pool is full
        try:
            self.d_log.debug("creating 2nd container")
            self.cont2 = DaosContainer(self.context)
            self.cont2.create(self.pool.handle)
            self.d_log.debug("created 2nd container")

            self.d_log.debug("opening container 2")
            self.cont2.open()
            self.d_log.debug("opened container 2")

            self.d_log.debug("writing one more object, write expected to fail")
            self.cont2.write_an_obj(my_str,
                                    my_str_sz,
                                    dkey,
                                    akey,
                                    obj_cls="OC_SX")
            self.fail("wrote one more object after pool was completely filled,"
                      " this should never print")
        except DaosApiError as excep:
            if not (err in repr(excep) or err2 in repr(excep)):
                self.d_log.error("caught unexpected exception while "
                                 "writing object: {0}".format(repr(excep)))
                self.fail("caught unexpected exception while writing "
                          "object: {0}".format(repr(excep)))
            else:
                self.d_log.debug("correctly caught -1007 while attempting "
                                 "to write object in full pool")
Beispiel #50
0
    def test_array_obj(self):
        """
        Test ID: DAOS-961

        Test Description: Writes an array to an object and then reads it
        back and verifies it.

        :avocado: tags=object,arrayobj,regression,vm,small
        """
        try:
            # parameters used in pool create
            createmode = self.params.get("mode", '/run/pool_params/createmode/')
            createsetid = self.params.get("setname",
                                          '/run/pool_params/createset/')
            createsize = self.params.get("size", '/run/pool_params/createsize/')
            createuid = os.geteuid()
            creategid = os.getegid()

            # initialize a python pool object then create the underlying
            # daos storage
            pool = DaosPool(self.context)
            pool.create(createmode, createuid, creategid,
                        createsize, createsetid, None)
            self.plog.info("Pool %s created.", pool.get_uuid_str())

            # need a connection to create container
            pool.connect(1 << 1)

            # create a container
            container = DaosContainer(self.context)
            container.create(pool.handle)
            self.plog.info("Container %s created.", container.get_uuid_str())

            # now open it
            container.open()

            # do a query and compare the UUID returned from create with
            # that returned by query
            container.query()

            if container.get_uuid_str() != c_uuid_to_str(
                    container.info.ci_uuid):
                self.fail("Container UUID did not match the one in info\n")

            # create an object and write some data into it
            thedata = []
            thedata.append("data string one")
            thedata.append("data string two")
            thedata.append("data string tre")
            dkey = "this is the dkey"
            akey = "this is the akey"

            self.plog.info("writing array to dkey >%s< akey >%s<.", dkey, akey)
            oid, epoch = container.write_an_array_value(thedata, dkey, akey,
                                                        obj_cls=3)

            # read the data back and make sure it's correct
            length = len(thedata[0])
            thedata2 = container.read_an_array(len(thedata), length+1,
                                               dkey, akey, oid, epoch)
            if thedata[0][0:length-1] != thedata2[0][0:length-1]:
                self.plog.error("Data mismatch")
                self.plog.error("Wrote: >%s<", thedata[0])
                self.plog.error("Read: >%s<", thedata2[0])
                self.fail("Write data, read it back, didn't match\n")

            if thedata[2][0:length-1] != thedata2[2][0:length-1]:
                self.plog.error("Data mismatch")
                self.plog.error("Wrote: >%s<", thedata[2])
                self.plog.error("Read: >%s<", thedata2[2])
                self.fail("Write data, read it back, didn't match\n")

            container.close()

            # wait a few seconds and then destroy
            time.sleep(5)
            container.destroy()

            # cleanup the pool
            pool.disconnect()
            pool.destroy(1)
            self.plog.info("Test Complete")

        except DaosApiError as excep:
            self.plog.error("Test Failed, exception was thrown.")
            print(excep)
            print(traceback.format_exc())
            self.fail("Test was expected to pass but it failed.\n")
Beispiel #51
0
class PunchTest(TestWithServers):
    """
    Simple test to verify the 3 different punch calls.
    :avocado: recursive
    """
    def setUp(self):
        try:
            super(PunchTest, self).setUp()

            # parameters used in pool create
            createmode = self.params.get("mode", '/run/pool/createmode/')
            createsetid = self.params.get("setname", '/run/pool/createset/')
            createsize = self.params.get("size", '/run/pool/createsize/')

            createuid = os.geteuid()
            creategid = os.getegid()

            # initialize a python pool object then create the underlying
            # daos storage
            self.pool = DaosPool(self.context)
            self.pool.create(createmode, createuid, creategid, createsize,
                             createsetid, None)
            self.pool.connect(1 << 1)

            # create a container
            self.container = DaosContainer(self.context)
            self.container.create(self.pool.handle)

            # now open it
            self.container.open()
        except DaosApiError as excpn:
            print(excpn)
            print(traceback.format_exc())
            self.fail("Test failed during setup.\n")

    def tearDown(self):

        try:
            if self.container:
                self.container.close()

            # wait a few seconds and then destroy
            time.sleep(5)
            if self.container:
                self.container.destroy()

            # cleanup the pool
            if self.pool:
                self.pool.disconnect()
                self.pool.destroy(1)

        except DaosApiError as excpn:
            print(excpn)
            print(traceback.format_exc())
            self.fail("Test failed during teardown.\n")

        finally:
            super(PunchTest, self).tearDown()

    def test_dkey_punch(self):
        """
        The most basic test of the dkey punch function.

        :avocado: tags=object,punch,dkeypunch,regression,vm,small
        """

        try:
            # create an object and write some data into it
            thedata = "a string that I want to stuff into an object"
            dkey = "this is the dkey"
            akey = "this is the akey"

            obj, txn = self.container.write_an_obj(thedata,
                                                   len(thedata) + 1,
                                                   dkey,
                                                   akey,
                                                   obj_cls=1)

            # read the data back and make sure it's correct
            thedata2 = self.container.read_an_obj(
                len(thedata) + 1, dkey, akey, obj, txn)
            if thedata != thedata2.value:
                print("data I wrote:" + thedata)
                print("data I read back" + thedata2.value)
                self.fail("Wrote data, read it back, didn't match\n")

            # now punch this data, should fail, can't punch committed data
            obj.punch_dkeys(txn, [dkey])

            # expecting the punch of committed data above to fail
            self.fail("Punch should have failed but it didn't.\n")

        # expecting an exception so do nothing
        except DaosApiError as dummy_e:
            pass

        try:
            # now punch this data
            obj.punch_dkeys(0, [dkey])

        # this one should work so error if exception occurs
        except DaosApiError as dummy_e:
            self.fail("Punch should have worked.\n")

        # there are a bunch of other cases to test here,
        #    --test punching the same updating and punching the same data in
        #    the same tx, should fail
        #    --test non updated data in an open tx, should work

    def test_akey_punch(self):
        """
        The most basic test of the akey punch function.

        :avocado: tags=object,punch,akeypunch,regression,vm,small
        """

        try:
            # create an object and write some data into it
            dkey = "this is the dkey"
            data1 = [("this is akey 1", "this is data value 1"),
                     ("this is akey 2", "this is data value 2"),
                     ("this is akey 3", "this is data value 3")]
            obj, txn = self.container.write_multi_akeys(dkey, data1, obj_cls=1)

            # read back the 1st epoch's data and check 1 value just to make sure
            # everything is on the up and up
            readbuf = [(data1[0][0], len(data1[0][1]) + 1),
                       (data1[1][0], len(data1[1][1]) + 1),
                       (data1[2][0], len(data1[2][1]) + 1)]
            retrieved_data = self.container.read_multi_akeys(
                dkey, readbuf, obj, txn)
            if retrieved_data[data1[1][0]] != data1[1][1]:
                print("middle akey: {}".format(retrieved_data[data1[1][0]]))
                self.fail("data retrieval failure")

            # now punch one akey from this data
            obj.punch_akeys(txn, dkey, [data1[1][0]])

            # expecting the punch of committed data above to fail
            self.fail("Punch should have failed but it didn't.\n")

        # expecting an exception so do nothing
        except DaosApiError as excep:
            print(excep)

        try:
            # now punch the object without a tx
            obj.punch_akeys(0, dkey, [data1[1][0]])

        # expecting it to work this time so error
        except DaosApiError as excep:
            self.fail("Punch should have worked.\n")

    def test_obj_punch(self):
        """
        The most basic test of the object punch function.  Really similar
        to above except the whole object is deleted.

        :avocado: tags=object,punch,objpunch,regression,vm,small
        """

        try:

            # create an object and write some data into it
            thedata = "a string that I want to stuff into an object"
            dkey = "this is the dkey"
            akey = "this is the akey"

            obj, txn = self.container.write_an_obj(thedata,
                                                   len(thedata) + 1,
                                                   dkey,
                                                   akey,
                                                   obj_cls=1)

            # read the data back and make sure it's correct
            thedata2 = self.container.read_an_obj(
                len(thedata) + 1, dkey, akey, obj, txn)
            if thedata != thedata2.value:
                print("data I wrote:" + thedata)
                print("data I read back" + thedata2.value)
                self.fail("Wrote data, read it back, didn't match\n")

            # now punch the object; it is committed, so this is not expected to work
            obj.punch(txn)

            # expecting the punch of committed data above to fail
            self.fail("Punch should have failed but it didn't.\n")

        # expecting an exception so do nothing
        except DaosApiError as excep:
            print(excep)

        try:
            obj.punch(0)

        # expecting it to work without a tx
        except DaosApiError as excep:
            print(excep)
            self.fail("Punch should have worked.\n")
Beispiel #52
0
    def test_container_open(self):
        """
        Test basic container bad create.

        :avocado: tags=container,containeropen
        """
        container_uuid = None
        expected_for_param = []
        uuidlist = self.params.get("uuid", '/run/createtests/uuids/*/')
        container_uuid = uuidlist[0]
        expected_for_param.append(uuidlist[1])

        pohlist = self.params.get("poh", '/run/createtests/handles/*/')
        poh = pohlist[0]
        expected_for_param.append(pohlist[1])

        expected_result = 'PASS'
        for result in expected_for_param:
            if result == 'FAIL':
                expected_result = 'FAIL'
                break

        try:
            # create two pools and try to create containers in these pools
            self.pool1 = DaosPool(self.context)
            self.pool1.create(self.createmode, self.createuid1, self.creategid1,
                              self.createsize, self.createsetid, None)

            self.pool2 = DaosPool(self.context)
            self.pool2.create(self.createmode, self.createuid2, self.creategid2,
                              self.createsize, None, None)

            # Connect to the pools
            self.pool1.connect(1 << 1)
            self.pool2.connect(1 << 1)

            # defines pool handle for container open
            if pohlist[0] == 'pool1':
                poh = self.pool1.handle
            else:
                poh = self.pool2.handle

            # Create a container in pool1
            self.container1 = DaosContainer(self.context)
            self.container1.create(self.pool1.handle)

            # defines test UUID for container open
            if uuidlist[0] == 'pool1':
                struuid = self.container1.get_uuid_str()
                container_uuid = uuid.UUID(struuid)
            else:
                if uuidlist[0] == 'MFUUID':
                    container_uuid = "misformed-uuid-0000"
                else:
                    container_uuid = uuid.uuid4() # random uuid

            # tries to open the container1
            # open should be ok only if poh = pool1.handle &&
            #                           containerUUID = container1.uuid
            self.container1.open(poh, container_uuid)

            # wait a few seconds and then destroy containers
            time.sleep(5)
            self.container1.close()
            self.container1.destroy()
            self.container1 = None

            # cleanup the pools
            self.pool1.disconnect()
            self.pool1.destroy(1)
            self.pool1 = None
            self.pool2.disconnect()
            self.pool2.destroy(1)
            self.pool2 = None

            if expected_result in ['FAIL']:
                self.fail("Test was expected to fail but it passed.\n")

        except DaosApiError as excep:
            print(excep)
            print(traceback.format_exc())
            if expected_result == 'PASS':
                self.fail("Test was expected to pass but it failed.\n")
        finally:
            if self.hostfile is not None:
                os.remove(self.hostfile)
Beispiel #53
0
class ContainerAttributeTest(TestWithServers):
    """
    Tests DAOS container attribute get/set/list.
    :avocado: recursive
    """
    def setUp(self):
        super(ContainerAttributeTest, self).setUp()

        self.large_data_set = {}

        self.pool = DaosPool(self.context)
        self.pool.create(
            self.params.get("mode", '/run/attrtests/createmode/*'),
            os.geteuid(), os.getegid(),
            self.params.get("size", '/run/attrtests/createsize/*'),
            self.params.get("setname", '/run/attrtests/createset/*'), None)
        self.pool.connect(1 << 1)
        poh = self.pool.handle
        self.container = DaosContainer(self.context)
        self.container.create(poh)
        self.container.open()

    def tearDown(self):
        try:
            if self.container:
                self.container.close()
        finally:
            super(ContainerAttributeTest, self).tearDown()

    def create_data_set(self):
        """
        To create the large attribute dictionary
        """
        allchar = string.ascii_letters + string.digits
        for i in range(1024):
            self.large_data_set[str(i)] = ("".join(
                random.choice(allchar) for x in range(random.randint(1, 100))))

    def test_container_attribute(self):
        """
        Test basic container attribute tests.
        :avocado: tags=container,container_attr,attribute,sync_conattribute
        """
        expected_for_param = []
        name = self.params.get("name", '/run/attrtests/name_handles/*/')
        expected_for_param.append(name[1])
        value = self.params.get("value", '/run/attrtests/value_handles/*/')
        expected_for_param.append(value[1])

        attr_dict = {name[0]: value[0]}
        if name[0] is not None:
            if "largenumberofattr" in name[0]:
                self.create_data_set()
                attr_dict = self.large_data_set
                attr_dict[name[0]] = value[0]

        expected_result = 'PASS'
        for result in expected_for_param:
            if result == 'FAIL':
                expected_result = 'FAIL'
                break
        try:
            self.container.set_attr(data=attr_dict)
            size, buf = self.container.list_attr()

            verify_list_attr(attr_dict, size, buf)

            # Request something that doesn't exist
            if name[0] is not None and "Negative" in name[0]:
                name[0] = "rubbish"

            results = {}
            results = self.container.get_attr([name[0]])

            # for this test the dictionary has been altered, need to just
            # set it to what we are expecting to get back
            if name[0] is not None:
                if "largenumberofattr" in name[0]:
                    attr_dict.clear()
                    attr_dict[name[0]] = value[0]

            verify_get_attr(attr_dict, results)

            if expected_result in ['FAIL']:
                self.fail("Test was expected to fail but it passed.\n")

        except (DaosApiError, DaosTestError) as excep:
            print(excep)
            print(traceback.format_exc())
            if expected_result == 'PASS':
                self.fail("Test was expected to pass but it failed.\n")

    def test_container_attribute_asyn(self):
        """
        Test basic container attribute tests in asynchronous mode.

        :avocado: tags=container,container_attr,attribute,async_conattribute
        """
        global GLOB_SIGNAL
        global GLOB_RC

        expected_for_param = []
        name = self.params.get("name", '/run/attrtests/name_handles/*/')
        expected_for_param.append(name[1])
        value = self.params.get("value", '/run/attrtests/value_handles/*/')
        expected_for_param.append(value[1])

        attr_dict = {name[0]: value[0]}
        if name[0] is not None:
            if "largenumberofattr" in name[0]:
                self.create_data_set()
                attr_dict = self.large_data_set
                attr_dict[name[0]] = value[0]

        expected_result = 'PASS'
        for result in expected_for_param:
            if result == 'FAIL':
                expected_result = 'FAIL'
                break
        try:
            GLOB_SIGNAL = threading.Event()
            self.container.set_attr(data=attr_dict, cb_func=cb_func)
            GLOB_SIGNAL.wait()
            if GLOB_RC != 0 and expected_result in ['PASS']:
                self.fail("RC not as expected after set_attr First {0}".format(
                    GLOB_RC))

            GLOB_SIGNAL = threading.Event()
            size, buf = self.container.list_attr(cb_func=cb_func)
            GLOB_SIGNAL.wait()
            if GLOB_RC != 0 and expected_result in ['PASS']:
                self.fail(
                    "RC not as expected after list_attr First {0}".format(
                        GLOB_RC))
            if expected_result in ['PASS']:
                verify_list_attr(attr_dict, size, buf, mode="async")

            # Request something that doesn't exist
            if name[0] is not None and "Negative" in name[0]:
                name[0] = "rubbish"

            GLOB_SIGNAL = threading.Event()
            self.container.get_attr([name[0]], cb_func=cb_func)

            GLOB_SIGNAL.wait()

            if GLOB_RC != 0 and expected_result in ['PASS']:
                self.fail(
                    "RC not as expected after get_attr {0}".format(GLOB_RC))

            # not verifying the get_attr since it's not available asynchronously

            if value[0] is not None:
                if GLOB_RC == 0 and expected_result in ['FAIL']:
                    self.fail("Test was expected to fail but it passed.\n")

        except DaosApiError as excep:
            print(excep)
            print(traceback.format_exc())
            if expected_result == 'PASS':
                self.fail("Test was expected to pass but it failed.\n")
Beispiel #54
0
class CreateContainerTest(Test):
    """
    Tests DAOS container create.
    """
    def setUp(self):
        self.agent_sessions = None
        self.hostlist = None

        # get paths from the build_vars generated by build
        with open('../../../.build_vars.json') as build_file:
            build_paths = json.load(build_file)
        self.basepath = os.path.normpath(build_paths['PREFIX'] + "/../")
        self.server_group = self.params.get("name", '/server_config/')

        self.container = None

        # setup the DAOS python API
        self.context = DaosContext(build_paths['PREFIX'] + '/lib/')

        self.hostlist = self.params.get("test_machines", '/run/hosts/*')
        hostfile = write_host_file.write_host_file(self.hostlist, self.workdir)

        self.agent_sessions = AgentUtils.run_agent(self.basepath, self.hostlist)
        server_utils.run_server(hostfile, self.server_group, self.basepath)

    def tearDown(self):
        if self.agent_sessions:
            AgentUtils.stop_agent(self.hostlist, self.agent_sessions)
        server_utils.stop_server(None, self.hostlist)

    def test_container_create(self):
        """
        Test ID: DAOS-689

        Test Description: valid and invalid container creation and close.

        :avocado: tags=regression,cont,contcreate
        """

        pool = None
        contuuid = None
        expected_results = []
        should_fail = False

        try:
            # initialize a python pool object then create the underlying
            # daos storage
            createmode = self.params.get("mode", '/run/poolparams/')
            createuid = os.geteuid()
            creategid = os.getegid()
            createsetid = self.params.get("setname", '/run/poolparams/')
            createsize = self.params.get("size", '/run/poolparams/')

            # setup the pool
            pool = DaosPool(self.context)
            pool.create(createmode, createuid, creategid,
                        createsize, createsetid)
            pool.connect(1 << 1)

            # maybe use the good handle, maybe not
            handleparam = self.params.get("handle", '/run/poolhandle/*')
            if handleparam == 'VALID':
                poh = pool.handle
            else:
                poh = handleparam
                expected_results.append('FAIL')

            # maybe use a good UUID, maybe not
            uuidparam = self.params.get("uuid", "/uuids/*")
            expected_results.append(uuidparam[1])
            if uuidparam[0] == 'NULLPTR':
                self.cancel("skipping this test until DAOS-2043 is fixed")
                contuuid = 'NULLPTR'
            else:
                contuuid = uuid.UUID(uuidparam[0])

            should_fail = False
            for result in expected_results:
                if result == 'FAIL':
                    should_fail = True
                    break

            self.container = DaosContainer(self.context)
            self.container.create(poh, contuuid)

            # check UUID is the specified one
            if (uuidparam[0]).upper() != self.container.get_uuid_str().upper():
                print("uuidparam[0] is {}, uuid_str is {}".format(
                    uuidparam[0], self.container.get_uuid_str()))
                self.fail("Container UUID differs from specified at create\n")

            if should_fail:
                self.fail("Test was expected to fail but it passed.\n")

        except DaosApiError as excep:
            print(excep)
            print(traceback.format_exc())
            if not should_fail:
                self.fail("Test was expected to pass but it failed.\n")
        finally:
            if pool is not None:
                pool.disconnect()
                pool.destroy(1)
Beispiel #55
0
    def test_global_handle(self):
        """
        Test ID: DAO

        Test Description: Use a pool handle in another process.

        :avocado: tags=container,conthandle,vm,small,regression
        """

        try:

            # use the uid/gid of the user running the test, these should
            # be perfectly valid
            createuid = os.geteuid()
            creategid = os.getegid()

            # parameters used in pool create that are in yaml
            createmode = self.params.get("mode", '/run/testparams/createmode/')
            createsetid = self.params.get("setname",
                                          '/run/testparams/createset/')
            createsize = self.params.get("size", '/run/testparams/createsize/')

            # initialize a python pool object then create the underlying
            # daos storage
            pool = DaosPool(self.Context)
            pool.create(createmode, createuid, creategid, createsize,
                        createsetid, None)
            pool.connect(1 << 1)

            # create a pool global handle
            iov_len, buf_len, buf = pool.local2global()
            buftype = ctypes.c_byte * buf_len
            c_buf = buftype.from_buffer(buf)
            sct_pool_handle = sharedctypes.RawValue(
                IOV, ctypes.cast(c_buf, ctypes.c_void_p), buf_len, iov_len)
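            # the RawValue carries the serialized pool handle as an IOV so
            # another process (see the CheckHandle call below) could rebuild a
            # local handle from it, presumably via the matching global2local call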

            # create a container
            container = DaosContainer(self.Context)
            container.create(pool.handle)
            container.open()

            # create a container global handle
            iov_len, buf_len, buf = container.local2global()
            buftype = ctypes.c_byte * buf_len
            c_buf = buftype.from_buffer(buf)
            sct_cont_handle = sharedctypes.RawValue(
                IOV, ctypes.cast(c_buf, ctypes.c_void_p), buf_len, iov_len)

            sct_pool_uuid = sharedctypes.RawArray(ctypes.c_byte, pool.uuid)
            # this should work in the future but needs on-line server addition
            #arg_list = (
            #p = Process(target=CheckHandle, args=arg_list)
            #p.start()
            #p.join()
            # for now verifying global handle in the same process which is not
            # the intended use case
            CheckHandle(sct_pool_handle, sct_pool_uuid, sct_cont_handle, 0)

        except DaosApiError as e:
            print(e)
            print(traceback.format_exc())
            self.fail("Expecting to pass but test has failed.\n")
Beispiel #56
0
    def test_container_open(self):
        """
        Test basic container bad create.

        :avocado: tags=container,containeropen
        """

        expected_for_param = []
        uuidlist = self.params.get("uuid", '/run/createtests/uuids/*/')
        containerUUID = uuidlist[0]
        expected_for_param.append(uuidlist[1])

        pohlist = self.params.get("poh",'/run/createtests/handles/*/')
        poh = pohlist[0]
        expected_for_param.append(pohlist[1])

        expected_result = 'PASS'
        for result in expected_for_param:
            if result == 'FAIL':
                expected_result = 'FAIL'
                break

        try:
            # create two pools and try to create containers in these pools
            self.POOL1 = DaosPool(self.Context)
            self.POOL1.create(self.createmode, self.createuid1, self.creategid1,
                              self.createsize, self.createsetid, None)

            self.POOL2 = DaosPool(self.Context)
            self.POOL2.create(self.createmode, self.createuid2, self.creategid2,
                              self.createsize, None, None)

            # Connect to the pools
            self.POOL1.connect(1 << 1)
            self.POOL2.connect(1 << 1)

            # defines pool handle for container open
            if pohlist[0] == 'POOL1':
                poh = self.POOL1.handle
            else:
                poh = self.POOL2.handle

            # Create a container in POOL1
            self.CONTAINER1 = DaosContainer(self.Context)
            self.CONTAINER1.create(self.POOL1.handle)

            # defines test UUID for container open
            if uuidlist[0] == 'POOL1':
                struuid = self.CONTAINER1.get_uuid_str()
                containerUUID = uuid.UUID(struuid)
            else:
                if uuidlist[0] == 'MFUUID':
                    containerUUID = "misformed-uuid-0000"
                else:
                    containerUUID = uuid.uuid4() # random uuid

            # try to open CONTAINER1; the open should succeed only if
            # poh == POOL1.handle and containerUUID == CONTAINER1.uuid
            self.CONTAINER1.open(poh, containerUUID)

            # wait a few seconds and then destroy containers
            time.sleep(5)
            self.CONTAINER1.close()
            self.CONTAINER1.destroy()
            self.CONTAINER1 = None

            # cleanup the pools
            self.POOL1.disconnect()
            self.POOL1.destroy(1)
            self.POOL1 = None
            self.POOL2.disconnect()
            self.POOL2.destroy(1)
            self.POOL2 = None

            if expected_result == 'FAIL':
                self.fail("Test was expected to fail but it passed.\n")

        except DaosApiError as e:
            print(e)
            print(traceback.format_exc())
            if expected_result == 'PASS':
                self.fail("Test was expected to pass but it failed.\n")
        finally:
            if self.hostfile is not None:
                os.remove(self.hostfile)
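
# ---------------------------------------------------------------------------
# Hedged sketch, not part of the original test: the "uuid" and "poh"
# parameters read above are assumed to come from an avocado yaml tree shaped
# roughly like the dict below -- each leaf is a [value, expected-result]
# pair.  The variant names and values here are illustrative only.
ASSUMED_CREATETESTS_PARAMS = {
    'uuids': {
        'valid':     {'uuid': ['POOL1', 'PASS']},
        'malformed': {'uuid': ['MFUUID', 'FAIL']},
        'random':    {'uuid': ['RANDOM', 'FAIL']},
    },
    'handles': {
        'valid': {'poh': ['POOL1', 'PASS']},
        'wrong': {'poh': ['POOL2', 'FAIL']},
    },
}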
Beispiel #57
0
class OpenContainerTest(Test):
    """
    Tests DAOS container open with bad parameters (non-existing pool handle,
    bad UUID) and close.

    :avocado: tags=container,containeropen
    """
    def setUp(self):

        # get paths from the build_vars generated by build
        with open('../../../.build_vars.json') as f:
            build_paths = json.load(f)
        self.basepath = os.path.normpath(build_paths['PREFIX'] + "/../")
        self.tmp = build_paths['PREFIX'] + '/tmp'

        self.server_group = self.params.get("server_group", '/server/',
                                            'daos_server')

        # setup the DAOS python API
        self.Context = DaosContext(build_paths['PREFIX'] + '/lib/')
        self.POOL1 = None
        self.POOL2 = None
        self.CONTAINER1 = None
        self.CONTAINER2 = None

        self.hostfile = None
        self.hostlist = self.params.get("test_machines", '/run/hosts/*')
        self.hostfile = WriteHostFile.WriteHostFile(self.hostlist, self.tmp)

        # common parameters used in pool create
        self.createmode = self.params.get("mode", '/run/createtests/createmode/')
        self.createsetid = self.params.get("setname", '/run/createtests/createset/')
        self.createsize = self.params.get("size", '/run/createtests/createsize/')

        # POOL 1 UID GID
        self.createuid1 = self.params.get("uid", '/run/createtests/createuid1/')
        self.creategid1 = self.params.get("gid", '/run/createtests/creategid1/')

        # POOL 2 UID GID
        self.createuid2 = self.params.get("uid", '/run/createtests/createuid2/')
        self.creategid2 = self.params.get("gid", '/run/createtests/creategid2/')

        ServerUtils.runServer(self.hostfile, self.server_group, self.basepath)

    def tearDown(self):
        try:
            if self.CONTAINER1 is not None:
                self.CONTAINER1.destroy()
            if self.CONTAINER2 is not None:
                self.CONTAINER2.destroy()
            if self.POOL1 is not None and self.POOL1.attached:
                self.POOL1.destroy(1)
            if self.POOL2 is not None and self.POOL2.attached:
                self.POOL2.destroy(1)
        finally:
            ServerUtils.stopServer(hosts=self.hostlist)

    def test_container_open(self):
        """
        Test container open with bad parameters (wrong pool handle or UUID).

        :avocado: tags=container,containeropen
        """

        expected_for_param = []
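        # each parameter fetched below is a [value, expected-result] pair from
        # the test yaml; the open is expected to succeed only if every pair is
        # marked PASS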
        uuidlist = self.params.get("uuid", '/run/createtests/uuids/*/')
        containerUUID = uuidlist[0]
        expected_for_param.append(uuidlist[1])

        pohlist = self.params.get("poh", '/run/createtests/handles/*/')
        poh = pohlist[0]
        expected_for_param.append(pohlist[1])

        expected_result = 'PASS'
        for result in expected_for_param:
            if result == 'FAIL':
                expected_result = 'FAIL'
                break

        try:
            # create two pools and try to create containers in these pools
            self.POOL1 = DaosPool(self.Context)
            self.POOL1.create(self.createmode, self.createuid1, self.creategid1,
                              self.createsize, self.createsetid, None)

            self.POOL2 = DaosPool(self.Context)
            self.POOL2.create(self.createmode, self.createuid2, self.creategid2,
                              self.createsize, None, None)

            # Connect to the pools
            self.POOL1.connect(1 << 1)
            self.POOL2.connect(1 << 1)

            # defines pool handle for container open
            if pohlist[0] == 'POOL1':
                poh = self.POOL1.handle
            else:
                poh = self.POOL2.handle

            # Create a container in POOL1
            self.CONTAINER1 = DaosContainer(self.Context)
            self.CONTAINER1.create(self.POOL1.handle)

            # defines test UUID for container open
            if uuidlist[0] == 'POOL1':
                struuid = self.CONTAINER1.get_uuid_str()
                containerUUID = uuid.UUID(struuid)
            elif uuidlist[0] == 'MFUUID':
                containerUUID = "misformed-uuid-0000"
            else:
                containerUUID = uuid.uuid4()  # random uuid

            # try to open CONTAINER1; the open should succeed only if
            # poh == POOL1.handle and containerUUID == CONTAINER1.uuid
            self.CONTAINER1.open(poh, containerUUID)

            # wait a few seconds and then destroy containers
            time.sleep(5)
            self.CONTAINER1.close()
            self.CONTAINER1.destroy()
            self.CONTAINER1 = None

            # cleanup the pools
            self.POOL1.disconnect()
            self.POOL1.destroy(1)
            self.POOL1 = None
            self.POOL2.disconnect()
            self.POOL2.destroy(1)
            self.POOL2 = None

            if expected_result == 'FAIL':
                self.fail("Test was expected to fail but it passed.\n")

        except DaosApiError as e:
            print(e)
            print(traceback.format_exc())
            if expected_result == 'PASS':
                self.fail("Test was expected to pass but it failed.\n")
        finally:
            if self.hostfile is not None:
                os.remove(self.hostfile)


class FullPoolContainerCreate(Test):
    """
    Tests creating a container in a pool with no remaining free space.
    """

    def setUp(self):
        self.agent_sessions = None
        # get paths from the build_vars generated by build
        with open(os.path.join(os.path.dirname(os.path.realpath(__file__)),
                               "../../../../.build_vars.json")) as build_file:
            build_paths = json.load(build_file)
        self.basepath = os.path.normpath(build_paths['PREFIX'] + "/../")
        self.server_group = self.params.get("name", '/server_config/',
                                            'daos_default_oops')

        self.context = DaosContext(build_paths['PREFIX'] + '/lib/')

        self.cont = None
        self.cont2 = None
        self.pool = DaosPool(self.context)
        self.d_log = DaosLog(self.context)
        self.hostlist = self.params.get("test_machines1", '/hosts/')
        self.hostfile = write_host_file.write_host_file(self.hostlist,
                                                        self.workdir)

        self.agent_sessions = AgentUtils.run_agent(self.basepath, self.hostlist)
        server_utils.run_server(self.hostfile, self.server_group, self.basepath)

    def tearDown(self):
        # shut 'er down
        """
        wrap pool destroy in a try; in case pool create didn't succeed, we
        still need the server to be shut down in any case
        """
        try:
            self.pool.destroy(1)
        finally:
            if self.agent_sessions:
                AgentUtils.stop_agent(self.hostlist, self.agent_sessions)
            server_utils.stop_server(hosts=self.hostlist)

    def test_no_space_cont_create(self):
        """
        :avocado: tags=pool,cont,fullpoolcontcreate,small,vm
        """
        # full storage rc
        err = "-1007"
        # probably should be -1007, revisit later
        err2 = "-1009"

        # create pool
        mode = self.params.get("mode", '/conttests/createmode/')
        self.d_log.debug("mode is {0}".format(mode))
        uid = os.geteuid()
        gid = os.getegid()
        # 16 MiB (16777216 bytes) pool, the minimum size currently possible
        size = 16777216

        self.d_log.debug("creating pool")
        self.pool.create(mode, uid, gid, size, self.server_group, None)
        self.d_log.debug("created pool")

        # connect to the pool
        self.d_log.debug("connecting to pool")
        self.pool.connect(1 << 1)
        self.d_log.debug("connected to pool")

        # query the pool
        self.d_log.debug("querying pool info")
        dummy_pool_info = self.pool.pool_query()
        self.d_log.debug("queried pool info")

        # create a container
        try:
            self.d_log.debug("creating container")
            self.cont = DaosContainer(self.context)
            self.cont.create(self.pool.handle)
            self.d_log.debug("created container")
        except DaosApiError as excep:
            self.d_log.error("caught exception creating container: "
                             "{0}".format(excep))
            self.fail("caught exception creating container: {0}".format(excep))

        self.d_log.debug("opening container")
        self.cont.open()
        self.d_log.debug("opened container")

        # generate a random dkey and akey for each write
        # write 1 MiB objects until no space, then 1 KiB, then 1 B, to fill
        # the pool quickly
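        # (a stand-alone sketch of this fill-until-full pattern appears after
        # the end of this class)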
        for obj_sz in [1048576, 1024, 1]:
            write_count = 0
            while True:
                self.d_log.debug("writing obj {0}, sz {1} to "
                                 "container".format(write_count, obj_sz))
                my_str = "a" * obj_sz
                my_str_sz = obj_sz
                dkey = (''.join(
                    random.choice(string.ascii_lowercase) for _ in range(5)))
                akey = (''.join(
                    random.choice(string.ascii_lowercase) for _ in range(5)))
                try:
                    dummy_oid, dummy_tx = self.cont.write_an_obj(my_str,
                                                                 my_str_sz,
                                                                 dkey, akey,
                                                                 obj_cls=1)
                    self.d_log.debug("wrote obj {0}, sz {1}".format(write_count,
                                                                    obj_sz))
                    write_count += 1
                except DaosApiError as excep:
                    if not (err in repr(excep) or err2 in repr(excep)):
                        self.d_log.error("caught exception while writing "
                                         "object: {0}".format(repr(excep)))
                        self.fail("caught exception while writing object: {0}"
                                  .format(repr(excep)))
                    else:
                        self.d_log.debug("pool is too full for {0} byte "
                                         "objects".format(obj_sz))
                        break

        self.d_log.debug("closing container")
        self.cont.close()
        self.d_log.debug("closed container")
        # create a 2nd container now that pool is full
        try:
            self.d_log.debug("creating 2nd container")
            self.cont2 = DaosContainer(self.context)
            self.cont2.create(self.pool.handle)
            self.d_log.debug("created 2nd container")

            self.d_log.debug("opening container 2")
            self.cont2.open()
            self.d_log.debug("opened container 2")

            self.d_log.debug("writing one more object, write expected to fail")
            self.cont2.write_an_obj(my_str, my_str_sz, dkey, akey, obj_cls=1)
            self.d_log.debug("wrote one more object--this should never print")
        except DaosApiError as excep:
            if not (err in repr(excep) or err2 in repr(excep)):
                self.d_log.error("caught unexpected exception while "
                                 "writing object: {0}".format(repr(excep)))
                self.fail("caught unexpected exception while writing "
                          "object: {0}".format(repr(excep)))
            else:
                self.d_log.debug("correctly caught the no-space error while "
                                 "attempting to write object in full pool")

    def test_container_basics(self):
        """
        Test basic container create/destroy/open/close/query.  Nothing fancy
        just making sure they work at a rudimentary level

        :avocado: tags=container,containercreate,containerdestroy,basecont
        """

        pool = None
        hostlist = None

        try:
            hostlist = self.params.get("test_machines", '/run/hosts/*')
            hostfile = write_host_file.write_host_file(hostlist,
                                                       self.workdir)

            self.agent_sessions = AgentUtils.run_agent(self.basepath, hostlist)
            server_utils.run_server(hostfile, self.server_group, self.basepath)

            # give it time to start
            time.sleep(2)

            # parameters used in pool create
            createmode = self.params.get("mode", '/run/conttests/createmode/')
            createuid = self.params.get("uid", '/run/conttests/createuid/')
            creategid = self.params.get("gid", '/run/conttests/creategid/')
            createsetid = self.params.get("setname",
                                          '/run/conttests/createset/')
            createsize = self.params.get("size", '/run/conttests/createsize/')

            # initialize a python pool object then create the underlying
            # daos storage
            pool = DaosPool(self.context)
            pool.create(createmode, createuid, creategid,
                        createsize, createsetid, None)

            # need a connection to create container
            pool.connect(1 << 1)

            # create a container
            container = DaosContainer(self.context)
            container.create(pool.handle)

            # now open it
            container.open()

            # do a query and compare the UUID returned from create with
            # that returned by query
            container.query()

            if container.get_uuid_str() != c_uuid_to_str(
                    container.info.ci_uuid):
                self.fail("Container UUID did not match the one in info\n")

            container.close()

            # wait a few seconds and then destroy
            time.sleep(5)
            container.destroy()

        except DaosApiError as excep:
            print(excep)
            print(traceback.format_exc())
            self.fail("Test was expected to pass but it failed.\n")
        except Exception as excep:
            self.fail("DAOS code most likely crashed, error: %s" % excep)
        finally:
            # cleanup the pool
            if pool is not None:
                pool.disconnect()
                pool.destroy(1)
            if self.agent_sessions:
                AgentUtils.stop_agent(hostlist, self.agent_sessions)
            server_utils.stop_server(hosts=hostlist)
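

# ---------------------------------------------------------------------------
# Hedged sketch, not part of the original tests: the fill loop in
# test_no_space_cont_create above writes progressively smaller objects until
# each size hits the no-space error (-1007/-1009).  The same control flow in
# isolation, with the DAOS write replaced by a plain capacity check, looks
# like this; the pool size and helper name are illustrative only.
def fill_until_full(capacity, sizes=(1048576, 1024, 1)):
    """Return how many writes of each size fit before capacity runs out."""
    used = 0
    counts = {}
    for size in sizes:
        counts[size] = 0
        # stand-in for catching the no-space error from write_an_obj
        while used + size <= capacity:
            used += size
            counts[size] += 1
    return counts


# e.g. fill_until_full(16777216) "fills" a 16 MiB pool the way the test does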