Example #1
def check_handle(pool_glob_handle, uuidstr, cont_glob_handle, rank):
    """
    This gets run in a child process and verifies the global
    handles can be turned into local handles in another process.
    """

    try:
        # get paths from the build_vars generated by build
        with open('../../../.build_vars.json') as build_file:
            build_paths = json.load(build_file)

        # setup the DAOS python API in this process
        context = DaosContext(build_paths['PREFIX'] + '/lib/')


        # setup the pool and connect using global handle
        pool = DaosPool(context)
        pool.uuid = uuidstr
        pool.set_svc(rank)
        pool.group = "daos_server"
        buf = ctypes.cast(pool_glob_handle.iov_buf,
                          ctypes.POINTER(ctypes.c_byte *
                                         pool_glob_handle.iov_buf_len))
        buf2 = bytearray()
        buf2.extend(buf.contents)
        pool_handle = pool.global2local(context,
                                        pool_glob_handle.iov_len,
                                        pool_glob_handle.iov_buf_len,
                                        buf2)

        # perform an operation that will use the new handle; if it
        # doesn't throw an exception, then all is well.
        pool.pool_query()

        # setup the container and then connect using the global handle
        container = DaosContainer(context)
        container.poh = pool_handle
        buf = ctypes.cast(cont_glob_handle.iov_buf,
                          ctypes.POINTER(ctypes.c_byte *
                                         cont_glob_handle.iov_buf_len))
        buf2 = bytearray()
        buf2.extend(buf.contents)
        dummy_cont_handle = container.global2local(context,
                                                   cont_glob_handle.iov_len,
                                                   cont_glob_handle.iov_buf_len,
                                                   buf2)
        # just try one thing to make sure the handle is good
        container.query()

    except DaosApiError as excep:
        print(excep)
        print(traceback.format_exc())
        raise

    return
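The ctypes.cast / bytearray idiom above is how the serialized handle (an iov-style struct with iov_buf, iov_buf_len and iov_len fields) gets copied into a plain Python buffer that global2local() can consume. Below is a minimal, self-contained sketch of just that buffer-copy step; it makes no DAOS calls, and the payload is only a stand-in for the real serialized handle bytes.

import ctypes

payload = b"serialized-handle-bytes"      # stand-in for the exported handle
iov_buf_len = len(payload)

# a C-side buffer standing in for whatever iov_buf points at
src = ctypes.create_string_buffer(payload)
iov_buf = ctypes.cast(src, ctypes.c_void_p)

# the copy performed in check_handle(): cast the void pointer to a fixed-size
# byte array, then copy its contents into a bytearray for global2local()
buf = ctypes.cast(iov_buf, ctypes.POINTER(ctypes.c_byte * iov_buf_len))
buf2 = bytearray()
buf2.extend(buf.contents)

assert bytes(buf2) == payload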
Example #2
    def test_container_basics(self):
        """
        Test basic container create/destroy/open/close/query.  Nothing fancy,
        just making sure they work at a rudimentary level.

        :avocado: tags=container,containercreate,containerdestroy,basecont
        """

        pool = None
        hostlist = None

        try:
            hostlist = self.params.get("test_machines", '/run/hosts/*')
            hostfile = write_host_file.write_host_file(hostlist,
                                                       self.workdir)

            self.agent_sessions = AgentUtils.run_agent(self.basepath, hostlist)
            server_utils.run_server(hostfile, self.server_group, self.basepath)

            # give it time to start
            time.sleep(2)

            # parameters used in pool create
            createmode = self.params.get("mode", '/run/conttests/createmode/')
            createuid = self.params.get("uid", '/run/conttests/createuid/')
            creategid = self.params.get("gid", '/run/conttests/creategid/')
            createsetid = self.params.get("setname",
                                          '/run/conttests/createset/')
            createsize = self.params.get("size", '/run/conttests/createsize/')

            # initialize a python pool object then create the underlying
            # daos storage
            pool = DaosPool(self.context)
            pool.create(createmode, createuid, creategid,
                        createsize, createsetid, None)

            # need a connection to create container
            pool.connect(1 << 1)

            # create a container
            container = DaosContainer(self.context)
            container.create(pool.handle)

            # now open it
            container.open()

            # do a query and compare the UUID returned from create with
            # that returned by query
            container.query()

            if container.get_uuid_str() != c_uuid_to_str(
                    container.info.ci_uuid):
                self.fail("Container UUID did not match the one in info'n")

            container.close()

            # wait a few seconds and then destroy
            time.sleep(5)
            container.destroy()

        except DaosApiError as excep:
            print(excep)
            print(traceback.format_exc())
            self.fail("Test was expected to pass but it failed.\n")
        except Exception as excep:
            self.fail("Daos code segfaulted most likely, error: %s" % excep)
        finally:
            # cleanup the pool
            if pool is not None:
                pool.disconnect()
                pool.destroy(1)
            if self.agent_sessions:
                AgentUtils.stop_agent(hostlist, self.agent_sessions)
            server_utils.stop_server(hosts=hostlist)
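The pool.connect(1 << 1) calls in these examples pass the read-write connect bit. The Snapshot example near the end of this page quotes the flag values in a comment (DAOS_PC_RO, DAOS_PC_RW, DAOS_PC_EX); naming them, as in the sketch below, keeps the intent readable. The constant names follow the C API names quoted in that comment and are shown here only for illustration.

DAOS_PC_RO = 1 << 0   # read-only connection
DAOS_PC_RW = 1 << 1   # read-write connection
DAOS_PC_EX = 1 << 2   # exclusive connection

# pool.connect(DAOS_PC_RW) reads the same as the pool.connect(1 << 1) above
assert DAOS_PC_RW == 1 << 1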
Example #4
    def test_array_obj(self):
        """
        Test ID: DAOS-961

        Test Description: Writes an array to an object and then reads it
        back and verifies it.

        :avocado: tags=object,arrayobj,regression,vm,small
        """
        try:
            # parameters used in pool create
            createmode = self.params.get("mode", '/run/conttests/createmode/')
            createsetid = self.params.get("setname",
                                          '/run/conttests/createset/')
            createsize = self.params.get("size", '/run/conttests/createsize/')
            createuid = os.geteuid()
            creategid = os.getegid()
            print("uid is {} gid is {}".format(createuid, creategid))

            # initialize a python pool object then create the underlying
            # daos storage
            pool = DaosPool(self.Context)
            pool.create(createmode, createuid, creategid, createsize,
                        createsetid, None)
            self.pl.info("Pool %s created.", pool.get_uuid_str())

            # need a connection to create container
            pool.connect(1 << 1)

            # create a container
            container = DaosContainer(self.Context)
            container.create(pool.handle)
            self.pl.info("Container %s created.", container.get_uuid_str())

            # now open it
            container.open()

            # do a query and compare the UUID returned from create with
            # that returned by query
            container.query()

            if container.get_uuid_str() != c_uuid_to_str(
                    container.info.ci_uuid):
                self.fail("Container UUID did not match the one in info\n")

            # create an object and write some data into it
            thedata = []
            thedata.append("data string one")
            thedata.append("data string two")
            thedata.append("data string tre")
            dkey = "this is the dkey"
            akey = "this is the akey"

            self.pl.info("writing array to dkey >%s< akey >%s<.", dkey, akey)
            oid, epoch = container.write_an_array_value(thedata, dkey, akey)

            # read the data back and make sure it's correct
            length = len(thedata[0])
            thedata2 = container.read_an_array(len(thedata), length + 1, dkey,
                                               akey, oid, epoch)
            if thedata[0][0:length - 1] != thedata2[0][0:length - 1]:
                self.pl.error("Data mismatch")
                self.pl.error("Wrote: >%s<" (thedata[0]))
                self.pl.error("Read: >%s<" (thedata2[0]))
                self.fail("Write data, read it back, didn't match\n")

            if thedata[2][0:length - 1] != thedata2[2][0:length - 1]:
                self.pl.error("Data mismatch")
                self.pl.error("Wrote: >%s<" (thedata[2]))
                self.pl.error("Read: >%s<" (thedata2[2]))
                self.fail("Write data, read it back, didn't match\n")

            container.close()

            # wait a few seconds and then destroy
            time.sleep(5)
            container.destroy()

            # cleanup the pool
            pool.disconnect()
            pool.destroy(1)
            self.pl.info("Test Complete")

        except ValueError as e:
            self.pl.error("Test Failed, exception was thrown.")
            print(e)
            print(traceback.format_exc())
            self.fail("Test was expected to pass but it failed.\n")
    def test_container_basics(self):
        """
        Test basic container create/destroy/open/close/query.  Nothing fancy,
        just making sure they work at a rudimentary level.

        :avocado: tags=container,containercreate,containerdestroy,basecont
        """

        pool = None
        hostlist = None

        try:
            hostlist = self.params.get("test_machines", '/run/hosts/*')
            hostfile = write_host_file.write_host_file(hostlist,
                                                       self.workdir)

            self.agent_sessions = agent_utils.run_agent(self.basepath, hostlist)
            server_utils.run_server(hostfile, self.server_group, self.basepath)

            # give it time to start
            time.sleep(2)

            # parameters used in pool create
            createmode = self.params.get("mode", '/run/conttests/createmode/')
            createuid = self.params.get("uid", '/run/conttests/createuid/')
            creategid = self.params.get("gid", '/run/conttests/creategid/')
            createsetid = self.params.get("setname",
                                          '/run/conttests/createset/')
            createsize = self.params.get("size", '/run/conttests/createsize/')

            # initialize a python pool object then create the underlying
            # daos storage
            pool = DaosPool(self.context)
            pool.create(createmode, createuid, creategid,
                        createsize, createsetid, None)

            # need a connection to create container
            pool.connect(1 << 1)

            # create a container
            container = DaosContainer(self.context)
            container.create(pool.handle)

            # now open it
            container.open()

            # do a query and compare the UUID returned from create with
            # that returned by query
            container.query()

            if container.get_uuid_str() != c_uuid_to_str(
                    container.info.ci_uuid):
                self.fail("Container UUID did not match the one in info'n")

            container.close()

            # wait a few seconds and then destroy
            time.sleep(5)
            container.destroy()

        except DaosApiError as excep:
            print(excep)
            print(traceback.format_exc())
            self.fail("Test was expected to pass but it failed.\n")
        except Exception as excep:
            self.fail("Daos code segfaulted most likely, error: %s" % excep)
        finally:
            # cleanup the pool
            if pool is not None:
                pool.disconnect()
                pool.destroy(1)
            if self.agent_sessions:
                agent_utils.stop_agent(self.agent_sessions)
            server_utils.stop_server(hosts=hostlist)
Example #6
class ContainerAsync(Test):
    """
    Tests asynchronous DAOS container operations (create, destroy, open,
    close, and query).

    :avocado: tags=container,containercreate2,connectpermission
    """
    def setUp(self):
        # get paths from the build_vars generated by build
        with open('../../../.build_vars.json') as f:
            build_paths = json.load(f)
        self.basepath = os.path.normpath(build_paths['PREFIX']  + "/../")
        self.tmp = build_paths['PREFIX'] + '/tmp'

        self.server_group = self.params.get("server_group",'/server/','daos_server')

        # setup the DAOS python API
        self.Context = DaosContext(build_paths['PREFIX'] + '/lib/')
        self.POOL = None

        hostlist = self.params.get("test_machines",'/run/hosts/*')
        self.hostfile = WriteHostFile.WriteHostFile(hostlist, self.tmp)
        print("Host file is: {}".format(self.hostfile))

        ServerUtils.runServer(self.hostfile, self.server_group, self.basepath)
        time.sleep(10)

    def tearDown(self):
        if self.hostfile is not None:
            os.remove(self.hostfile)
        if self.POOL is not None and self.POOL.attached:
            self.POOL.destroy(1)

        ServerUtils.stopServer()

    def test_createasync(self):
        """
        Test container create for asynchronous mode.

        :avocado: tags=container,containerasync,createasync
        """

        global GLOB_SIGNAL
        global GLOB_RC

        # parameters used in pool create
        createmode = self.params.get("mode",'/run/createtests/createmode/*/')
        createsetid = self.params.get("setname",'/run/createtests/createset/')
        createsize  = self.params.get("size",'/run/createtests/createsize/')
        createuid = os.geteuid()
        creategid = os.getegid()

        try:
            # initialize a python pool object then create the underlying
            # daos storage
            self.POOL = DaosPool(self.Context)

            self.POOL.create(createmode, createuid, creategid,
                    createsize, createsetid, None)

            poh = self.POOL.handle

            self.POOL.connect(1 << 1)

            # Container initialization and creation
            self.Container1 = DaosContainer(self.Context)
            self.Container2 = DaosContainer(self.Context)


            GLOB_SIGNAL = threading.Event()
            self.Container1.create(poh, None, cb_func)

            GLOB_SIGNAL.wait()
            if GLOB_RC != 0:
                self.fail("RC not as expected in async test")
            print ("RC after successful Container create: " , GLOB_RC)

            # Try to recreate container after destroying pool,
            # this should fail. Checking rc after failure.
            self.POOL.destroy(1)
            GLOB_SIGNAL = threading.Event()
            GLOB_RC = -9900000
            self.Container2.create(poh, None, cb_func)

            GLOB_SIGNAL.wait()
            if GLOB_RC != -1005:
                self.fail("RC not as expected in async test")
            print ("RC after Container create failed:", GLOB_RC)

            # cleanup the Pool and Container
            self.POOL = None

        except ValueError as e:
            print(e)
            print(traceback.format_exc())

    def test_destroyasync(self):
        """
        Test container destroy for asynchronous mode.

        :avocado: tags=container,containerasync,destroyasync
        """

        global GLOB_SIGNAL
        global GLOB_RC

        # parameters used in pool create
        createmode = self.params.get("mode",'/run/createtests/createmode/*/')
        createsetid = self.params.get("setname",'/run/createtests/createset/')
        createsize  = self.params.get("size",'/run/createtests/createsize/')
        createuid = os.geteuid()
        creategid = os.getegid()

        try:
            # initialize a python pool object then create the underlying
            # daos storage
            self.POOL = DaosPool(self.Context)

            self.POOL.create(createmode, createuid, creategid,
                        createsize, createsetid, None)

            poh = self.POOL.handle

            self.POOL.connect(1 << 1)

            # Container initialization and creation
            self.Container1 = DaosContainer(self.Context)
            self.Container2 = DaosContainer(self.Context)

            self.Container1.create(poh)

            GLOB_SIGNAL = threading.Event()
            self.Container1.destroy(1, poh, None, cb_func)

            GLOB_SIGNAL.wait()
            if GLOB_RC != 0:
                self.fail("RC not as expected in async test")
            print ("RC after successful Container create: " , GLOB_RC)

            # Try to destroy a container that was never created; this should
            # fail. Checking rc after failure.
            GLOB_SIGNAL = threading.Event()
            GLOB_RC = -9900000
            self.Container2.destroy(1, poh, None, cb_func)

            GLOB_SIGNAL.wait()
            if GLOB_RC != -1003:
                self.fail("RC not as expected in async test")
            print ("RC after Container destroy failed:", GLOB_RC)

            # cleanup the Pool and Container
            self.POOL.disconnect()
            self.POOL.destroy(1)
            self.POOL = None

        except ValueError as e:
            print(e)
            print(traceback.format_exc())

    def test_openasync(self):
        """
        Test container open for asynchronous mode.

        :avocado: tags=container,containerasync,openasync
        """

        global GLOB_SIGNAL
        global GLOB_RC

        # parameters used in pool create
        createmode = self.params.get("mode",'/run/createtests/createmode/*/')
        createsetid = self.params.get("setname",'/run/createtests/createset/')
        createsize  = self.params.get("size",'/run/createtests/createsize/')
        createuid = os.geteuid()
        creategid = os.getegid()

        try:
            # initialize a python pool object then create the underlying
            # daos storage
            self.POOL = DaosPool(self.Context)

            self.POOL.create(createmode, createuid, creategid,
                            createsize, createsetid, None)

            poh = self.POOL.handle

            self.POOL.connect(1 << 1)

            # Container initialization and creation
            self.Container1 = DaosContainer(self.Context)
            self.Container2 = DaosContainer(self.Context)

            self.Container1.create(poh)

            str_cuuid = self.Container1.get_uuid_str()
            cuuid = uuid.UUID(str_cuuid)
            coh = self.Container1.coh

            GLOB_SIGNAL = threading.Event()
            self.Container1.open(poh, cuuid, 2, coh, cb_func)

            GLOB_SIGNAL.wait()
            if GLOB_RC != 0:
                self.fail("RC not as expected in async test")
            print ("RC after successful Container create: " , GLOB_RC)

            # Try to open container2, which was never created; this should
            # fail. Checking rc after failure.
            GLOB_SIGNAL = threading.Event()
            GLOB_RC = -9900000
            self.Container2.open(None, None, None, None, cb_func)

            GLOB_SIGNAL.wait()
            if GLOB_RC != -1003:
                self.fail("RC not as expected in async test")
            print ("RC after Container destroy failed:", GLOB_RC)

            # cleanup the Pool and Container
            self.Container1.close()
            self.Container1.destroy()
            self.POOL.disconnect()
            self.POOL.destroy(1)
            self.POOL = None

        except ValueError as e:
            print(e)
            print(traceback.format_exc())

    def test_closeasync(self):
        """
        Test container close for asynchronous mode.

        :avocado: tags=container,containerasync,closeasync
        """

        global GLOB_SIGNAL
        global GLOB_RC

        # parameters used in pool create
        createmode = self.params.get("mode",'/run/createtests/createmode/*/')
        createsetid = self.params.get("setname",'/run/createtests/createset/')
        createsize  = self.params.get("size",'/run/createtests/createsize/')
        createuid = os.geteuid()
        creategid = os.getegid()

        try:
            # initialize a python pool object then create the underlying
            # daos storage
            self.POOL = DaosPool(self.Context)

            self.POOL.create(createmode, createuid, creategid,
                            createsize, createsetid, None)


            poh = self.POOL.handle

            self.POOL.connect(1 << 1)

            # Container initialization and creation
            self.Container1 = DaosContainer(self.Context)
            self.Container2 = DaosContainer(self.Context)

            self.Container1.create(poh)

            str_cuuid = self.Container1.get_uuid_str()
            cuuid = uuid.UUID(str_cuuid)
            coh = self.Container1.coh

            self.Container1.open(poh, cuuid, 2, coh)

            GLOB_SIGNAL = threading.Event()
            self.Container1.close(coh, cb_func)

            GLOB_SIGNAL.wait()
            if GLOB_RC != 0:
                self.fail("RC not as expected in async test:{0}".format(GLOB_RC))
            print ("RC after successful Container create: " , GLOB_RC)

            # Try to close container2, which was never opened; this should
            # fail. Checking rc after failure.
            GLOB_SIGNAL = threading.Event()
            GLOB_RC = -9900000
            self.Container2.close(coh, cb_func)

            GLOB_SIGNAL.wait()
            if GLOB_RC != -1002:
                self.fail("RC not as expected in async test:{0}".format(GLOB_RC))
            print ("RC after Container destroy failed:", GLOB_RC)

            # cleanup the Pool and Container
            self.Container1.destroy()
            self.POOL.disconnect()
            self.POOL.destroy(1)
            self.POOL = None

        except ValueError as e:
            print(e)
            print(traceback.format_exc())

    def test_queryasync(self):
        """
        Test container query for asynchronous mode.

        :avocado: tags=container,containerasync,queryasync
        """

        global GLOB_SIGNAL
        global GLOB_RC

        # parameters used in pool create
        createmode = self.params.get("mode",'/run/createtests/createmode/*/')
        createsetid = self.params.get("setname",'/run/createtests/createset/')
        createsize  = self.params.get("size",'/run/createtests/createsize/')
        createuid = os.geteuid()
        creategid = os.getegid()

        try:
            # initialize a python pool object then create the underlying
            # daos storage
            self.POOL = DaosPool(self.Context)

            self.POOL.create(createmode, createuid, creategid,
                        createsize, createsetid, None)

            poh = self.POOL.handle

            self.POOL.connect(1 << 1)

            # Container initialization and creation
            self.Container1 = DaosContainer(self.Context)
            self.Container2 = DaosContainer(self.Context)

            self.Container1.create(poh)

            str_cuuid = self.Container1.get_uuid_str()
            cuuid = uuid.UUID(str_cuuid)
            coh = self.Container1.coh

            # Open Container
            self.Container1.open(poh, None, 2, None, coh)

            GLOB_SIGNAL = threading.Event()
            self.Container1.query(coh, cb_func)

            GLOB_SIGNAL.wait()
            if GLOB_RC != 0:
                self.fail("RC not as expected in async test:{0}".format(GLOB_RC))
            print ("RC after successful Container create: " , GLOB_RC)

            # Close opened Container
            self.Container1.close(coh)

            # Try to query container2, which was never opened; this should
            # fail. Checking rc after failure.
            GLOB_SIGNAL = threading.Event()
            GLOB_RC = -9900000
            self.Container2.query(coh, cb_func)

            GLOB_SIGNAL.wait()
            if GLOB_RC != -1002:
                self.fail("RC not as expected in async test:{0}".format(GLOB_RC))
            print ("RC after Container destroy failed:", GLOB_RC)

            # cleanup the Pool and Container
            self.Container1.destroy()
            self.POOL.disconnect()
            self.POOL.destroy(1)
            self.POOL = None

        except ValueError as e:
            print(e)
            print(traceback.format_exc())
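The asynchronous tests in this example (and in Example #8 below) rely on module-level GLOB_SIGNAL and GLOB_RC variables plus a cb_func callback that the excerpt never shows. Here is a minimal sketch of that pattern; the attribute that carries the return code (event.event.ev_error below) is an assumption about the DaosEvent wrapper, not something confirmed by the excerpt.

import threading

GLOB_SIGNAL = None    # re-armed with a fresh threading.Event before each async call
GLOB_RC = -9900000    # sentinel value, overwritten by the callback

def cb_func(event):
    """Completion callback handed to the asynchronous DAOS calls."""
    global GLOB_RC
    GLOB_RC = event.event.ev_error   # assumed location of the return code
    GLOB_SIGNAL.set()                # wake the test waiting in GLOB_SIGNAL.wait()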
Example #7
    def test_array_obj(self):
        """
        Test ID: DAOS-961

        Test Description: Writes an array to an object and then reads it
        back and verifies it.

        :avocado: tags=object,arrayobj,regression,vm,small
        """
        try:
            # parameters used in pool create
            createmode = self.params.get("mode", '/run/pool_params/createmode/')
            createsetid = self.params.get("setname",
                                          '/run/pool_params/createset/')
            createsize = self.params.get("size", '/run/pool_params/createsize/')
            createuid = os.geteuid()
            creategid = os.getegid()

            # initialize a python pool object then create the underlying
            # daos storage
            pool = DaosPool(self.context)
            pool.create(createmode, createuid, creategid,
                        createsize, createsetid, None)
            self.plog.info("Pool %s created.", pool.get_uuid_str())

            # need a connection to create container
            pool.connect(1 << 1)

            # create a container
            container = DaosContainer(self.context)
            container.create(pool.handle)
            self.plog.info("Container %s created.", container.get_uuid_str())

            # now open it
            container.open()

            # do a query and compare the UUID returned from create with
            # that returned by query
            container.query()

            if container.get_uuid_str() != c_uuid_to_str(
                    container.info.ci_uuid):
                self.fail("Container UUID did not match the one in info\n")

            # create an object and write some data into it
            thedata = []
            thedata.append("data string one")
            thedata.append("data string two")
            thedata.append("data string tre")
            dkey = "this is the dkey"
            akey = "this is the akey"

            self.plog.info("writing array to dkey >%s< akey >%s<.", dkey, akey)
            oid, epoch = container.write_an_array_value(thedata, dkey, akey,
                                                        obj_cls=3)

            # read the data back and make sure it's correct
            length = len(thedata[0])
            thedata2 = container.read_an_array(len(thedata), length+1,
                                               dkey, akey, oid, epoch)
            if thedata[0][0:length-1] != thedata2[0][0:length-1]:
                self.plog.error("Data mismatch")
                self.plog.error("Wrote: >%s<", thedata[0])
                self.plog.error("Read: >%s<", thedata2[0])
                self.fail("Write data, read it back, didn't match\n")

            if thedata[2][0:length-1] != thedata2[2][0:length-1]:
                self.plog.error("Data mismatch")
                self.plog.error("Wrote: >%s<", thedata[2])
                self.plog.error("Read: >%s<", thedata2[2])
                self.fail("Write data, read it back, didn't match\n")

            container.close()

            # wait a few seconds and then destroy
            time.sleep(5)
            container.destroy()

            # cleanup the pool
            pool.disconnect()
            pool.destroy(1)
            self.plog.info("Test Complete")

        except DaosApiError as excep:
            self.plog.error("Test Failed, exception was thrown.")
            print(excep)
            print(traceback.format_exc())
            self.fail("Test was expected to pass but it failed.\n")
Example #8
class ContainerAsync(TestWithServers):
    """
    Tests asynchronous DAOS container operations (create, destroy, open,
    close, and query).

    :avocado: recursive
    """

    def __init__(self, *args, **kwargs):
        super(ContainerAsync, self).__init__(*args, **kwargs)
        self.container1 = None
        self.container2 = None
        self.pool = None

    def test_createasync(self):
        """
        Test container create for asynchronous mode.

        :avocado: tags=all,small,full_regression,container,createasync
        """

        global GLOB_SIGNAL
        global GLOB_RC

        # parameters used in pool create
        createmode = self.params.get("mode", '/run/createtests/createmode/*/')
        createsetid = self.params.get("setname", '/run/createtests/createset/')
        createsize = self.params.get("size", '/run/createtests/createsize/')
        createuid = os.geteuid()
        creategid = os.getegid()

        try:
            # initialize a python pool object then create the underlying
            # daos storage
            self.pool = DaosPool(self.context)

            self.pool.create(createmode, createuid, creategid,
                             createsize, createsetid, None)

            poh = self.pool.handle

            self.pool.connect(1 << 1)

            # Container initialization and creation
            self.container1 = DaosContainer(self.context)
            self.container2 = DaosContainer(self.context)


            GLOB_SIGNAL = threading.Event()
            self.container1.create(poh, None, cb_func)

            GLOB_SIGNAL.wait()
            if GLOB_RC != 0:
                self.fail("RC not as expected in async test")
            print("RC after successful container create: ", GLOB_RC)

            # Try to recreate container after destroying pool,
            # this should fail. Checking rc after failure.
            self.pool.destroy(1)
            GLOB_SIGNAL = threading.Event()
            GLOB_RC = -9900000
            self.container2.create(poh, None, cb_func)

            GLOB_SIGNAL.wait()
            if GLOB_RC == 0:
                self.fail("RC not as expected in async test")
            print("RC after unsuccessful container create: ", GLOB_RC)

            # cleanup the pool and container
            self.pool = None

        except DaosApiError as excep:
            print(excep)
            print(traceback.format_exc())

    def test_destroyasync(self):
        """
        Test container destroy for asynchronous mode.

        :avocado: tags=all,small,full_regression,container,contdestroyasync
        """

        global GLOB_SIGNAL
        global GLOB_RC

        # parameters used in pool create
        createmode = self.params.get("mode", '/run/createtests/createmode/*/')
        createsetid = self.params.get("setname", '/run/createtests/createset/')
        createsize = self.params.get("size", '/run/createtests/createsize/')
        createuid = os.geteuid()
        creategid = os.getegid()

        try:
            # initialize a python pool object then create the underlying
            # daos storage
            self.pool = DaosPool(self.context)

            self.pool.create(createmode, createuid, creategid,
                             createsize, createsetid, None)

            poh = self.pool.handle

            self.pool.connect(1 << 1)

            # Container initialization and creation
            self.container1 = DaosContainer(self.context)
            self.container2 = DaosContainer(self.context)

            self.container1.create(poh)

            GLOB_SIGNAL = threading.Event()
            self.container1.destroy(1, poh, None, cb_func)

            GLOB_SIGNAL.wait()
            if GLOB_RC != 0:
                self.fail("RC not as expected in async test")
            print("RC after successful container create: ", GLOB_RC)

            # Try to destroy a container that was never created; this should
            # fail. Checking rc after failure.
            GLOB_SIGNAL = threading.Event()
            GLOB_RC = -9900000
            self.container2.destroy(1, poh, None, cb_func)

            GLOB_SIGNAL.wait()
            if GLOB_RC != -1003:
                self.fail("RC not as expected in async test")
            print("RC after container destroy failed:", GLOB_RC)

            # cleanup the pool and container
            self.pool.disconnect()
            self.pool.destroy(1)
            self.pool = None

        except DaosApiError as excep:
            print(excep)
            print(traceback.format_exc())

    def test_openasync(self):
        """
        Test container open for asynchronous mode.

        :avocado: tags=all,small,full_regression,container,openasync
        """

        global GLOB_SIGNAL
        global GLOB_RC

        # parameters used in pool create
        createmode = self.params.get("mode", '/run/createtests/createmode/*/')
        createsetid = self.params.get("setname", '/run/createtests/createset/')
        createsize = self.params.get("size", '/run/createtests/createsize/')
        createuid = os.geteuid()
        creategid = os.getegid()

        try:
            # initialize a python pool object then create the underlying
            # daos storage
            self.pool = DaosPool(self.context)

            self.pool.create(createmode, createuid, creategid,
                             createsize, createsetid, None)

            poh = self.pool.handle

            self.pool.connect(1 << 1)

            # Container initialization and creation
            self.container1 = DaosContainer(self.context)
            self.container2 = DaosContainer(self.context)

            self.container1.create(poh)

            str_cuuid = self.container1.get_uuid_str()
            cuuid = uuid.UUID(str_cuuid)

            GLOB_SIGNAL = threading.Event()
            self.container1.open(poh, cuuid, 2, cb_func)

            GLOB_SIGNAL.wait()
            if GLOB_RC != 0:
                self.fail("RC not as expected in async test")
            print("RC after successful container create: ", GLOB_RC)

            # Try to open container2, which was never created; this should
            # fail. Checking rc after failure.
            GLOB_SIGNAL = threading.Event()
            GLOB_RC = -9900000
            self.container2.open(None, None, None, cb_func)

            GLOB_SIGNAL.wait()
            if GLOB_RC == 0:
                self.fail("RC not as expected in async test")
            print("RC after container destroy failed:", GLOB_RC)

            # cleanup the pool and container
            self.container1.close()
            self.container1.destroy()
            self.pool.disconnect()
            self.pool.destroy(1)
            self.pool = None

        except DaosApiError as excep:
            print(excep)
            print(traceback.format_exc())

    def test_closeasync(self):
        """
        Test container close for asynchronous mode.

        :avocado: tags=all,small,full_regression,container,closeasync
        """

        global GLOB_SIGNAL
        global GLOB_RC

        # parameters used in pool create
        createmode = self.params.get("mode", '/run/createtests/createmode/*/')
        createsetid = self.params.get("setname", '/run/createtests/createset/')
        createsize = self.params.get("size", '/run/createtests/createsize/')
        createuid = os.geteuid()
        creategid = os.getegid()

        try:
            # initialize a python pool object then create the underlying
            # daos storage
            self.pool = DaosPool(self.context)

            self.pool.create(createmode, createuid, creategid,
                             createsize, createsetid, None)


            poh = self.pool.handle

            self.pool.connect(1 << 1)

            # Container initialization and creation
            self.container1 = DaosContainer(self.context)
            self.container2 = DaosContainer(self.context)

            self.container1.create(poh)

            str_cuuid = self.container1.get_uuid_str()
            cuuid = uuid.UUID(str_cuuid)

            self.container1.open(poh, cuuid, 2)

            GLOB_SIGNAL = threading.Event()
            self.container1.close(cb_func=cb_func)

            GLOB_SIGNAL.wait()
            if GLOB_RC != 0:
                self.fail("RC not as expected in async test: "
                          "{0}".format(GLOB_RC))
            print("RC after successful container create: ", GLOB_RC)

            # Try to close container2, which was never opened; this should
            # fail. Checking rc after failure.
            GLOB_SIGNAL = threading.Event()
            GLOB_RC = -9900000
            self.container2.close(cb_func=cb_func)

            GLOB_SIGNAL.wait()
            if GLOB_RC == 0:
                self.fail("RC not as expected in async test: "
                          "{0}".format(GLOB_RC))
            print("RC after container destroy failed:", GLOB_RC)

            # cleanup the pool and container
            self.container1.destroy()
            self.pool.disconnect()
            self.pool.destroy(1)
            self.pool = None

        except DaosApiError as excep:
            print(excep)
            print(traceback.format_exc())

    def test_queryasync(self):
        """
        Test container query for asynchronous mode.

        :avocado: tags=all,small,full_regression,container,queryasync
        """

        global GLOB_SIGNAL
        global GLOB_RC

        # parameters used in pool create
        createmode = self.params.get("mode", '/run/createtests/createmode/*/')
        createsetid = self.params.get("setname", '/run/createtests/createset/')
        createsize = self.params.get("size", '/run/createtests/createsize/')
        createuid = os.geteuid()
        creategid = os.getegid()

        try:
            # initialize a python pool object then create the underlying
            # daos storage
            self.pool = DaosPool(self.context)

            self.pool.create(createmode, createuid, creategid,
                             createsize, createsetid, None)

            poh = self.pool.handle

            self.pool.connect(1 << 1)

            # Container initialization and creation
            self.container1 = DaosContainer(self.context)
            self.container2 = DaosContainer(self.context)

            self.container1.create(poh)

            dummy_str_cuuid = self.container1.get_uuid_str()

            # Open container
            self.container1.open(poh, None, 2, None)

            GLOB_SIGNAL = threading.Event()
            self.container1.query(cb_func=cb_func)

            GLOB_SIGNAL.wait()
            if GLOB_RC != 0:
                self.fail("RC not as expected in async test: "
                          "{0}".format(GLOB_RC))
            print("RC after successful container create: ", GLOB_RC)

            # Close opened container
            self.container1.close()

            # Try to query container2, which was never opened; this should
            # fail. Checking rc after failure.
            GLOB_SIGNAL = threading.Event()
            GLOB_RC = -9900000
            self.container2.query(cb_func=cb_func)

            GLOB_SIGNAL.wait()
            if GLOB_RC == 0:
                self.fail("RC not as expected in async test: "
                          "{0}".format(GLOB_RC))
            print("RC after container destroy failed:", GLOB_RC)

            # cleanup the pool and container
            self.container1.destroy()
            self.pool.disconnect()
            self.pool.destroy(1)
            self.pool = None

        except DaosApiError as excep:
            print(excep)
            print(traceback.format_exc())
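Every async step in this example repeats the same sequence: re-arm GLOB_SIGNAL and GLOB_RC, fire the call with cb_func, wait, then compare the return code. A hypothetical helper (not part of the DAOS test utilities) could wrap that sequence; the caller supplies a zero-argument callable that passes cb_func itself, since the callback sits in a different position for create, destroy, open, close and query.

import threading

def run_async_step(async_call, expected_rc=0):
    """Arm the globals, invoke the async call, wait, and check the RC."""
    global GLOB_SIGNAL, GLOB_RC
    GLOB_SIGNAL = threading.Event()
    GLOB_RC = -9900000
    async_call()     # e.g. lambda: container.create(poh, None, cb_func)
    GLOB_SIGNAL.wait()
    return GLOB_RC == expected_rc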
Example #9
class Snapshot(TestWithServers):
    """
    Epic: DAOS-2249 Create system level tests that cover basic snapshot
          functionality.
    Testcase:
          DAOS-1370 Basic snapshot test
          DAOS-1386 Test container SnapShot information
          DAOS-1371 Test list snapshots
          DAOS-1395 Test snapshot destroy
          DAOS-1402 Test creating multiple snapshots

    Test Class Description:
          Start DAOS servers, set up the pool and container for the above
          snapshot Epic and Testcases, including snapshot basic, container
          information, list, creation and destroy.
    :avocado: recursive
    """
    def setUp(self):
        """
        set up method
        """
        super(Snapshot, self).setUp()
        # get parameters from yaml file
        createmode = self.params.get("mode", '/run/poolparams/createmode/')
        createuid = os.geteuid()
        creategid = os.getegid()
        createsetid = self.params.get("setname", '/run/poolparams/createset/')
        createsize = self.params.get("size", '/run/poolparams/createsize/')
        self.log.info("==In setUp, self.context= %s", self.context)

        try:

            # initialize a python pool object then create the underlying
            # daos storage
            self.pool = DaosPool(self.context)
            self.pool.create(createmode, createuid, creategid, createsize,
                             createsetid, None)

            # need a connection to the pool with rw permission
            #    DAOS_PC_RO = int(1 << 0)
            #    DAOS_PC_RW = int(1 << 1)
            #    DAOS_PC_EX = int(1 << 2)
            self.pool.connect(1 << 1)

            # create a container
            self.container = DaosContainer(self.context)
            self.container.create(self.pool.handle)

        except DaosApiError as error:
            self.log.info("Error detected in DAOS pool container setup: %s",
                          str(error))
            self.log.info(traceback.format_exc())
            self.fail("##Test failed on setUp, before snapshot taken")

        # now open it
        self.container.open()

        # do a query and compare the UUID returned from create with
        # that returned by query
        self.container.query()

        if self.container.get_uuid_str() != c_uuid_to_str(
                self.container.info.ci_uuid):
            self.fail("##Container UUID did not match the one in info.")

    def tearDown(self):
        """
        tear down method
        """
        try:
            if self.container:
                self.container.close()

            if self.container:
                self.container.destroy()

            # cleanup the pool
            if self.pool:
                self.pool.disconnect()
                self.pool.destroy(1)

        except DaosApiError as excep:
            self.log.info(excep)
            self.log.info(traceback.format_exc())
            self.fail("##Snapshot test failed on cleanUp.")

        finally:
            super(Snapshot, self).tearDown()

    def display_snapshot(self, snapshot):
        """
        To display the snapshot information.
        Args:
            snapshot: snapshot handle to be displayed.
        Return:
            none.
        """
        self.log.info("==display_snapshot================")
        self.log.info("snapshot=                 %s", snapshot)
        self.log.info("snapshot.context=         %s", snapshot.context)
        self.log.info("snapshot.context.libdaos= %s", snapshot.context.libdaos)
        self.log.info("snapshot.context.libtest= %s", snapshot.context.libtest)
        self.log.info("snapshot.context.ftable= %s", snapshot.context.ftable)
        self.log.info("snapshot.context.ftable[list-attr]= %s",
                      snapshot.context.ftable["list-attr"])
        self.log.info("snapshot.context.ftable[test-event]=%s",
                      snapshot.context.ftable["test-event"])
        self.log.info("snapshot.name=  %s", snapshot.name)
        self.log.info("snapshot.epoch= %s", snapshot.epoch)
        self.log.info("==================================")

    def take_snapshot(self, container, epoch):
        """
        To take a snapshot on the container with current epoch.

        Args:
            container: container for the snapshot
            epoch: the container epoch for the snapshot
        Return:
            An object representing the snapshot
        """
        self.log.info("==Taking snapshot for:")
        self.log.info("    coh=   %s", container.coh)
        self.log.info("    epoch= %s", epoch)
        snapshot = DaosSnapshot(self.context)
        snapshot.create(container.coh, epoch)
        self.display_snapshot(snapshot)
        return snapshot

    def invalid_snapshot_test(self, coh, epoch):
        """
        Negative snapshot test with invalid container handle or epoch.

        Args:
            coh: container handle for the snapshot
            epoch: the container epoch for the snapshot
        Return:
            0: Failed
            1: Passed (expected failure detected)
        """
        status = 0
        try:
            snapshot = DaosSnapshot(self.context)
            snapshot.create(coh, epoch)
        except Exception as error:
            self.log.info("==>Negative test, expected error: %s", str(error))
            status = 1
        return status

    def test_snapshot_negativecases(self):
        """
        Test ID: DAOS-1390 Verify snap_create bad parameter behavior.
                 DAOS-1322 Create a new container, verify snapshot state
                           as expected for a brand new container.
                 DAOS-1392 Verify snap_destroy bad parameter behavior.
                 DAOS-1388 Verify snap_list bad parameter behavior.
        Test Description:
                (0)Take a snapshot of the newly created container.
                (1)Create an object, write random data into it, and take
                   a snapshot.
                (2)Verify the snapshot is working properly.
                (3)Test snapshot with an invalid container handle.
                (4)Test snapshot with a NULL container handle.
                (5)Test snapshot with an invalid epoch.
                (6)Verify snap_destroy with a bad parameter.
                (7)Verify snap_list bad parameter behavior.

        Use Cases: Combinations with a minimum of 1 client and 1 server.
        :avocado: tags=snap,snapshot_negative,snapshotcreate_negative
        """

        #DAOS-1322 Create a new container, verify snapshot state as expected
        #          for a brand new container.
        try:
            self.log.info(
                "==(0)Take a snapshot of the newly created container.")
            snapshot = DaosSnapshot(self.context)
            snapshot.create(self.container.coh, 0)
            self.display_snapshot(snapshot)
        except Exception as error:
            self.fail("##(0)Error on a snapshot on a new container %s",
                      str(error))

        #(1)Create an object, write some data into it, and take a snapshot
        obj_cls = self.params.get("obj_class", '/run/object_class/*')
        akey = self.params.get("akey", '/run/snapshot/*', default="akey")
        dkey = self.params.get("dkey", '/run/snapshot/*', default="dkey")
        data_size = self.params.get("test_datasize",
                                    '/run/snapshot/*',
                                    default=150)
        rand_str = lambda n: ''.join(
            [random.choice(string.ascii_lowercase) for i in range(n)])
        thedata = "--->>>Happy Daos Snapshot-Create Negative Testing " + \
                  "<<<---" + rand_str(random.randint(1, data_size))
        try:
            obj, epoch = self.container.write_an_obj(thedata,
                                                     len(thedata) + 1,
                                                     dkey,
                                                     akey,
                                                     obj_cls=obj_cls)
        except DaosApiError as error:
            self.fail("##(1)Test failed during the initial object write: %s",
                      str(error))
        obj.close()
        ##Take a snapshot of the container
        snapshot = self.take_snapshot(self.container, epoch)
        self.log.info("==(1)Container epoch= %s", epoch)
        self.log.info("     snapshot.epoch= %s", snapshot.epoch)

        #(2)Verify the snapshot is working properly.
        try:
            obj.open()
            snap_handle = snapshot.open(self.container.coh, snapshot.epoch)
            thedata2 = self.container.read_an_obj(
                len(thedata) + 1, dkey, akey, obj, snap_handle.value)
        except Exception as error:
            self.fail("##(2)Error when retrieving the snapshot data: %s",
                      str(error))
        self.log.info("==(2)snapshot_list[ind]=%s", snapshot)
        self.log.info("==snapshot.epoch=  %s", snapshot.epoch)
        self.log.info("==written thedata=%s", thedata)
        self.log.info("==thedata2.value= %s", thedata2.value)
        if thedata2.value != thedata:
            raise Exception("##(2)The data in the snapshot is not the "
                            "same as the original data")
        self.log.info("==Snapshot data matches the data originally "
                      "written.")

        #(3)Test snapshot with an invalid container handle
        self.log.info("==(3)Snapshot with an invalid container handle.")
        if self.invalid_snapshot_test(self.container, epoch):
            self.log.info(
                "==>Negative test 1: taking a snapshot with an invalid "
                "container.coh failed as expected: %s", self.container)
        else:
            self.fail(
                "##(3)Negative test 1 passed but was expected to fail when "
                "taking a snapshot with an invalid container.coh: %s"
                % self.container)

        #(4)Test snapshot with a NULL container handle
        self.log.info("==(4)Snapshot with a NULL container handle.")
        if self.invalid_snapshot_test(None, epoch):
            self.log.info("==>Negative test 2, expecting failed on taking "
                          "snapshot on a NULL container.coh.")
        else:
            self.fail("##(4)Negative test 2 passing, expecting failed on "
                      "taking snapshot with a NULL container.coh.")

        #(5)Test snapshot with an invalid epoch
        self.log.info("==(5)Snapshot with a NULL epoch.")
        if self.invalid_snapshot_test(self.container.coh, None):
            self.log.info("==>Negative test 3, expecting failed on taking "
                          "snapshot with a NULL epoch.")
        else:
            self.fail("##(5)Negative test 3 passing, expecting failed on "
                      "taking snapshot with a NULL epoch.")

        #(6)DAOS-1392 destroy snapshot with an invalid handle
        self.log.info(
            "==(6)DAOS-1392 destroy snapshot with an invalid handle.")
        try:
            snapshot.destroy(None, epoch)
            self.fail("##(6)Negative test destroy snapshot with an "
                      "invalid coh handle, expected fail, shown Passing##")
        except Exception as error:
            self.log.info(
                "==>Negative test, destroy snapshot with an invalid handle.")
            self.log.info("   Expected Error: %s", str(error))
            expected_error = "RC: -1002"
            if expected_error not in str(error):
                self.fail("##(6.1)Expecting error RC: -1002  did not show.")

        #(7)DAOS-1388 Verify snap_list bad parameter behavior
        self.log.info(
            "==(7)DAOS-1388 Verify snap_list bad parameter behavior.")
        try:
            snapshot.list(None, 0)
            self.fail("##(7)Negative test snapshot list with an "
                      "invalid coh and epoch, expected fail, shown Passing##")
        except Exception as error:
            self.log.info(
                "==>Negative test, snapshot list with an invalid coh.")
            self.log.info("   Expected Error: %s", str(error))
            expected_error = "RC: -1002"
            if expected_error not in str(error):
                self.fail("##(7.1)Expecting error RC: -1002  did not show.")

    def test_snapshots(self):
        """
        Test ID: DAOS-1386 Test container SnapShot information
                 DAOS-1371 Test list snapshots
                 DAOS-1395 Test snapshot destroy
                 DAOS-1402 Test creating multiple snapshots
        Test Description:
                (1)Create an object, write random data into it, and take
                   a snapshot.
                (2)Make changes to the data object. The write_an_obj function
                   does a commit when the update is complete.
                (3)Verify the data in the snapshot is the original data.
                   Get a handle for the snapshot and read the object at dkey,
                   akey. Compare it to the originally written data.
                (4)List the snapshot and make sure it reflects the original
                   epoch.
                   ==>Repeat step(1) to step(4) for multiple snapshot tests.
                (5)Verify the snapshots data.
                (6)Destroy the snapshot.
                (7)Check whether the destroyed snapshot can still be opened
                   and verify it has been removed from the snapshot list.
        Use Cases: Requires 1 client and 1 server to run the snapshot test.
                   1 pool and 1 container are used; the number of snapshots
                   defined by num_of_snapshot in snapshot.yaml will be
                   created and verified.
        :avocado: tags=snap,snapshots
        """

        coh_list = []
        container_epoch_list = []
        snapshot_list = []
        test_data = []
        snapshot_index = 0
        obj_cls = self.params.get("obj_class", '/run/object_class/*')
        akey = self.params.get("akey", '/run/snapshot/*', default="akey")
        dkey = self.params.get("dkey", '/run/snapshot/*', default="dkey")
        data_size = self.params.get("test_datasize",
                                    '/run/snapshot/*',
                                    default=150)
        snapshot_loop = self.params.get("num_of_snapshot",
                                        '/run/snapshot/*',
                                        default=10)
        rand_str = lambda n: ''.join(
            [random.choice(string.ascii_lowercase) for i in range(n)])
        #
        #Test loop for create, modify and snapshot objects in the DAOS container.
        #
        while snapshot_index < snapshot_loop:
            #(1)Create an object, write some data into it, and take a snapshot
            #size = random.randint(1, 100) + 1
            snapshot_index += 1
            thedata = "--->>>Happy Daos Snapshot Testing " + \
                str(snapshot_index) + \
                "<<<---" + rand_str(random.randint(1, data_size))
            datasize = len(thedata) + 1
            try:
                obj, epoch = self.container.write_an_obj(thedata,
                                                         datasize,
                                                         dkey,
                                                         akey,
                                                         obj_cls=obj_cls)
                obj.close()
            except DaosApiError as error:
                self.fail("##Test failed during the initial object write: %s",
                          str(error))
            #Take a snapshot of the container
            snapshot = DaosSnapshot(self.context)
            snapshot.create(self.container.coh, epoch)
            self.log.info("==Wrote an object and created a snapshot")

            #Display snapshot
            substep = "1." + str(snapshot_index)
            self.log.info("==(1)Test step %s", substep)
            self.log.info("==self.container epoch=     %s", epoch)
            self.log.info("==snapshot.epoch= %s", snapshot.epoch)
            self.display_snapshot(snapshot)

            #Save snapshot test data
            coh_list.append(self.container.coh)
            container_epoch_list.append(epoch)
            snapshot_list.append(snapshot)
            test_data.append(thedata)

            #(2)Make changes to the data object. The write_an_obj function does
            #   a commit when the update is complete
            more_transactions = 100
            self.log.info(
                "==(2)Committing %d additional transactions to "
                "the same KV.", more_transactions)
            while more_transactions:
                size = random.randint(1, 250) + 1
                new_data = rand_str(size)
                try:
                    new_obj, _ = self.container.write_an_obj(new_data,
                                                             size,
                                                             dkey,
                                                             akey,
                                                             obj_cls=obj_cls)
                    new_obj.close()
                except Exception as error:
                    self.fail(
                        "##Test failed during the write of multi-objects: "
                        "{}".format(error))
                more_transactions -= 1

            #(3)Verify the data in the snapshot is the original data.
            #   Get a handle for the snapshot and read the object at dkey, akey
            #   Compare it to the originally written data.
            self.log.info("==(3)snapshot test loop: %s", snapshot_index)
            try:
                obj.open()
                snap_handle = snapshot.open(self.container.coh, snapshot.epoch)
                thedata3 = self.container.read_an_obj(datasize, dkey, akey,
                                                      obj, snap_handle.value)
            except Exception as error:
                self.fail("##Error when retrieving the snapshot data: "
                          "{}".format(error))
            self.log.info("==container_epoch= %s", epoch)
            self.log.info("==snapshot_list[ind]=%s", snapshot)
            self.log.info("==snapshot.epoch=  %s", snapshot.epoch)
            self.log.info("==written thedata size= %s", len(thedata) + 1)
            self.log.info("==written thedata=%s", thedata)
            self.log.info("==thedata3.value= %s", thedata3.value)
            if thedata3.value != thedata:
                raise Exception("##The data in the snapshot is not the "
                                "same as the original data")
            self.log.info("==The snapshot data matches the data originally"
                          " written.")

            #(4)List the snapshot and make sure it reflects the original epoch
            try:
                reported_epoch = snapshot.list(self.container.coh, epoch)
            except Exception as error:
                self.fail("##Test was unable to list the snapshot: %s",
                          str(error))
            self.log.info("==(4)List snapshot reported_epoch=%s",
                          reported_epoch)
            self.log.info("     snapshot.epoch=%s", snapshot.epoch)
            ##self.log.info("tickets already assigned DAOS-2390 DAOS-2392")
            #if snapshot.epoch != reported_epoch:
            #    raise Exception("##The snapshot epoch returned from "
            #        "snapshot list is not the same as the original"
            #        "epoch list is not the same as the original epoch"
            #        "snapshotted.")
            self.log.info("==After 10 additional commits the snapshot is "
                          "still available")

        #(5)Verify the snapshots data
        for ind in range(len(container_epoch_list)):
            epoch = container_epoch_list[ind]
            current_ss = snapshot_list[ind]
            datasize = len(test_data[ind]) + 1
            try:
                obj.open()
                snap_handle = current_ss.open(self.container.coh,
                                              current_ss.epoch)
            except Exception as error:
                self.fail("##Error when opening the snapshot handle: "
                          "{}".format(error))
            ##self.log.info("tickets already assigned DAOS-2484 and DAOS-2557")
            #thedata3 = self.container.read_an_obj(datasize, dkey, akey, obj,
            #                                  snap_handle.value)
            #self.log.info("==(5)snapshot test list %s:".format(ind+1))
            #self.log.info("==container_epoch_list[ind]=%s"\
            #              .format(epoch))
            #self.log.info("==snapshot_list[ind]=%s"\
            #              .format(snapshot_list[ind]))
            #self.log.info("==snapshot_list[ind].epoch=%s"\
            #              .format( current_ss.epoch))
            #self.log.info("==test_data_size=    %s".format(datasize))
            #self.log.info("==thedata3.value=         %s"\
            #              .format(thedata3.value))
            #self.log.info("==test_data[ind]=    %s"\
            #              .format( test_data[ind]))
            #if thedata3.value != test_data[ind]:
            #    raise Exception("##The data in the snapshot is not "
            #                    "same as the original data")
            #self.log.info("The snapshot data matches the data originally "
            #              "written.")

        #(6)Destroy the snapshot
        self.log.info("==(6)Destroy snapshot  epoch: %s", epoch)
        try:
            snapshot.destroy(self.container.coh, epoch)
            self.log.info("==Snapshot successfully destroyed")
        except Exception as error:
            self.fail("##Error on snapshot.destroy: %s", str(error))

        #(7)Check whether the destroyed snapshot can still be opened and
        #   verify that it has been removed from the snapshot list
        try:
            obj.open()
            snap_handle3 = snapshot.open(self.container.coh, snapshot.epoch)
            thedata3 = self.container.read_an_obj(datasize, dkey, akey, obj,
                                                  snap_handle3.value)
        except Exception as error:
            self.fail("##(7)Error when retrieving the 2nd snapshot data: %s",
                      str(error))
        self.log.info("-->thedata_after_snapshot.destroyed.value= %s",
                      thedata3.value)
        self.log.info("==>snapshot_epoch=     %s", snapshot.epoch)
        self.log.info("-->snapshot.list(self.container.coh, epoch)=%s",
                      snapshot.list(self.container.coh, epoch))
        #self.cancel("tickets already assigned DAOS-2390 DAOS-2392")
        #The snapshot can still be opened and read after being destroyed.
        self.log.info("==(7)DAOS container SnapshotInfo test passed")

        # Destroy the snapshot again, this time without specifying an epoch
        try:
            snapshot.destroy(self.container.coh)
            self.log.info("==Snapshot successfully destroyed")
        except Exception as error:
            self.fail("##Error on snapshot.destroy: {}".format(error))
Example #10
0
    def test_container_basics(self):
        """
        Test basic container create/destroy/open/close/query. Nothing fancy,
        just making sure they work at a rudimentary level.

        :avocado: tags=container,containercreate,containerdestroy,basecont
        """

        hostfile = None

        try:
            self.hostlist = self.params.get("test_machines", '/run/hosts/*')
            hostfile = WriteHostFile.WriteHostFile(self.hostlist, self.tmp)

            ServerUtils.runServer(hostfile, self.server_group, self.basepath)

            # give it time to start
            time.sleep(2)

            # parameters used in pool create
            createmode = self.params.get("mode",'/run/conttests/createmode/')
            createuid  = self.params.get("uid",'/run/conttests/createuid/')
            creategid  = self.params.get("gid",'/run/conttests/creategid/')
            createsetid = self.params.get("setname",'/run/conttests/createset/')
            createsize  = self.params.get("size",'/run/conttests/createsize/')

            # initialize a python pool object then create the underlying
            # daos storage
            POOL = DaosPool(self.Context)
            POOL.create(createmode, createuid, creategid,
                        createsize, createsetid, None)

            # need a connection to create container
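            # (1 << 1) requests read-write access (believed to correspond to
            # DAOS_PC_RW in the C API)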
            POOL.connect(1 << 1)

            # create a container
            CONTAINER = DaosContainer(self.Context)
            CONTAINER.create(POOL.handle)

            # now open it
            CONTAINER.open()

            # do a query and compare the UUID returned from create with
            # that returned by query
            CONTAINER.query()

            if CONTAINER.get_uuid_str() != c_uuid_to_str(
                    CONTAINER.info.ci_uuid):
                self.fail("Container UUID did not match the one in info'n")

            CONTAINER.close()

            # wait a few seconds and then destroy
            time.sleep(5)
            CONTAINER.destroy()

            # cleanup the pool
            POOL.disconnect()
            POOL.destroy(1)

        except DaosApiError as e:
            print(e)
            print(traceback.format_exc())
            self.fail("Test was expected to pass but it failed.\n")
        except Exception as e:
            self.fail("Daos code segfaulted most likely, error: %s" % e)
        finally:
            try:
                if hostfile is not None:
                    os.remove(hostfile)
            finally:
                ServerUtils.stopServer(hosts=self.hostlist)
Example #11
0
    def test_epoch_basics(self):
        """
        Perform I/O to an object in a container in 2 different epochs,
        verifying basic I/O and epochs in particular.

        :avocado: tags=container,epoch,basicepoch
        """
        try:
            # parameters used in pool create
            createmode = self.params.get("mode",'/run/conttests/createmode/')
            createuid  = self.params.get("uid",'/run/conttests/createuid/')
            creategid  = self.params.get("gid",'/run/conttests/creategid/')
            createsetid = self.params.get("setname",'/run/conttests/createset/')
            createsize  = self.params.get("size",'/run/conttests/createsize/')

            # initialize a python pool object then create the underlying
            # daos storage
            POOL = DaosPool(self.Context)
            POOL.create(createmode, createuid, creategid,
                        createsize, createsetid, None)

            # need a connection to create container
            POOL.connect(1 << 1)

            # create a container
            CONTAINER = DaosContainer(self.Context)
            CONTAINER.create(POOL.handle)

            # now open it
            CONTAINER.open()

            # do a query and compare the UUID returned from create with
            # that returned by query
            CONTAINER.query()

            if CONTAINER.get_uuid_str() != c_uuid_to_str(
                    CONTAINER.info.ci_uuid):
                self.fail("Container UUID did not match the one in info\n")

            # create an object and write some data into it
            thedata = "a string that I want to stuff into an object"
            thedatasize = 45
            dkey = "this is the dkey"
            akey = "this is the akey"

            oid, epoch = CONTAINER.write_an_obj(thedata, thedatasize,
                                                dkey, akey)

            # read the data back and make sure it's correct
            thedata2 = CONTAINER.read_an_obj(thedatasize, dkey, akey,
                                             oid, epoch)
            if thedata != thedata2.value:
                print("thedata>" + thedata)
                print("thedata2>" + thedata2.value)
                self.fail("Write data 1, read it back, didn't match\n")

            # repeat above, but know that the write_an_obj call is advancing
            # the epoch so the original copy remains and the new copy is in
            # a new epoch.
            thedata3 = "a different string"
            thedatasize2 = 19
            # note using the same keys so writing to the same spot
            dkey = "this is the dkey"
            akey = "this is the akey"

            oid, epoch2 = CONTAINER.write_an_obj(thedata3, thedatasize2,
                                                 dkey, akey, oid)

            # read the data back and make sure it's correct
            thedata4 = CONTAINER.read_an_obj(thedatasize2, dkey, akey,
                                             oid, epoch2)
            if thedata3 != thedata4.value:
                self.fail("Write data 2, read it back, didn't match\n")

            # the original data should still be there too
            thedata5 = CONTAINER.read_an_obj(thedatasize, dkey, akey,
                                             oid, epoch)
            if thedata != thedata5.value:
                self.fail("Write data 3, read it back, didn't match\n")

            CONTAINER.close()

            # wait a few seconds and then destroy
            time.sleep(5)
            CONTAINER.destroy()

            # cleanup the pool
            POOL.disconnect()
            POOL.destroy(1)

        except DaosApiError as e:
            print(e)
            print(traceback.format_exc())
            self.fail("Test was expected to pass but it failed.\n")
Example #12
0
class ContainerAsync(Test):
    """
    Tests asynchronous DAOS container operations (create, destroy, open,
    close and query), checking the return codes reported through the
    completion callback.
    """

    def __init__(self, *args, **kwargs):
        super(ContainerAsync, self).__init__(*args, **kwargs)
        self.container1 = None
        self.container2 = None

    def setUp(self):
        self.agent_sessions = None
        self.hostlist = None
        self.pool = None

        # get paths from the build_vars generated by build
        with open('../../../.build_vars.json') as build_file:
            build_paths = json.load(build_file)
        self.basepath = os.path.normpath(build_paths['PREFIX'] + "/../")

        self.server_group = self.params.get("name", '/server_config/',
                                            'daos_server')

        # setup the DAOS python API
        self.context = DaosContext(build_paths['PREFIX'] + '/lib/')
        self.pool = None

        self.hostlist = self.params.get("test_machines", '/run/hosts/*')
        self.hostfile = write_host_file.write_host_file(self.hostlist,
                                                        self.workdir)
        print("Host file is: {}".format(self.hostfile))

        self.agent_sessions = AgentUtils.run_agent(self.basepath, self.hostlist)
        server_utils.run_server(self.hostfile, self.server_group, self.basepath)
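        # give the agents and servers time to start before running the test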
        time.sleep(10)

    def tearDown(self):
        try:
            if self.pool is not None and self.pool.attached:
                self.pool.destroy(1)
        finally:
            if self.agent_sessions:
                AgentUtils.stop_agent(self.hostlist, self.agent_sessions)
            time.sleep(5)
            server_utils.stop_server(hosts=self.hostlist)

    def test_createasync(self):
        """
        Test container create for asynchronous mode.

        :avocado: tags=container,containerasync,createasync
        """

        global GLOB_SIGNAL
        global GLOB_RC

        # parameters used in pool create
        createmode = self.params.get("mode", '/run/createtests/createmode/*/')
        createsetid = self.params.get("setname", '/run/createtests/createset/')
        createsize = self.params.get("size", '/run/createtests/createsize/')
        createuid = os.geteuid()
        creategid = os.getegid()

        try:
            # initialize a python pool object then create the underlying
            # daos storage
            self.pool = DaosPool(self.context)

            self.pool.create(createmode, createuid, creategid,
                             createsize, createsetid, None)

            poh = self.pool.handle

            self.pool.connect(1 << 1)

            # Container initialization and creation
            self.container1 = DaosContainer(self.context)
            self.container2 = DaosContainer(self.context)


            GLOB_SIGNAL = threading.Event()
            self.container1.create(poh, None, cb_func)

            GLOB_SIGNAL.wait()
            if GLOB_RC != 0:
                self.fail("RC not as expected in async test")
            print("RC after successful container create: ", GLOB_RC)

            # Try to recreate container after destroying pool,
            # this should fail. Checking rc after failure.
            self.pool.destroy(1)
            GLOB_SIGNAL = threading.Event()
            GLOB_RC = -9900000
            self.container2.create(poh, None, cb_func)

            GLOB_SIGNAL.wait()
            if GLOB_RC == 0:
                self.fail("RC not as expected in async test")
            print("RC after unsuccessful container create: ", GLOB_RC)

            # the pool was already destroyed above; clear the reference so
            # tearDown() does not try to destroy it again
            self.pool = None

        except DaosApiError as excep:
            print(excep)
            print(traceback.format_exc())

    def test_destroyasync(self):
        """
        Test container destroy for asynchronous mode.

        :avocado: tags=container,containerasync,contdestroyasync
        """

        global GLOB_SIGNAL
        global GLOB_RC

        # parameters used in pool create
        createmode = self.params.get("mode", '/run/createtests/createmode/*/')
        createsetid = self.params.get("setname", '/run/createtests/createset/')
        createsize = self.params.get("size", '/run/createtests/createsize/')
        createuid = os.geteuid()
        creategid = os.getegid()

        try:
            # initialize a python pool object then create the underlying
            # daos storage
            self.pool = DaosPool(self.context)

            self.pool.create(createmode, createuid, creategid,
                             createsize, createsetid, None)

            poh = self.pool.handle

            self.pool.connect(1 << 1)

            # Container initialization and creation
            self.container1 = DaosContainer(self.context)
            self.container2 = DaosContainer(self.context)

            self.container1.create(poh)

            GLOB_SIGNAL = threading.Event()
            self.container1.destroy(1, poh, None, cb_func)

            GLOB_SIGNAL.wait()
            if GLOB_RC != 0:
                self.fail("RC not as expected in async test")
            print("RC after successful container create: ", GLOB_RC)

            # Try to destroy container2, which was never created; this should
            # fail. Checking rc after failure.
            GLOB_SIGNAL = threading.Event()
            GLOB_RC = -9900000
            self.container2.destroy(1, poh, None, cb_func)

            GLOB_SIGNAL.wait()
            if GLOB_RC != -1003:
                self.fail("RC not as expected in async test")
            print("RC after container destroy failed:", GLOB_RC)

            # cleanup the pool and container
            self.pool.disconnect()
            self.pool.destroy(1)
            self.pool = None

        except DaosApiError as excep:
            print(excep)
            print(traceback.format_exc())

    def test_openasync(self):
        """
        Test container open for asynchronous mode.

        :avocado: tags=container,containerasync,openasync
        """

        global GLOB_SIGNAL
        global GLOB_RC

        # parameters used in pool create
        createmode = self.params.get("mode", '/run/createtests/createmode/*/')
        createsetid = self.params.get("setname", '/run/createtests/createset/')
        createsize = self.params.get("size", '/run/createtests/createsize/')
        createuid = os.geteuid()
        creategid = os.getegid()

        try:
            # initialize a python pool object then create the underlying
            # daos storage
            self.pool = DaosPool(self.context)

            self.pool.create(createmode, createuid, creategid,
                             createsize, createsetid, None)

            poh = self.pool.handle

            self.pool.connect(1 << 1)

            # Container initialization and creation
            self.container1 = DaosContainer(self.context)
            self.container2 = DaosContainer(self.context)

            self.container1.create(poh)

            str_cuuid = self.container1.get_uuid_str()
            cuuid = uuid.UUID(str_cuuid)

            GLOB_SIGNAL = threading.Event()
            self.container1.open(poh, cuuid, 2, cb_func)

            GLOB_SIGNAL.wait()
            if GLOB_RC != 0:
                self.fail("RC not as expected in async test")
            print("RC after successful container create: ", GLOB_RC)

            # Try to open container2, which was never created; this should
            # fail. Checking rc after failure.
            GLOB_SIGNAL = threading.Event()
            GLOB_RC = -9900000
            self.container2.open(None, None, None, cb_func)

            GLOB_SIGNAL.wait()
            if GLOB_RC == 0:
                self.fail("RC not as expected in async test")
            print("RC after container destroy failed:", GLOB_RC)

            # cleanup the pool and container
            self.container1.close()
            self.container1.destroy()
            self.pool.disconnect()
            self.pool.destroy(1)
            self.pool = None

        except DaosApiError as excep:
            print(excep)
            print(traceback.format_exc())

    def test_closeasync(self):
        """
        Test container close for asynchronous mode.

        :avocado: tags=container,containerasync,closeasync
        """

        global GLOB_SIGNAL
        global GLOB_RC

        # parameters used in pool create
        createmode = self.params.get("mode", '/run/createtests/createmode/*/')
        createsetid = self.params.get("setname", '/run/createtests/createset/')
        createsize = self.params.get("size", '/run/createtests/createsize/')
        createuid = os.geteuid()
        creategid = os.getegid()

        try:
            # initialize a python pool object then create the underlying
            # daos storage
            self.pool = DaosPool(self.context)

            self.pool.create(createmode, createuid, creategid,
                             createsize, createsetid, None)


            poh = self.pool.handle

            self.pool.connect(1 << 1)

            # Container initialization and creation
            self.container1 = DaosContainer(self.context)
            self.container2 = DaosContainer(self.context)

            self.container1.create(poh)

            str_cuuid = self.container1.get_uuid_str()
            cuuid = uuid.UUID(str_cuuid)

            self.container1.open(poh, cuuid, 2)

            GLOB_SIGNAL = threading.Event()
            self.container1.close(cb_func=cb_func)

            GLOB_SIGNAL.wait()
            if GLOB_RC != 0:
                self.fail("RC not as expected in async test: "
                          "{0}".format(GLOB_RC))
            print("RC after successful container create: ", GLOB_RC)

            # Try to close container2, which was never opened; this should
            # fail. Checking rc after failure.
            GLOB_SIGNAL = threading.Event()
            GLOB_RC = -9900000
            self.container2.close(cb_func=cb_func)

            GLOB_SIGNAL.wait()
            if GLOB_RC == 0:
                self.fail("RC not as expected in async test: "
                          "{0}".format(GLOB_RC))
            print("RC after container destroy failed:", GLOB_RC)

            # cleanup the pool and container
            self.container1.destroy()
            self.pool.disconnect()
            self.pool.destroy(1)
            self.pool = None

        except DaosApiError as excep:
            print(excep)
            print(traceback.format_exc())

    def test_queryasync(self):
        """
        Test container query for asynchronous mode.

        :avocado: tags=container,containerasync,queryasync
        """

        global GLOB_SIGNAL
        global GLOB_RC

        # parameters used in pool create
        createmode = self.params.get("mode", '/run/createtests/createmode/*/')
        createsetid = self.params.get("setname", '/run/createtests/createset/')
        createsize = self.params.get("size", '/run/createtests/createsize/')
        createuid = os.geteuid()
        creategid = os.getegid()

        try:
            # initialize a python pool object then create the underlying
            # daos storage
            self.pool = DaosPool(self.context)

            self.pool.create(createmode, createuid, creategid,
                             createsize, createsetid, None)

            poh = self.pool.handle

            self.pool.connect(1 << 1)

            # Container initialization and creation
            self.container1 = DaosContainer(self.context)
            self.container2 = DaosContainer(self.context)

            self.container1.create(poh)

            dummy_str_cuuid = self.container1.get_uuid_str()

            # Open container
            self.container1.open(poh, None, 2, None)

            GLOB_SIGNAL = threading.Event()
            self.container1.query(cb_func=cb_func)

            GLOB_SIGNAL.wait()
            if GLOB_RC != 0:
                self.fail("RC not as expected in async test: "
                          "{0}".format(GLOB_RC))
            print("RC after successful container create: ", GLOB_RC)

            # Close opened container
            self.container1.close()

            # Try to query container2, which was never created; this should
            # fail. Checking rc after failure.
            GLOB_SIGNAL = threading.Event()
            GLOB_RC = -9900000
            self.container2.query(cb_func=cb_func)

            GLOB_SIGNAL.wait()
            if GLOB_RC == 0:
                self.fail("RC not as expected in async test: "
                          "{0}".format(GLOB_RC))
            print("RC after container destroy failed:", GLOB_RC)

            # cleanup the pool and container
            self.container1.destroy()
            self.pool.disconnect()
            self.pool.destroy(1)
            self.pool = None

        except DaosApiError as excep:
            print(excep)
            print(traceback.format_exc())
Example #13
0
    def test_tx_basics(self):
        """
        Perform I/O to an object in a container in 2 different transactions,
        verifying basic I/O and transactions in particular.

        NOTE: this was originally an epoch test that was minimally converted
        to use transactions. It is not a thorough test at this point and
        needs a redesign once transactions are fully working.

        :avocado: tags=container,tx,basictx
        """
        pool = None

        try:
            # parameters used in pool create
            createmode = self.params.get("mode", '/run/poolparams/createmode/')
            createuid = os.geteuid()
            creategid = os.getegid()
            createsetid = self.params.get("setname",
                                          '/run/poolparams/createset/')
            createsize = self.params.get("size", '/run/poolparams/createsize/')

            # initialize a python pool object then create the underlying
            # daos storage
            pool = DaosPool(self.context)
            pool.create(createmode, createuid, creategid, createsize,
                        createsetid, None)

            # need a connection to create container
            pool.connect(1 << 1)

            # create a container
            container = DaosContainer(self.context)
            container.create(pool.handle)

            # now open it
            container.open()

            # do a query and compare the UUID returned from create with
            # that returned by query
            container.query()

            if container.get_uuid_str() != c_uuid_to_str(
                    container.info.ci_uuid):
                self.fail("Container UUID did not match the one in info\n")

            # create an object and write some data into it
            thedata = "a string that I want to stuff into an object"
            thedatasize = 45
            dkey = "this is the dkey"
            akey = "this is the akey"

            oid, txn = container.write_an_obj(thedata, thedatasize, dkey, akey,
                                              None, None, 2)

            # read the data back and make sure it's correct
            thedata2 = container.read_an_obj(thedatasize, dkey, akey, oid, txn)
            if thedata != thedata2.value:
                print("thedata>" + thedata)
                print("thedata2>" + thedata2.value)
                self.fail("Write data 1, read it back, didn't match\n")

            # repeat above, noting that the write_an_obj call returns a new
            # transaction handle, so the original copy remains and the new
            # copy is written under a new transaction.
            thedata3 = "a different string"
            thedatasize2 = 19
            # note using the same keys so writing to the same spot
            dkey = "this is the dkey"
            akey = "this is the akey"

            oid, tx2 = container.write_an_obj(thedata3, thedatasize2, dkey,
                                              akey, oid, None, 2)

            # read the data back and make sure it's correct
            thedata4 = container.read_an_obj(thedatasize2, dkey, akey, oid,
                                             tx2)
            if thedata3 != thedata4.value:
                self.fail("Write data 2, read it back, didn't match\n")

            # transactions generally don't work this way; an alternative to
            # the code below needs to be explored once the model is complete,
            # e.g. reading from a snapshot or reading with TX_NONE.

            # the original data should still be there too
            #thedata5 = container.read_an_obj(thedatasize, dkey, akey,
            #                                 oid, txn)
            #if thedata != thedata5.value:
            #    self.fail("Write data 3, read it back, didn't match\n")

            container.close()

            # wait a few seconds and then destroy
            time.sleep(5)
            container.destroy()

        except DaosApiError as excep:
            print(excep)
            print(traceback.format_exc())
            self.fail("Test was expected to pass but it failed.\n")
        finally:
            # cleanup the pool
            if pool is not None:
                pool.disconnect()
                pool.destroy(1)