Code example #1
File: global_handle.py Project: daos-stack/daos
def check_handle(buf_len, iov_len, buf, uuidstr, rank):
    """
    This gets run in a child process and verifies the global
    handle can be turned into a local handle in another process.
    """

    try:
        # get paths from the build_vars generated by build
        with open('../../../.build_vars.json') as build_file:
            build_paths = json.load(build_file)

        # setup the DAOS python API in this process
        context = DaosContext(build_paths['PREFIX'] + '/lib/')

        pool = DaosPool(context)
        pool.set_uuid_str(uuidstr)
        pool.set_svc(rank)
        pool.group = "daos_server"

        # note that the handle is stored inside the pool as well
        dummy_local_handle = pool.global2local(context, iov_len, buf_len, buf)

        # perform some operations that will use the new handle
        pool.pool_query()
        container = DaosContainer(context)
        container.create(pool.handle)

    except DaosApiError as excep:
        print(excep)
        print(traceback.format_exc())
        raise

    return
Code example #2
File: soak.py Project: morsiee/daos
    def create_pool(self):
        """
        Creates a pool that the various tests use for storage.
        """

        createmode = self.params.get("mode", '/run/pool1/createmode/*/')
        createuid = os.geteuid()
        creategid = os.getegid()
        createsetid = self.params.get("setname", '/run/pool1/createset/')
        createsize = self.params.get("size", '/run/pool1/createsize/')
        self.createsvc = self.params.get("svcn", '/run/pool1/createsvc/')

        self.pool = DaosPool(self.context)
        self.pool.create(createmode, createuid, creategid, createsize,
                         createsetid, None, None, self.createsvc)
Code example #3
File: destroy_tests.py Project: daos-stack/daos
    def test_many_servers(self):
        """
        Test destroy on a large (relative) number of servers.

        :avocado: tags=pool,pooldestroy,destroybig
        """
        try:
            # write out a hostfile and start the servers with it
            self.hostlist = self.params.get("test_machines6", '/run/hosts/')
            hostfile = write_host_file.write_host_file(self.hostlist, self.tmp)

            self.agent_sessions = AgentUtils.run_agent(self.basepath,
                                                       self.hostlist)
            server_utils.run_server(hostfile, self.server_group, self.basepath)

            # parameters used in pool create
            createmode = self.params.get("mode", '/run/poolparams/createmode/')
            createuid = self.params.get("uid", '/run/poolparams/createuid/')
            creategid = self.params.get("gid", '/run/poolparams/creategid/')
            createsetid = self.params.get("setname",
                                          '/run/poolparams/createset/')
            createsize = self.params.get("size", '/run/poolparams/createsize/')

            # initialize a python pool object then create the underlying
            # daos storage
            pool = DaosPool(self.context)
            pool.create(createmode, createuid, creategid,
                        createsize, createsetid, None)

            time.sleep(1)

            # okay, get rid of it
            pool.destroy(1)

        except DaosApiError as excep:
            print(excep)
            print(traceback.format_exc())
            self.fail("6 server test failed.\n")

        except Exception as excep:
            self.fail("Daos code segfaulted most likely.  Error: %s" % excep)

        # no matter what happens cleanup
        finally:
            if self.agent_sessions:
                AgentUtils.stop_agent(self.hostlist, self.agent_sessions)
            server_utils.stop_server(hosts=self.hostlist)
            os.remove(hostfile)
Code example #4
    def setUp(self):
        self.pool = None
        self.container = None
        self.obj = None
        self.ioreq = None
        self.hostlist = None
        self.hostfile = None
        self.no_of_dkeys = None
        self.no_of_akeys = None
        self.array_size = None
        self.record_length = None

        with open('../../../.build_vars.json') as json_f:
            build_paths = json.load(json_f)
        basepath = os.path.normpath(build_paths['PREFIX'] + "/../")
        server_group = self.params.get("server_group", '/server/',
                                       'daos_server')
        self.context = DaosContext(build_paths['PREFIX'] + '/lib/')
        self.d_log = DaosLog(self.context)
        self.hostlist = self.params.get("test_machines", '/run/hosts/*')
        self.hostfile = WriteHostFile.WriteHostFile(self.hostlist,
                                                    self.workdir)
        self.no_of_dkeys = self.params.get("no_of_dkeys", '/run/dkeys/*')[0]
        self.no_of_akeys = self.params.get("no_of_akeys", '/run/akeys/*')[0]
        self.array_size = self.params.get("size", '/array_size/')
        self.record_length = self.params.get("length", '/run/record/*')

        ServerUtils.runServer(self.hostfile, server_group, basepath)

        self.pool = DaosPool(self.context)
        self.pool.create(self.params.get("mode", '/run/pool/createmode/*'),
                         os.geteuid(), os.getegid(),
                         self.params.get("size", '/run/pool/createsize/*'),
                         self.params.get("setname", '/run/pool/createset/*'),
                         None)
        self.pool.connect(2)

        self.container = DaosContainer(self.context)
        self.container.create(self.pool.handle)
        self.container.open()

        self.obj = DaosObj(self.context, self.container)
        self.obj.create(objcls=1)
        self.obj.open()
        self.ioreq = IORequest(self.context,
                               self.container,
                               self.obj,
                               objtype=4)
Code example #5
File: basic_snapshot.py Project: morsiee/daos
    def setUp(self):

        # get paths from the build_vars generated by build
        with open('../../../.build_vars.json') as finput:
            build_paths = json.load(finput)
        basepath = os.path.normpath(build_paths['PREFIX'] + "/../")

        server_group = self.params.get("name", '/server_config/',
                                       'daos_server')

        # setup the DAOS python API
        self.context = DaosContext(build_paths['PREFIX'] + '/lib/')
        self.d_log = DaosLog(self.context)

        self.hostlist = self.params.get("test_machines", '/run/hosts/*')
        hostfile = write_host_file.write_host_file(self.hostlist, self.workdir)

        server_utils.run_server(hostfile, server_group, basepath)

        # Set up the pool and container.
        try:
            # parameters used in pool create
            createmode = self.params.get("mode", '/run/pool/createmode/')
            createsetid = self.params.get("setname", '/run/pool/createset/')
            createsize = self.params.get("size", '/run/pool/createsize/*')
            createuid = os.geteuid()
            creategid = os.getegid()

            # initialize a pool object then create the underlying
            # daos storage
            self.pool = DaosPool(self.context)
            self.pool.create(createmode, createuid, creategid, createsize,
                             createsetid, None)

            # need a connection to create container
            self.pool.connect(1 << 1)

            # create a container
            self.container = DaosContainer(self.context)
            self.container.create(self.pool.handle)

            # now open it
            self.container.open()

        except DaosApiError as error:
            print(error)
            print(traceback.format_exc())
            self.fail("Test failed before snapshot taken")
Code example #6
File: destroy_rebuild.py Project: daos-stack/daos
    def setUp(self):
        """ setup for the test """
        self.agent_sessions = None
        # get paths from the build_vars generated by build
        with open('../../../.build_vars.json') as build_file:
            build_paths = json.load(build_file)
        self.context = DaosContext(build_paths['PREFIX'] + '/lib/')
        self.basepath = os.path.normpath(build_paths['PREFIX'] + "/../")

        # generate a hostfile
        self.hostlist = self.params.get("test_machines", '/run/hosts/')
        self.hostfile = write_host_file.write_host_file(self.hostlist,
                                                        self.workdir)

        # fire up the DAOS servers
        self.server_group = self.params.get("name", '/run/server_config/',
                                            'daos_server')
        self.agent_sessions = AgentUtils.run_agent(self.basepath, self.hostlist)
        server_utils.run_server(self.hostfile, self.server_group,
                                build_paths['PREFIX'] + '/../')

        # create a pool to test with
        createmode = self.params.get("mode", '/run/pool/createmode/')
        createuid = self.params.get("uid", '/run/pool/createuid/')
        creategid = self.params.get("gid", '/run/pool/creategid/')
        createsetid = self.params.get("setname", '/run/pool/createset/')
        createsize = self.params.get("size", '/run/pool/createsize/')
        self.pool = DaosPool(self.context)
        self.pool.create(createmode, createuid, creategid, createsize,
                         createsetid)
        self.pool.get_uuid_str()

        time.sleep(2)
Code example #7
File: create_many_dkeys.py Project: daos-stack/daos
    def setUp(self):
        self.agent_sessions = None
        with open('../../../.build_vars.json') as json_f:
            build_paths = json.load(json_f)
        basepath = os.path.normpath(build_paths['PREFIX'] + "/../")
        server_group = self.params.get("name",
                                       '/server_config/',
                                       'daos_server')
        self.context = DaosContext(build_paths['PREFIX'] + '/lib/')
        self.container = None
        self.hostlist = self.params.get("test_machines", '/run/hosts/*')
        self.hostfile = write_host_file.write_host_file(self.hostlist,
                                                        self.workdir)

        self.agent_sessions = AgentUtils.run_agent(basepath, self.hostlist)
        server_utils.run_server(self.hostfile, server_group, basepath)

        self.pool = DaosPool(self.context)
        self.pool.create(self.params.get("mode", '/run/pool/createmode/*'),
                         os.geteuid(),
                         os.getegid(),
                         self.params.get("size", '/run/pool/createsize/*'),
                         self.params.get("setname", '/run/pool/createset/*'),
                         None)
        self.pool.connect(1 << 1)
Code example #8
    def setUp(self):
        super(ContainerAttributeTest, self).setUp()

        self.large_data_set = {}

        self.pool = DaosPool(self.context)
        self.pool.create(
            self.params.get("mode", '/run/attrtests/createmode/*'),
            os.geteuid(), os.getegid(),
            self.params.get("size", '/run/attrtests/createsize/*'),
            self.params.get("setname", '/run/attrtests/createset/*'), None)
        self.pool.connect(1 << 1)
        poh = self.pool.handle
        self.container = DaosContainer(self.context)
        self.container.create(poh)
        self.container.open()
Code example #9
File: container_async.py Project: daos-stack/daos
    def test_createasync(self):
        """
        Test container create for asynchronous mode.

        :avocado: tags=container,containerasync,createasync
        """

        global GLOB_SIGNAL
        global GLOB_RC

        # parameters used in pool create
        createmode = self.params.get("mode", '/run/createtests/createmode/*/')
        createsetid = self.params.get("setname", '/run/createtests/createset/')
        createsize = self.params.get("size", '/run/createtests/createsize/')
        createuid = os.geteuid()
        creategid = os.getegid()

        try:
            # initialize a python pool object then create the underlying
            # daos storage
            self.pool = DaosPool(self.context)

            self.pool.create(createmode, createuid, creategid,
                             createsize, createsetid, None)

            poh = self.pool.handle

            self.pool.connect(1 << 1)

            # Container initialization and creation
            self.container1 = DaosContainer(self.context)
            self.container2 = DaosContainer(self.context)


            GLOB_SIGNAL = threading.Event()
            self.container1.create(poh, None, cb_func)

            GLOB_SIGNAL.wait()
            if GLOB_RC != 0:
                self.fail("RC not as expected in async test")
            print("RC after successful container create: ", GLOB_RC)

            # Try to recreate container after destroying pool,
            # this should fail. Checking rc after failure.
            self.pool.destroy(1)
            GLOB_SIGNAL = threading.Event()
            GLOB_RC = -9900000
            self.container2.create(poh, None, cb_func)

            GLOB_SIGNAL.wait()
            if GLOB_RC == 0:
                self.fail("RC not as expected in async test")
            print("RC after unsuccessful container create: ", GLOB_RC)

            # cleanup the pool and container
            self.pool = None

        except DaosApiError as excep:
            print(excep)
            print(traceback.format_exc())
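Note: the example above relies on a module-level callback cb_func and the globals GLOB_SIGNAL / GLOB_RC that are defined elsewhere in the test file. A minimal sketch of those pieces, assuming the completion event exposes its return code as event.event.ev_error (treat that attribute path as an assumption about the DAOS event object):

import threading

GLOB_SIGNAL = None   # threading.Event, re-created by the test before each async call
GLOB_RC = -99000000  # sentinel until the callback records the real return code

def cb_func(event):
    """Record the async return code and wake the waiting test."""
    global GLOB_RC
    GLOB_RC = event.event.ev_error  # assumed attribute path on the event object
    GLOB_SIGNAL.set()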
Code example #10
    def setUp(self):
        # get paths from the build_vars generated by build
        with open(os.path.join(os.path.dirname(os.path.realpath(__file__)),
                                "../../../../.build_vars.json")) as f:
            build_paths = json.load(f)
        self.basepath = os.path.normpath(build_paths['PREFIX'] + "/../")
        self.server_group = self.params.get("server_group", '/server/',
                                            'daos_default_oops')

        self.context = DaosContext(build_paths['PREFIX'] + '/lib/')

        self.pool = DaosPool(self.context)
        self.d_log = DaosLog(self.context)
        self.hostlist = self.params.get("test_machines1", '/hosts/')
        self.hostfile = WriteHostFile.WriteHostFile(self.hostlist, self.workdir)
        ServerUtils.runServer(self.hostfile, self.server_group, self.basepath)
Code example #11
File: eight_servers_mpiio.py Project: morsiee/daos
    def executable(self, iorflags=None):
        """
        Executable function to run ior for ssf and fpp
        """

        # parameters used in pool create
        createmode = self.params.get("mode", '/run/pool/createmode/*/')
        createuid = os.geteuid()
        creategid = os.getegid()
        createsetid = self.params.get("setname", '/run/pool/createset/')
        createscm_size = self.params.get("scm_size", '/run/pool/createsize/')
        createnvme_size = self.params.get("nvme_size", '/run/pool/createsize/')
        createsvc = self.params.get("svcn", '/run/pool/createsvc/')
        iteration = self.params.get("iter", '/run/ior/iteration/')
        block_size = self.params.get("b", '/run/ior/transfersize_blocksize/*/')
        transfer_size = self.params.get("t",
                                        '/run/ior/transfersize_blocksize/*/')

        try:
            # initialize MpioUtils
            self.mpio = MpioUtils()
            if self.mpio.mpich_installed(self.hostlist_clients) is False:
                self.fail("Exiting Test: Mpich not installed")

            #print self.mpio.mpichinstall
            # initialize a python pool object then create the underlying
            # daos storage
            self.pool = DaosPool(self.context)
            self.pool.create(createmode, createuid, creategid, createscm_size,
                             createsetid, None, None, createsvc,
                             createnvme_size)

            pool_uuid = self.pool.get_uuid_str()
            svc_list = ""
            for i in range(createsvc):
                svc_list += str(int(self.pool.svc.rl_ranks[i])) + ":"
            svc_list = svc_list[:-1]

            print("svc_list: {}".format(svc_list))

            ior_utils.run_ior_mpiio(self.basepath, self.mpio.mpichinstall,
                                    pool_uuid, svc_list, self.num_procs,
                                    self.hostfile_clients, iorflags, iteration,
                                    transfer_size, block_size, True)

        except (DaosApiError, MpioFailed) as excep:
            print(excep)
Code example #12
File: ior_single_server.py Project: morsiee/daos
    def test_singleserver(self):
        """
        Test IOR with Single Server config.

        :avocado: tags=ior,singleserver
        """

        # parameters used in pool create
        createmode = self.params.get("mode", '/run/createtests/createmode/*/')
        createuid = os.geteuid()
        creategid = os.getegid()
        createsetid = self.params.get("setname", '/run/createtests/createset/')
        createsize = self.params.get("size", '/run/createtests/createsize/')
        createsvc = self.params.get("svcn", '/run/createtests/createsvc/')

        # ior parameters
        client_processes = self.params.get("np",
                                           '/run/ior/client_processes/*/')
        iteration = self.params.get("iter", '/run/ior/iteration/')
        ior_flags = self.params.get("F", '/run/ior/iorflags/')
        transfer_size = self.params.get("t",
                                        '/run/ior/transfersize_blocksize/*/')
        block_size = self.params.get("b", '/run/ior/transfersize_blocksize/*/')
        object_class = self.params.get("o", '/run/ior/objectclass/')

        try:
            # initialize a python pool object then create the underlying
            # daos storage
            self.pool = DaosPool(self.context)
            self.pool.create(createmode, createuid, creategid, createsize,
                             createsetid, None, None, createsvc)
            pool_uuid = self.pool.get_uuid_str()
            print("pool_uuid: {}".format(pool_uuid))
            tmp_rank_list = []
            svc_list = ""
            for item in range(createsvc):
                tmp_rank_list.append(int(self.pool.svc.rl_ranks[item]))
                svc_list += str(tmp_rank_list[item]) + ":"
            svc_list = svc_list[:-1]

            ior_utils.run_ior_daos(self.hostfile_clients, ior_flags, iteration,
                                   block_size, transfer_size, pool_uuid,
                                   svc_list, object_class, self.basepath,
                                   client_processes)

        except (DaosApiError, ior_utils.IorFailed) as excep:
            self.fail("<Single Server Test FAILED>\n {}".format(excep))
Code example #13
File: snapshot.py Project: the-original-dude/daos-1
    def setUp(self):
        """
        set up method
        """
        super(Snapshot, self).setUp()
        # get parameters from yaml file
        createmode = self.params.get("mode", '/run/poolparams/createmode/')
        createuid = os.geteuid()
        creategid = os.getegid()
        createsetid = self.params.get("setname", '/run/poolparams/createset/')
        createsize = self.params.get("size", '/run/poolparams/createsize/')
        self.log.info("==In setUp, self.context= %s", self.context)

        try:

            # initialize a python pool object then create the underlying
            # daos storage
            self.pool = DaosPool(self.context)
            self.pool.create(createmode, createuid, creategid, createsize,
                             createsetid, None)

            # need a connection to the pool with rw permission
            #    DAOS_PC_RO = int(1 << 0)
            #    DAOS_PC_RW = int(1 << 1)
            #    DAOS_PC_EX = int(1 << 2)
            self.pool.connect(1 << 1)

            # create a container
            self.container = DaosContainer(self.context)
            self.container.create(self.pool.handle)

        except DaosApiError as error:
            self.log.info("Error detected in DAOS pool container setup: %s",
                          str(error))
            self.log.info(traceback.format_exc())
            self.fail("##Test failed on setUp, before snapshot taken")

        # now open it
        self.container.open()

        # do a query and compare the UUID returned from create with
        # that returned by query
        self.container.query()

        if self.container.get_uuid_str() != c_uuid_to_str(
                self.container.info.ci_uuid):
            self.fail("##Container UUID did not match the one in info.")
Code example #14
File: GlobalHandle.py Project: tone-zhang/daos
def CheckHandle(pool_glob_handle, uuidstr, cont_glob_handle, rank):
    """
    This gets run in a child process and verifies the global
    handles can be turned into local handles in another process.
    """

    try:
        # get paths from the build_vars generated by build
        with open('../../../.build_vars.json') as f:
            build_paths = json.load(f)

        # setup the DAOS python API in this process
        context = DaosContext(build_paths['PREFIX'] + '/lib/')

        # setup the pool and connect using global handle
        pool = DaosPool(context)
        pool.uuid = uuidstr
        pool.set_svc(rank)
        pool.group = "daos_server"
        buf = ctypes.cast(
            pool_glob_handle.iov_buf,
            ctypes.POINTER(ctypes.c_byte * pool_glob_handle.iov_buf_len))
        buf2 = bytearray()
        buf2.extend(buf.contents)
        pool_handle = pool.global2local(context, pool_glob_handle.iov_len,
                                        pool_glob_handle.iov_buf_len, buf2)

        # perform an operation that will use the new handle, if it
        # doesn't throw an exception, then all is well.
        pool.pool_query()

        # setup the container and then connect using the global handle
        container = DaosContainer(context)
        container.poh = pool_handle
        buf = ctypes.cast(
            cont_glob_handle.iov_buf,
            ctypes.POINTER(ctypes.c_byte * cont_glob_handle.iov_buf_len))
        buf2 = bytearray()
        buf2.extend(buf.contents)
        cont_handle = container.global2local(context, cont_glob_handle.iov_len,
                                             cont_glob_handle.iov_buf_len,
                                             buf2)
        # just try one thing to make sure handle is good
        container.query()

    except DaosApiError as e:
        print(e)
        print(traceback.format_exc())
        raise

    return
Code example #15
    def test_connectpermission(self):
        """
        Test pool connections with specific permissions.
        :avocado: tags=pool,permission,connectpermission
        """
        # parameters used in pool create
        createmode = self.params.get("mode", '/run/createtests/createmode/*/')
        createuid = os.geteuid()
        creategid = os.getegid()
        createsetid = self.params.get("setname", '/run/createtests/createset/')
        createsize = self.params.get("size", '/run/createtests/createsize/')

        # parameters used for pool connect
        permissions = self.params.get("perm", '/run/createtests/permissions/*')

        if createmode == 73:
            expected_result = 'FAIL'
        elif createmode == 511 and permissions == 0:
            expected_result = 'PASS'
        elif createmode in [146, 511] and permissions == 1:
            expected_result = 'PASS'
        elif createmode in [292, 511] and permissions == 2:
            expected_result = 'PASS'
        else:
            expected_result = 'FAIL'

        try:
            # initialize a python pool object then create the underlying
            # daos storage
            self.pool = DaosPool(self.Context)
            self.d_log.debug("Pool initialisation successful")

            self.pool.create(createmode, createuid, creategid, createsize,
                             createsetid, None)
            self.d_log.debug("Pool Creation successful")

            self.pool.connect(1 << permissions)
            self.d_log.debug("Pool Connect successful")

            if expected_result in ['FAIL']:
                self.fail("Test was expected to fail but it passed.\n")

        except DaosApiError as e:
            print(e)
            if expected_result == 'PASS':
                self.fail("Test was expected to pass but it failed.\n")
Code example #16
    def test_destroy_connect(self):
        """
        Test destroying a pool that has a connected client with force == false.
        Should fail.

        :avocado: tags=pool,pooldestroy,x
        """

        try:

            # write out a hostfile and start the servers with it
            hostlist = self.params.get("test_machines1", '/run/hosts/')
            hostfile = WriteHostFile.WriteHostFile(hostlist, self.tmp)

            ServerUtils.runServer(hostfile, self.server_group, self.basepath)

            # give it time to reach steady state
            time.sleep(1)

            # parameters used in pool create
            createmode = self.params.get("mode", '/run/poolparams/createmode/')
            createuid = self.params.get("uid", '/run/poolparams/createuid/')
            creategid = self.params.get("gid", '/run/poolparams/creategid/')
            createsetid = self.params.get("setname",
                                          '/run/poolparams/createset/')
            createsize = self.params.get("size", '/run/poolparams/createsize/')

            # initialize a python pool object then create the underlying
            # daos storage
            POOL = DaosPool(self.Context)
            POOL.create(createmode, createuid, creategid, createsize,
                        createsetid, None)

            # need a connection to create container
            POOL.connect(1 << 1)

            # destroy pool with connection open
            POOL.destroy(0)

            # should throw an exception and not hit this
            self.fail("Shouldn't hit this line.\n")

        except ValueError as e:
            print("got exception which is expected so long as it is BUSY")
            print(e)
            print(traceback.format_exc())
            # pool should still be there
            exists = CheckForPool.checkForPool(hostlist[0], POOL.get_uuid_str())
            if exists != 0:
                self.fail("Pool gone, but destroy should have failed.\n")

        # no matter what happens cleanup
        finally:
            ServerUtils.stopServer()
            os.remove(hostfile)
Code example #17
File: global_handle.py Project: daos-stack/daos
def check_handle(pool_glob_handle, uuidstr, cont_glob_handle, rank):
    """
    This gets run in a child process and verifies the global
    handles can be turned into local handles in another process.
    """

    try:
        # get paths from the build_vars generated by build
        with open('../../../.build_vars.json') as build_file:
            build_paths = json.load(build_file)

        # setup the DAOS python API in this process
        context = DaosContext(build_paths['PREFIX'] + '/lib/')


        # setup the pool and connect using global handle
        pool = DaosPool(context)
        pool.uuid = uuidstr
        pool.set_svc(rank)
        pool.group = "daos_server"
        buf = ctypes.cast(pool_glob_handle.iov_buf,
                          ctypes.POINTER(ctypes.c_byte *
                                         pool_glob_handle.iov_buf_len))
        buf2 = bytearray()
        buf2.extend(buf.contents)
        pool_handle = pool.global2local(context,
                                        pool_glob_handle.iov_len,
                                        pool_glob_handle.iov_buf_len,
                                        buf2)

        # perform an operation that will use the new handle, if it
        # doesn't throw an exception, then all is well.
        pool.pool_query()

        # setup the container and then connect using the global handle
        container = DaosContainer(context)
        container.poh = pool_handle
        buf = ctypes.cast(cont_glob_handle.iov_buf,
                          ctypes.POINTER(ctypes.c_byte *
                                         cont_glob_handle.iov_buf_len))
        buf2 = bytearray()
        buf2.extend(buf.contents)
        dummy_cont_handle = container.global2local(context,
                                                   cont_glob_handle.iov_len,
                                                   cont_glob_handle.iov_buf_len,
                                                   buf2)
        # just try one thing to make sure handle is good
        container.query()

    except DaosApiError as excep:
        print(excep)
        print(traceback.format_exc())
        raise

    return
Code example #18
File: eight_servers.py Project: morsiee/daos
    def executable(self, iorflags=None):
        """
        Executable function to run ior for sequential and random order
        """

        # parameters used in pool create
        createmode = self.params.get("mode_RW", '/run/pool/createmode/')
        createuid = os.geteuid()
        creategid = os.getegid()
        createsetid = self.params.get("setname", '/run/pool/createset/')
        createsize = self.params.get("size", '/run/pool/createsize/')
        createsvc = self.params.get("svcn", '/run/pool/createsvc/')

        # ior parameters
        client_processes = self.params.get("np", '/run/ior/clientslots/*')
        iteration = self.params.get("iter", '/run/ior/iteration/')
        block_size = self.params.get("b", '/run/ior/transfersize_blocksize/*/')
        object_class = self.params.get("o", '/run/ior/objectclass/*/')
        transfer_size = self.params.get("t",
                                        '/run/ior/transfersize_blocksize/*/')

        try:
            # initialize a python pool object then create the underlying
            # daos storage
            self.pool = DaosPool(self.context)
            #print("self.context:{}".format(self.context))
            self.pool.create(createmode, createuid, creategid, createsize,
                             createsetid, None, None, createsvc)

            pool_uuid = self.pool.get_uuid_str()
            svc_list = ""
            for item in range(createsvc):
                svc_list += str(int(self.pool.svc.rl_ranks[item])) + ":"
            svc_list = svc_list[:-1]

            print("svc_list: {}".format(svc_list))

            ior_utils.run_ior_daos(self.hostfile_clients, iorflags, iteration,
                                   block_size, transfer_size, pool_uuid,
                                   svc_list, object_class, self.basepath,
                                   client_processes)

        except (DaosApiError, ior_utils.IorFailed) as excep:
            print(excep)
            self.fail("Test was expected to pass but it failed.\n")
Code example #19
File: InfoTests.py Project: chen0qi/daos
    def setUp(self):
        # get paths from the build_vars generated by build
        with open(
                os.path.join(os.path.dirname(os.path.realpath(__file__)),
                             "../../../../.build_vars.json")) as f:
            build_paths = json.load(f)
        self.basepath = os.path.normpath(build_paths['PREFIX'] + "/../")
        self.tmp = build_paths['PREFIX'] + '/tmp'
        self.server_group = self.params.get("server_group", '/server/',
                                            'daos_server')

        context = DaosContext(build_paths['PREFIX'] + '/lib/')
        print("initialized!!!\n")

        self.pool = DaosPool(context)
        self.hostlist = self.params.get("test_machines1", '/run/hosts/')
        self.hostfile = WriteHostFile.WriteHostFile(self.hostlist, self.tmp)
        ServerUtils.runServer(self.hostfile, self.server_group, self.basepath)
Code example #20
File: metadata.py Project: mchaarawi/daos
    def setUp(self):
        """Set up each test case."""
        # Start the servers and agents
        super(ObjectMetadata, self).setUp()

        # Recreate the client hostfile without slots defined
        self.hostfile_clients = write_host_file(self.hostlist_clients,
                                                self.workdir, None)

        # Create a pool
        self.pool = DaosPool(self.context)
        self.pool.create(
            self.params.get("mode", '/run/pool/createmode/*'), os.geteuid(),
            os.getegid(), self.params.get("scm_size",
                                          '/run/pool/createsize/*'),
            self.params.get("setname", '/run/pool/createset/*'), None, None,
            self.params.get("svcn", '/run/pool/createsvc/*'),
            self.params.get("nvme_size", '/run/pool/createsize/*'))
Code example #21
    def create(self):
        """Create a pool.

        Destroys an existing pool if defined and assigns self.pool and
        self.uuid.
        """
        self.destroy()
        self.log.info("Creating a pool")
        self.pool = DaosPool(self.context)
        kwargs = {
            "mode": self.mode.value, "uid": self.uid, "gid": self.gid,
            "scm_size": self.scm_size.value, "group": self.name.value}
        for key in ("target_list", "svcn", "nvme_size"):
            value = getattr(self, key).value
            if value:
                kwargs[key] = value
        self._call_method(self.pool.create, kwargs)
        self.uuid = self.pool.get_uuid_str()
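For reference, the kwargs dict built above expands to a direct DaosPool.create call along the following lines; the literal values are placeholders, and the keyword names simply mirror the dict keys used in the method:

# Placeholder values; keyword names mirror the kwargs dict built above.
pool = DaosPool(self.context)
pool.create(mode=511, uid=os.geteuid(), gid=os.getegid(),
            scm_size=1 << 30, group="daos_server", svcn=1)
self.uuid = pool.get_uuid_str()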
Code example #22
File: RebuildNoCap.py Project: fetr0509/daos
    def setUp(self):
        """ setup for the test """

        # get paths from the build_vars generated by build
        with open('../../../.build_vars.json') as f:
            build_paths = json.load(f)
        self.CONTEXT = DaosContext(build_paths['PREFIX'] + '/lib/')

        # generate a hostfile
        self.host_list = self.params.get("test_machines", '/run/hosts/')
        tmp = build_paths['PREFIX'] + '/tmp'
        self.hostfile = WriteHostFile.WriteHostFile(self.host_list, tmp)

        # fire up the DAOS servers
        self.server_group = self.params.get("server_group", '/run/server/',
                                            'daos_server')
        ServerUtils.runServer(self.hostfile, self.server_group,
                              build_paths['PREFIX'] + '/../')
        time.sleep(3)

        # create a pool to test with
        createmode = self.params.get("mode", '/run/pool/createmode/')
        createuid = self.params.get("uid", '/run/pool/createuid/')
        creategid = self.params.get("gid", '/run/pool/creategid/')
        createsetid = self.params.get("setname", '/run/pool/createset/')
        createsize = self.params.get("size", '/run/pool/createsize/')
        self.POOL = DaosPool(self.CONTEXT)
        self.POOL.create(createmode, createuid, creategid, createsize,
                         createsetid)
        uuid = self.POOL.get_uuid_str()

        time.sleep(2)

        # stuff some bogus data into the pool
        how_many_bytes = long(
            self.params.get("datasize", '/run/testparams/datatowrite/'))
        exepath = build_paths['PREFIX'] +\
                  "/../src/tests/ftest/util/WriteSomeData.py"
        cmd = "export DAOS_POOL={0}; export DAOS_SVCL=1; mpirun"\
              " --np 1 --host {1} {2} {3} testfile".format(
                  uuid, self.host_list[0], exepath, how_many_bytes)
        subprocess.call(cmd, shell=True)
Code example #23
    def setUp(self):
        super(PoolAttributeTest, self).setUp()

        self.large_data_set = {}

        createmode = self.params.get("mode", '/run/attrtests/createmode/')
        createuid = os.geteuid()
        creategid = os.getgid()
        createsetid = self.params.get("setname", '/run/attrtests/createset/')
        createsize = self.params.get("size", '/run/attrtests/createsize/')
        try:
            self.pool = DaosPool(self.context)
            self.pool.create(createmode, createuid, creategid, createsize,
                             createsetid)
            self.pool.connect(1 << 1)

        except DaosApiError as excep:
            print("In the setup exception handler\n")
            print(excep)
            print(traceback.format_exc())
Code example #24
File: destroy_tests.py Project: daos-stack/daos
    def test_destroy_connect(self):
        """
        Test destroying a pool that has a connected client with force == false.
        Should fail.

        :avocado: tags=pool,pooldestroy,x
        """
        host = self.hostlist[0]
        try:

            # write out a hostfile and start the servers with it
            self.hostlist = self.params.get("test_machines1", '/run/hosts/')
            hostfile = write_host_file.write_host_file(self.hostlist, self.tmp)

            self.agent_sessions = AgentUtils.run_agent(self.basepath,
                                                       self.hostlist)
            server_utils.run_server(hostfile, self.server_group, self.basepath)

            # parameters used in pool create
            createmode = self.params.get("mode", '/run/poolparams/createmode/')
            createuid = self.params.get("uid", '/run/poolparams/createuid/')
            creategid = self.params.get("gid", '/run/poolparams/creategid/')
            createsetid = self.params.get("setname",
                                          '/run/poolparams/createset/')
            createsize = self.params.get("size", '/run/poolparams/createsize/')

            # initialize a python pool object then create the underlying
            # daos storage
            pool = DaosPool(self.context)
            pool.create(createmode, createuid, creategid,
                        createsize, createsetid, None)

            # need a connection to create container
            pool.connect(1 << 1)

            # destroy pool with connection open
            pool.destroy(0)

            # should throw an exception and not hit this
            self.fail("Shouldn't hit this line.\n")

        except DaosApiError as excep:
            print("got exception which is expected so long as it is BUSY")
            print(excep)
            print(traceback.format_exc())
            # pool should still be there
            exists = check_for_pool.check_for_pool(host, pool.get_uuid_str())
            if exists != 0:
                self.fail("Pool gone, but destroy should have failed.\n")

        # no matter what happens cleanup
        finally:
            if self.agent_sessions:
                AgentUtils.stop_agent(self.hostlist, self.agent_sessions)
            server_utils.stop_server(hosts=self.hostlist)
            os.remove(hostfile)
Code example #25
File: general_utils.py Project: mchaarawi/daos
def get_pool(context, mode, size, name, svcn=1, log=None, connect=True):
    """Return a DAOS pool that has been created an connected.

    Args:
        context (DaosContext): the context to use to create the pool
        mode (int): the pool mode
        size (int): the size of the pool
        name (str): the name of the pool
        svcn (int): the pool service leader quantity
        log (DaosLog, optional): object for logging messages. Defaults to None.
        connect (bool, optional): connect to the new pool. Defaults to True.

    Returns:
        DaosPool: an object representing a DAOS pool

    """
    if log:
        log.info("Creating a pool")
    pool = DaosPool(context)
    pool.create(mode, os.geteuid(), os.getegid(), size, name, svcn=svcn)
    if connect:
        if log:
            log.info("Connecting to the pool")
        pool.connect(1 << 1)
    return pool
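A usage sketch for get_pool; the yaml-derived values (createmode, createsize, createsetid) and self.context / self.d_log are stand-ins for whatever the calling test already has in scope:

# Illustrative only: create, use, and tear down a pool via the helper.
pool = get_pool(self.context, createmode, createsize, createsetid,
                svcn=1, log=self.d_log, connect=True)
try:
    pool.pool_query()          # any operation that needs the connected handle
finally:
    pool.disconnect()
    pool.destroy(1)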
Code example #26
File: ior_single_server.py Project: daos-stack/daos
    def test_singleserver(self):
        """
        Test IOR with Single Server config.

        :avocado: tags=ior,singleserver
        """

        # parameters used in pool create
        createmode = self.params.get("mode", '/run/createtests/createmode/*/')
        createuid = os.geteuid()
        creategid = os.getegid()
        createsetid = self.params.get("setname", '/run/createtests/createset/')
        createsize = self.params.get("size", '/run/createtests/createsize/')
        createsvc = self.params.get("svcn", '/run/createtests/createsvc/')
        iteration = self.params.get("iter", '/run/ior/iteration/')
        ior_flags = self.params.get("F", '/run/ior/iorflags/')
        transfer_size = self.params.get("t", '/run/ior/transfersize/')
        record_size = self.params.get("r", '/run/ior/recordsize/')
        segment_count = self.params.get("s", '/run/ior/segmentcount/')
        stripe_count = self.params.get("c", '/run/ior/stripecount/')
        async_io = self.params.get("a", '/run/ior/asyncio/')
        object_class = self.params.get("o", '/run/ior/objectclass/')

        try:
            # initialize a python pool object then create the underlying
            # daos storage
            self.pool = DaosPool(self.context)
            self.pool.create(createmode, createuid, creategid,
                             createsize, createsetid, None, None, createsvc)
            pool_uuid = self.pool.get_uuid_str()
            print ("pool_uuid: {}".format(pool_uuid))
            tmp_rank_list = []
            svc_list = ""
            for i in range(createsvc):
                tmp_rank_list.append(int(self.pool.svc.rl_ranks[i]))
                svc_list += str(tmp_rank_list[i]) + ":"
            svc_list = svc_list[:-1]

            if len(self.hostlist_clients) == 1:
                block_size = '12g'
            elif len(self.hostlist_clients) == 2:
                block_size = '6g'
            elif len(self.hostlist_clients) == 4:
                block_size = '3g'

            ior_utils.run_ior(self.hostfile_clients, ior_flags, iteration,
                              block_size, transfer_size, pool_uuid, svc_list,
                              record_size, segment_count, stripe_count,
                              async_io, object_class, self.basepath)

        except (DaosApiError, ior_utils.IorFailed) as excep:
            self.fail("<Single Server Test FAILED>\n {}".format(excep))
Code example #27
File: info_tests.py Project: morsiee/daos
    def setUp(self):
        self.agent_sessions = None
        # get paths from the build_vars generated by build
        with open(os.path.join(os.path.dirname(os.path.realpath(__file__)),
                               "../../../../.build_vars.json")) as build_file:
            build_paths = json.load(build_file)
        self.basepath = os.path.normpath(build_paths['PREFIX'] + "/../")
        self.server_group = self.params.get("name", '/server_config/',
                                            'daos_server')

        context = DaosContext(build_paths['PREFIX'] + '/lib/')

        self.pool = DaosPool(context)
        self.d_log = DaosLog(context)
        self.hostlist_servers = self.params.get("test_machines1",
                                                '/run/hosts/')
        self.hostfile_servers = write_host_file.write_host_file(
            self.hostlist_servers, self.workdir)
        self.agent_sessions = agent_utils.run_agent(self.basepath,
                                                    self.hostlist_servers)
        server_utils.run_server(self.hostfile_servers, self.server_group,
                                self.basepath)
Code example #28
    def test_global_handle(self):
        """
        Test ID: DAO

        Test Description: Use a pool handle in another process.

        :avocado: tags=all,pool,pr,tiny,poolglobalhandle
        """

        try:

            # use the uid/gid of the user running the test, these should
            # be perfectly valid
            createuid = os.geteuid()
            creategid = os.getegid()

            # parameters used in pool create that are in yaml
            createmode = self.params.get("mode", '/run/testparams/createmode/')
            createsetid = self.params.get("setname",
                                          '/run/testparams/createset/')
            createsize = self.params.get("size", '/run/testparams/createsize/')

            # initialize a python pool object then create the underlying
            # daos storage
            pool = DaosPool(self.context)
            pool.create(createmode, createuid, creategid,
                        createsize, createsetid, None)
            pool.connect(1 << 1)

            # create a container just to make sure handle is good
            container = DaosContainer(self.context)
            container.create(pool.handle)

            # create a global handle
            iov_len, buf_len, buf = pool.local2global()

            # this should work in the future but need on-line server addition
            #arg_list = (buf_len, iov_len, buf, pool.get_uuid_str(), 0)
            #p = Process(target=check_handle, args=arg_list)
            #p.start()
            #p.join()
            # for now verifying global handle in the same process which is not
            # the intended use case
            check_handle(buf_len, iov_len, buf, pool.get_uuid_str(), 0)

        except DaosApiError as excep:
            print(excep)
            print(traceback.format_exc())
            self.fail("Expecting to pass but test has failed.\n")
Code example #29
File: object_integrity.py Project: daos-stack/daos
    def setUp(self):
        self.agent_sessions = None
        self.pool = None
        self.container = None
        self.obj = None
        self.ioreq = None
        self.hostlist = None
        self.hostfile = None
        self.no_of_dkeys = None
        self.no_of_akeys = None
        self.array_size = None
        self.record_length = None

        with open('../../../.build_vars.json') as json_f:
            build_paths = json.load(json_f)
        basepath = os.path.normpath(build_paths['PREFIX'] + "/../")
        server_group = self.params.get("name",
                                       '/server_config/',
                                       'daos_server')
        self.context = DaosContext(build_paths['PREFIX'] + '/lib/')
        self.d_log = DaosLog(self.context)
        self.hostlist = self.params.get("test_machines", '/run/hosts/*')
        self.hostfile = write_host_file.write_host_file(self.hostlist,
                                                        self.workdir)
        self.no_of_dkeys = self.params.get("no_of_dkeys", '/run/dkeys/*')[0]
        self.no_of_akeys = self.params.get("no_of_akeys", '/run/akeys/*')[0]
        self.array_size = self.params.get("size", '/array_size/')
        self.record_length = self.params.get("length", '/run/record/*')

        self.agent_sessions = AgentUtils.run_agent(basepath, self.hostlist)
        server_utils.run_server(self.hostfile, server_group, basepath)

        self.pool = DaosPool(self.context)
        self.pool.create(self.params.get("mode", '/run/pool/createmode/*'),
                         os.geteuid(),
                         os.getegid(),
                         self.params.get("size", '/run/pool/createsize/*'),
                         self.params.get("setname", '/run/pool/createset/*'),
                         None)
        self.pool.connect(2)

        self.container = DaosContainer(self.context)
        self.container.create(self.pool.handle)
        self.container.open()

        self.obj = DaosObj(self.context, self.container)
        self.obj.create(objcls=1)
        self.obj.open()
        self.ioreq = IORequest(self.context,
                               self.container,
                               self.obj, objtype=4)
Code example #30
File: metadata.py Project: morsiee/daos
    def setUp(self):
        self.agent_sessions = None
        self.pool = None
        self.hostlist_servers = None
        self.hostfile_clients = None
        self.hostfile_servers = None
        self.out_queue = None
        self.pool_connect = True

        with open('../../../.build_vars.json') as json_f:
            build_paths = json.load(json_f)

        self.basepath = os.path.normpath(build_paths['PREFIX'] + "/../")
        self.server_group = self.params.get("name", '/server_config/',
                                            'daos_server')
        self.context = DaosContext(build_paths['PREFIX'] + '/lib/')
        self.d_log = DaosLog(self.context)
        self.hostlist_servers = self.params.get("servers", '/run/hosts/*')
        self.hostfile_servers = write_host_file.write_host_file(
            self.hostlist_servers, self.workdir)
        self.hostlist_clients = self.params.get("clients", '/run/hosts/*')
        self.hostfile_clients = write_host_file.write_host_file(
            self.hostlist_clients, self.workdir)
        self.agent_sessions = agent_utils.run_agent(self.basepath,
                                                    self.hostlist_servers,
                                                    self.hostlist_clients)
        server_utils.run_server(self.hostfile_servers, self.server_group,
                                self.basepath)

        self.pool = DaosPool(self.context)
        self.pool.create(self.params.get("mode", '/run/pool/createmode/*'),
                         os.geteuid(),
                         os.getegid(),
                         self.params.get("size", '/run/pool/createsize/*'),
                         self.params.get("setname", '/run/pool/createset/*'),
                         nvme_size=self.params.get("size",
                                                   '/run/pool/nvmesize/*'))
Code example #31
    def setUp(self):
        try:
            self.pool = None
            self.hostlist = None
            with open('../../../.build_vars.json') as build_file:
                build_paths = json.load(build_file)
                basepath = os.path.normpath(build_paths['PREFIX'] + "/../")
                server_group = self.params.get("name",
                                               '/server_config/',
                                               'daos_server')
                context = DaosContext(build_paths['PREFIX'] + '/lib/')

                self.hostlist = self.params.get("test_machines", '/run/hosts/*')
                self.hostfile = write_host_file.write_host_file(self.hostlist,
                                                                self.workdir)

                server_utils.run_server(self.hostfile, server_group, basepath)

                createmode = self.params.get("mode",
                                             '/run/attrtests/createmode/')
                createuid = os.geteuid()
                creategid = os.getgid()
                createsetid = self.params.get("setname",
                                              '/run/attrtests/createset/')
                createsize = self.params.get("size",
                                             '/run/attrtests/createsize/')

                self.pool = DaosPool(context)
                self.pool.create(createmode, createuid, creategid, createsize,
                                 createsetid)
                self.pool.connect(1 << 1)
                self.large_data_set = {}

        except DaosApiError as excep:
            print("In the setup exception handler\n")
            print(excep)
            print(traceback.format_exc())
Code example #32
    def test_create(self):
        """
        Test basic pool creation.

        :avocado: tags=pool,poolcreate,simplecreate
        """
        # Accumulate a list of pass/fail indicators representing what is
        # expected for each parameter then "and" them to determine the
        # expected result of the test
        expected_for_param = []
        modelist = self.params.get("mode", '/run/tests/modes/*')
        mode = modelist[0]
        expected_for_param.append(modelist[1])

        uidlist = self.params.get("uid", '/run/tests/uids/*', os.geteuid())
        if uidlist[0] == 'valid':
            uid = os.geteuid()
        else:
            uid = uidlist[0]
        expected_for_param.append(uidlist[1])

        gidlist = self.params.get("gid", '/run/tests/gids/*', os.getegid())
        if gidlist[0] == 'valid':
            gid = os.getegid()
        else:
            gid = gidlist[0]
        expected_for_param.append(gidlist[1])

        setidlist = self.params.get("setname", '/run/tests/setnames/*')
        setid = setidlist[0]
        expected_for_param.append(setidlist[1])

        expected_result = 'PASS'
        for result in expected_for_param:
            if result == 'FAIL':
                expected_result = 'FAIL'
                break

        try:
            self.pool = DaosPool(self.context)
            self.pool.create(mode, uid, gid, 1073741824, setid)
            if expected_result in ['FAIL']:
                self.fail("Test was expected to fail but it passed.\n")

        except DaosApiError as exc:
            print(exc)
            print(traceback.format_exc())
            if expected_result not in ['FAIL']:
                self.fail("Test was expected to pass but it failed.\n")
Code example #33
File: punch_test.py Project: daos-stack/daos
    def setUp(self):
        self.agent_sessions = None
        try:
            # get paths from the build_vars generated by build
            with open('../../../.build_vars.json') as build_file:
                build_paths = json.load(build_file)
                self.basepath = os.path.normpath(build_paths['PREFIX'] + "/../")

                self.server_group = self.params.get("name", '/server_config/',
                                                    'daos_server')

                # setup the DAOS python API
                self.context = DaosContext(build_paths['PREFIX'] + '/lib/')

                self.hostlist = self.params.get("test_machines", '/run/hosts/*')
                self.hostfile = write_host_file.write_host_file(self.hostlist,
                                                                self.workdir)

                self.agent_sessions = AgentUtils.run_agent(self.basepath,
                                                           self.hostlist)
                server_utils.run_server(self.hostfile, self.server_group,
                                        self.basepath)

                # parameters used in pool create
                createmode = self.params.get("mode", '/run/pool/createmode/')
                createsetid = self.params.get("setname", '/run/pool/createset/')
                createsize = self.params.get("size", '/run/pool/createsize/')

                createuid = os.geteuid()
                creategid = os.getegid()

                # initialize a python pool object then create the underlying
                # daos storage
                self.pool = DaosPool(self.context)
                self.pool.create(createmode, createuid, creategid,
                                 createsize, createsetid, None)
                self.pool.connect(1 << 1)

                # create a container
                self.container = DaosContainer(self.context)
                self.container.create(self.pool.handle)

                # now open it
                self.container.open()

        except DaosApiError as excep:
            print(excep)
            print(traceback.format_exc())
            self.fail("Test failed during setup.\n")
Code example #34
File: rebuild_no_cap.py Project: morsiee/daos
    def setUp(self):
        super(RebuildNoCap, self).setUp()
        # create a pool to test with
        createmode = self.params.get("mode", '/run/pool/createmode/')
        createuid = self.params.get("uid", '/run/pool/createuid/')
        creategid = self.params.get("gid", '/run/pool/creategid/')
        createsetid = self.params.get("setname", '/run/pool/createset/')
        createsize = self.params.get("size", '/run/pool/createsize/')
        self.pool = DaosPool(self.context)
        self.pool.create(createmode, createuid, creategid, createsize,
                         createsetid)
        uuid = self.pool.get_uuid_str()

        time.sleep(2)

        # stuff some bogus data into the pool
        how_many_bytes = long(self.params.get("datasize",
                                              '/run/testparams/datatowrite/'))
        exepath = self.prefix +\
                 "/../src/tests/ftest/util/write_some_data.py"
        cmd = "export DAOS_POOL={0}; export DAOS_SVCL=1; mpirun"\
              " --np 1 --host {1} {2} {3} testfile".format(
                  uuid, self.hostlist_servers[0], exepath, how_many_bytes)
        subprocess.call(cmd, shell=True)
Code example #35
File: basic_snapshot.py Project: daos-stack/daos
    def setUp(self):

        # get paths from the build_vars generated by build
        with open('../../../.build_vars.json') as finput:
            build_paths = json.load(finput)
        basepath = os.path.normpath(build_paths['PREFIX'] + "/../")

        server_group = self.params.get("name", '/server_config/',
                                       'daos_server')

        # setup the DAOS python API
        self.context = DaosContext(build_paths['PREFIX'] + '/lib/')
        self.d_log = DaosLog(self.context)

        self.hostlist = self.params.get("test_machines", '/run/hosts/*')
        hostfile = write_host_file.write_host_file(self.hostlist, self.workdir)

        server_utils.run_server(hostfile, server_group, basepath)

        # Set up the pool and container.
        try:
            # parameters used in pool create
            createmode = self.params.get("mode", '/run/pool/createmode/')
            createsetid = self.params.get("setname", '/run/pool/createset/')
            createsize = self.params.get("size", '/run/pool/createsize/*')
            createuid = os.geteuid()
            creategid = os.getegid()

            # initialize a pool object then create the underlying
            # daos storage
            self.pool = DaosPool(self.context)
            self.pool.create(createmode, createuid, creategid,
                             createsize, createsetid, None)

            # need a connection to create container
            self.pool.connect(1 << 1)

            # create a container
            self.container = DaosContainer(self.context)
            self.container.create(self.pool.handle)

            # now open it
            self.container.open()

        except DaosApiError as error:
            print(error)
            print(traceback.format_exc())
            self.fail("Test failed before snapshot taken")
コード例 #36
0
ファイル: soak.py プロジェクト: daos-stack/daos
    def create_pool(self):
        """
        Creates a pool that the various tests use for storage.
        """

        createmode = self.params.get("mode", '/run/pool1/createmode/*/')
        createuid = os.geteuid()
        creategid = os.getegid()
        createsetid = self.params.get("setname", '/run/pool1/createset/')
        createsize = self.params.get("size", '/run/pool1/createsize/')
        self.createsvc = self.params.get("svcn", '/run/pool1/createsvc/')

        self.pool = DaosPool(self.context)
        self.pool.create(createmode, createuid, creategid,
                         createsize, createsetid, None, None,
                         self.createsvc)
コード例 #37
0
    def executable(self, iorflags=None):
        """
        Executable function to run ior for ssf and fpp
        """

        # parameters used in pool create
        createmode = self.params.get("mode", '/run/pool/createmode/*/')
        createuid = os.geteuid()
        creategid = os.getegid()
        createsetid = self.params.get("setname", '/run/pool/createset/')
        createscm_size = self.params.get("scm_size", '/run/pool/createsize/')
        createnvme_size = self.params.get("nvme_size", '/run/pool/createsize/')
        createsvc = self.params.get("svcn", '/run/pool/createsvc/')
        iteration = self.params.get("iter", '/run/ior/iteration/')
        block_size = self.params.get("b", '/run/ior/transfersize_blocksize/*/')
        transfer_size = self.params.get("t",
                                        '/run/ior/transfersize_blocksize/*/')

        try:
            # initialize MpioUtils
            self.mpio = MpioUtils()
            if self.mpio.mpich_installed(self.hostlist_clients) is False:
                self.fail("Exiting Test: Mpich not installed")

            #print self.mpio.mpichinstall
            # initialize a python pool object then create the underlying
            # daos storage
            self.pool = DaosPool(self.context)
            self.pool.create(createmode, createuid, creategid,
                             createscm_size, createsetid, None, None, createsvc,
                             createnvme_size)

            pool_uuid = self.pool.get_uuid_str()
            svc_list = ""
            for i in range(createsvc):
                svc_list += str(int(self.pool.svc.rl_ranks[i])) + ":"
            svc_list = svc_list[:-1]

            print ("svc_list: {}".format(svc_list))

            ior_utils.run_ior_mpiio(self.basepath, self.mpio.mpichinstall,
                                    pool_uuid, svc_list, self.num_procs,
                                    self.hostfile_clients, iorflags, iteration,
                                    transfer_size, block_size, True)

        except (DaosApiError, MpioFailed) as excep:
            print(excep)
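The loop above that builds svc_list turns the pool's service replica ranks into the colon-separated form ("0:1:2") that the IOR/MPI-IO helpers expect. A compact, hedged sketch of the same formatting (the helper name is illustrative and not part of the DAOS test utilities):

def format_svc_list(pool, svc_count):
    """Join the first svc_count service ranks as a colon-separated string."""
    return ":".join(str(int(pool.svc.rl_ranks[i])) for i in range(svc_count))

# e.g. svc_list = format_svc_list(self.pool, createsvc)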
コード例 #38
0
ファイル: global_handle.py プロジェクト: daos-stack/daos
    def test_global_handle(self):
        """
        Test ID: DAO

        Test Description: Use a pool handle in another process.

        :avocado: tags=pool,poolhandle,vm,small,regression
        """

        try:

            # use the uid/gid of the user running the test, these should
            # be perfectly valid
            createuid = os.geteuid()
            creategid = os.getegid()

            # parameters used in pool create that are in yaml
            createmode = self.params.get("mode", '/run/testparams/createmode/')
            createsetid = self.params.get("setname",
                                          '/run/testparams/createset/')
            createsize = self.params.get("size", '/run/testparams/createsize/')

            # initialize a python pool object then create the underlying
            # daos storage
            pool = DaosPool(self.context)
            pool.create(createmode, createuid, creategid,
                        createsize, createsetid, None)
            pool.connect(1 << 1)

            # create a container just to make sure handle is good
            container = DaosContainer(self.context)
            container.create(pool.handle)

            # create a global handle
            iov_len, buf_len, buf = pool.local2global()

            # this should work in the future but need on-line server addition
            #arg_list = (buf_len, iov_len, buf, pool.get_uuid_str(), 0)
            #p = Process(target=check_handle, args=arg_list)
            #p.start()
            #p.join()
            # for now verifying global handle in the same process which is not
            # the intended use case
            check_handle(buf_len, iov_len, buf, pool.get_uuid_str(), 0)

        except DaosApiError as excep:
            print(excep)
            print(traceback.format_exc())
            self.fail("Expecting to pass but test has failed.\n")
コード例 #39
0
ファイル: destroy_tests.py プロジェクト: morsiee/daos
    def test_many_servers(self):
        """
        Test destroy on a large (relative) number of servers.

        :avocado: tags=pool,pooldestroy,destroybig
        """
        try:
            # write out a hostfile_servers and start the servers with it
            self.hostlist_servers = self.params.get("test_machines6",
                                                    '/run/hosts/')
            hostfile_servers = write_host_file.write_host_file(
                self.hostlist_servers, self.tmp)

            self.agent_sessions = agent_utils.run_agent(
                self.basepath, self.hostlist_servers)
            server_utils.run_server(hostfile_servers, self.server_group,
                                    self.basepath)

            # parameters used in pool create
            createmode = self.params.get("mode", '/run/poolparams/createmode/')
            createuid = self.params.get("uid", '/run/poolparams/createuid/')
            creategid = self.params.get("gid", '/run/poolparams/creategid/')
            createsetid = self.params.get("setname",
                                          '/run/poolparams/createset/')
            createsize = self.params.get("size", '/run/poolparams/createsize/')

            # initialize a python pool object then create the underlying
            # daos storage
            pool = DaosPool(self.context)
            pool.create(createmode, createuid, creategid, createsize,
                        createsetid, None)

            time.sleep(1)

            # okay, get rid of it
            pool.destroy(1)

        except DaosApiError as excep:
            print(excep)
            print(traceback.format_exc())
            self.fail("6 server test failed.\n")

        except Exception as excep:
            self.fail("Daos code segfaulted most likely.  Error: %s" % excep)

        # no matter what happens cleanup
        finally:
            if self.agent_sessions:
                agent_utils.stop_agent(self.agent_sessions)
            server_utils.stop_server(hosts=self.hostlist_servers)
            os.remove(hostfile_servers)
コード例 #40
0
ファイル: rebuild_no_cap.py プロジェクト: daos-stack/daos
    def setUp(self):
        """ setup for the test """
        self.agent_sessions = None
        # get paths from the build_vars generated by build
        with open('../../../.build_vars.json') as build_file:
            build_paths = json.load(build_file)
        self.context = DaosContext(build_paths['PREFIX'] + '/lib/')
        self.basepath = os.path.normpath(build_paths['PREFIX'] + "/../")

        # generate a hostfile
        self.hostlist = self.params.get("test_machines", '/run/hosts/')
        self.hostfile = write_host_file.write_host_file(self.hostlist,
                                                        self.workdir)

        # fire up the DAOS servers
        self.server_group = self.params.get("name", '/run/server_config/',
                                            'daos_server')
        self.agent_sessions = AgentUtils.run_agent(self.basepath, self.hostlist)
        server_utils.run_server(self.hostfile, self.server_group,
                                build_paths['PREFIX'] + '/../')

        # create a pool to test with
        createmode = self.params.get("mode", '/run/pool/createmode/')
        createuid = self.params.get("uid", '/run/pool/createuid/')
        creategid = self.params.get("gid", '/run/pool/creategid/')
        createsetid = self.params.get("setname", '/run/pool/createset/')
        createsize = self.params.get("size", '/run/pool/createsize/')
        self.pool = DaosPool(self.context)
        self.pool.create(createmode, createuid, creategid, createsize,
                         createsetid)
        uuid = self.pool.get_uuid_str()

        time.sleep(2)

        # stuff some bogus data into the pool
        how_many_bytes = long(self.params.get("datasize",
                                              '/run/testparams/datatowrite/'))
        # note: a leading "/" in the second argument would make os.path.join
        # discard PREFIX, so the relative path is given without it
        exepath = os.path.join(build_paths['PREFIX'],
                               "../src/tests/ftest/util/write_some_data.py")
        cmd = "export DAOS_POOL={0}; export DAOS_SVCL=1; mpirun"\
              " --np 1 --host {1} {2} {3} testfile".format(
                  uuid, self.hostlist[0], exepath, how_many_bytes)
        subprocess.call(cmd, shell=True)
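The shell command above exports DAOS_POOL and DAOS_SVCL and then launches write_some_data.py under mpirun. A hedged, shell-free sketch of the same invocation, equivalent only under the assumption that no other shell expansion is needed:

env = dict(os.environ, DAOS_POOL=uuid, DAOS_SVCL="1")
mpirun_cmd = ["mpirun", "--np", "1", "--host", self.hostlist[0],
              exepath, str(how_many_bytes), "testfile"]
subprocess.call(mpirun_cmd, env=env)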
コード例 #41
0
    def test_many_servers(self):
        """
        Test destroy on a large (relative) number of servers.

        :avocado: tags=pool,pooldestroy,destroybig
        """
        try:
            # write out a hostfile and start the servers with it
            hostlist = self.params.get("test_machines6", '/run/hosts/')
            hostfile = WriteHostFile.WriteHostFile(hostlist, self.tmp)

            ServerUtils.runServer(hostfile, self.server_group, self.basepath)

            # give it time to reach steady state
            time.sleep(1)

            # parameters used in pool create
            createmode = self.params.get("mode", '/run/poolparams/createmode/')
            createuid = self.params.get("uid", '/run/poolparams/createuid/')
            creategid = self.params.get("gid", '/run/poolparams/creategid/')
            createsetid = self.params.get("setname",
                                          '/run/poolparams/createset/')
            createsize = self.params.get("size", '/run/poolparams/createsize/')

            # initialize a python pool object then create the underlying
            # daos storage
            POOL = DaosPool(self.Context)
            POOL.create(createmode, createuid, creategid, createsize,
                        createsetid, None)

            time.sleep(1)

            # okay, get rid of it
            POOL.destroy(1)

        except ValueError as e:
            print(e)
            print(traceback.format_exc())
            self.fail("6 server test failed.\n")

        except Exception as e:
            self.fail("Daos code segfaulted most likely.  Error: %s" % e)

        # no matter what happens cleanup
        finally:
            ServerUtils.stopServer()
            os.remove(hostfile)
コード例 #42
0
    def setUp(self):
        self.agent_sessions = None
        # get paths from the build_vars generated by build
        with open(os.path.join(os.path.dirname(os.path.realpath(__file__)),
                               "../../../../.build_vars.json")) as build_file:
            build_paths = json.load(build_file)
        self.basepath = os.path.normpath(build_paths['PREFIX'] + "/../")
        self.server_group = self.params.get("name", '/server_config/',
                                            'daos_default_oops')

        self.context = DaosContext(build_paths['PREFIX'] + '/lib/')

        self.cont = None
        self.cont2 = None
        self.pool = DaosPool(self.context)
        self.d_log = DaosLog(self.context)
        self.hostlist = self.params.get("test_machines1", '/hosts/')
        self.hostfile = write_host_file.write_host_file(self.hostlist,
                                                        self.workdir)

        self.agent_sessions = AgentUtils.run_agent(self.basepath, self.hostlist)
        server_utils.run_server(self.hostfile, self.server_group, self.basepath)
コード例 #43
0
ファイル: attribute.py プロジェクト: daos-stack/daos
    def setUp(self):
        try:
            self.pool = None
            self.hostlist = None
            with open('../../../.build_vars.json') as build_file:
                build_paths = json.load(build_file)
                basepath = os.path.normpath(build_paths['PREFIX'] + "/../")
                server_group = self.params.get("name",
                                               '/server_config/',
                                               'daos_server')
                context = DaosContext(build_paths['PREFIX'] + '/lib/')

                self.hostlist = self.params.get("test_machines", '/run/hosts/*')
                self.hostfile = write_host_file.write_host_file(self.hostlist,
                                                                self.workdir)

                server_utils.run_server(self.hostfile, server_group, basepath)

                createmode = self.params.get("mode",
                                             '/run/attrtests/createmode/')
                createuid = os.geteuid()
                creategid = os.getgid()
                createsetid = self.params.get("setname",
                                              '/run/attrtests/createset/')
                createsize = self.params.get("size",
                                             '/run/attrtests/createsize/')

                self.pool = DaosPool(context)
                self.pool.create(createmode, createuid, creategid, createsize,
                                 createsetid)
                self.pool.connect(1 << 1)
                self.large_data_set = {}

        except DaosApiError as excep:
            print("In the setup exception handler\n")
            print(excep)
            print(traceback.format_exc())
コード例 #44
0
ファイル: metadata.py プロジェクト: daos-stack/daos
    def setUp(self):
        self.agent_sessions = None
        self.pool = None
        self.hostlist = None
        self.hostfile_clients = None
        self.hostfile = None
        self.out_queue = None
        self.pool_connect = True

        with open('../../../.build_vars.json') as json_f:
            build_paths = json.load(json_f)

        self.basepath = os.path.normpath(build_paths['PREFIX']  + "/../")
        self.server_group = self.params.get("name",
                                            '/server_config/',
                                            'daos_server')
        self.context = DaosContext(build_paths['PREFIX'] + '/lib/')
        self.d_log = DaosLog(self.context)
        self.hostlist = self.params.get("servers", '/run/hosts/*')
        self.hostfile = write_host_file.write_host_file(self.hostlist,
                                                        self.workdir)
        self.hostlist_clients = self.params.get("clients", '/run/hosts/*')
        self.hostfile_clients = (
            write_host_file.write_host_file(self.hostlist_clients,
                                            self.workdir))
        self.agent_sessions = AgentUtils.run_agent(self.basepath, self.hostlist,
                                                   self.hostlist_clients)
        server_utils.run_server(self.hostfile, self.server_group, self.basepath)

        self.pool = DaosPool(self.context)
        self.pool.create(self.params.get("mode", '/run/pool/createmode/*'),
                         os.geteuid(),
                         os.getegid(),
                         self.params.get("size", '/run/pool/createsize/*'),
                         self.params.get("setname", '/run/pool/createset/*'),
                         nvme_size=self.params.get("size",
                                                   '/run/pool/nvmesize/*'))
コード例 #45
0
class OpenContainerTest(Test):
    """
    Tests DAOS container bad create (non existing pool handle, bad uuid)
    and close.

    :avocado: tags=container,containeropen
    """
    def setUp(self):

        # get paths from the build_vars generated by build
        with open('../../../.build_vars.json') as f:
            build_paths = json.load(f)
        self.basepath = os.path.normpath(build_paths['PREFIX']  + "/../")
        self.tmp = build_paths['PREFIX'] + '/tmp'

        self.server_group = self.params.get("server_group", '/server/',
                                            'daos_server')

        # setup the DAOS python API
        self.Context = DaosContext(build_paths['PREFIX'] + '/lib/')
        self.POOL1 = None
        self.POOL2 = None
        self.CONTAINER1 = None
        self.CONTAINER2 = None

        self.hostfile = None
        self.hostlist = self.params.get("test_machines",'/run/hosts/*')
        self.hostfile = WriteHostFile.WriteHostFile(self.hostlist, self.tmp)

        # common parameters used in pool create
        self.createmode = self.params.get("mode",'/run/createtests/createmode/')
        self.createsetid = self.params.get("setname",'/run/createtests/createset/')
        self.createsize  = self.params.get("size",'/run/createtests/createsize/')

        # POOL 1 UID GID
        self.createuid1  = self.params.get("uid",'/run/createtests/createuid1/')
        self.creategid1  = self.params.get("gid",'/run/createtests/creategid1/')

        # POOL 2 UID GID
        self.createuid2  = self.params.get("uid",'/run/createtests/createuid2/')
        self.creategid2  = self.params.get("gid",'/run/createtests/creategid2/')

        ServerUtils.runServer(self.hostfile, self.server_group, self.basepath)

    def tearDown(self):
        try:
            if self.CONTAINER1 is not None:
                self.CONTAINER1.destroy()
            if self.CONTAINER2 is not None:
                self.CONTAINER2.destroy()
            if self.POOL1 is not None and self.POOL1.attached:
                self.POOL1.destroy(1)
            if self.POOL2 is not None and self.POOL2.attached:
                self.POOL2.destroy(1)
        finally:
            ServerUtils.stopServer(hosts=self.hostlist)

    def test_container_open(self):
        """
        Test basic container bad create.

        :avocado: tags=container,containeropen
        """

        expected_for_param = []
        uuidlist  = self.params.get("uuid",'/run/createtests/uuids/*/')
        containerUUID = uuidlist[0]
        expected_for_param.append(uuidlist[1])

        pohlist = self.params.get("poh",'/run/createtests/handles/*/')
        poh = pohlist[0]
        expected_for_param.append(pohlist[1])

        expected_result = 'PASS'
        for result in expected_for_param:
            if result == 'FAIL':
                expected_result = 'FAIL'
                break

        try:
            # create two pools and try to create containers in these pools
            self.POOL1 = DaosPool(self.Context)
            self.POOL1.create(self.createmode, self.createuid1, self.creategid1,
                              self.createsize, self.createsetid, None)

            self.POOL2 = DaosPool(self.Context)
            self.POOL2.create(self.createmode, self.createuid2, self.creategid2,
                              self.createsize, None, None)

            # Connect to the pools
            self.POOL1.connect(1 << 1)
            self.POOL2.connect(1 << 1)

            # defines pool handle for container open
            if pohlist[0] == 'POOL1':
                poh = self.POOL1.handle
            else:
                poh = self.POOL2.handle

            # Create a container in POOL1
            self.CONTAINER1 = DaosContainer(self.Context)
            self.CONTAINER1.create(self.POOL1.handle)

            # defines test UUID for container open
            if uuidlist[0] == 'POOL1':
                struuid = self.CONTAINER1.get_uuid_str()
                containerUUID = uuid.UUID(struuid)
            else:
                if uuidlist[0] == 'MFUUID':
                    containerUUID = "misformed-uuid-0000"
                else:
                    containerUUID = uuid.uuid4() # random uuid

            # tries to open the container1
            # open should be ok only if poh = POOL1.handle && containerUUID = CONTAINER1.uuid
            self.CONTAINER1.open(poh, containerUUID)

            # wait a few seconds and then destroy containers
            time.sleep(5)
            self.CONTAINER1.close()
            self.CONTAINER1.destroy()
            self.CONTAINER1 = None

            # cleanup the pools
            self.POOL1.disconnect()
            self.POOL1.destroy(1)
            self.POOL1 = None
            self.POOL2.disconnect()
            self.POOL2.destroy(1)
            self.POOL2 = None

            if expected_result in ['FAIL']:
                self.fail("Test was expected to fail but it passed.\n")

        except DaosApiError as e:
            print(e)
            print(traceback.format_exc())
            if expected_result == 'PASS':
                self.fail("Test was expected to pass but it failed.\n")
        finally:
            if self.hostfile is not None:
                os.remove(self.hostfile)
コード例 #47
0
ファイル: multiple_clients.py プロジェクト: daos-stack/daos
class MultipleClients(Test):
    """
    Test class Description: Runs IOR with multiple clients.

    """
    def setUp(self):
        self.agent_sessions = None
        # get paths from the build_vars generated by build
        with open('../../../.build_vars.json') as build_file:
            build_paths = json.load(build_file)
        self.basepath = os.path.normpath(build_paths['PREFIX'] + "/../")

        self.server_group = self.params.get("name", '/server_config/',
                                            'daos_server')
        self.daosctl = self.basepath + '/install/bin/daosctl'

        # setup the DAOS python API
        self.context = DaosContext(build_paths['PREFIX'] + '/lib/')
        self.pool = None

        self.hostlist_servers = self.params.get("test_servers",
                                                '/run/hosts/test_machines/*')
        self.hostfile_servers = (
            write_host_file.write_host_file(self.hostlist_servers,
                                            self.workdir))
        print("Host file servers is: {}".format(self.hostfile_servers))

        self.hostlist_clients = (
            self.params.get("clients",
                            '/run/hosts/test_machines/test_clients/*'))
        self.hostfile_clients = (
            write_host_file.write_host_file(self.hostlist_clients,
                                            self.workdir))
        print("Host file clientsis: {}".format(self.hostfile_clients))

        self.agent_sessions = AgentUtils.run_agent(self.basepath,
                                                   self.hostlist_servers,
                                                   self.hostlist_clients)
        server_utils.run_server(self.hostfile_servers, self.server_group,
                                self.basepath)

        if int(str(self.name).split("-")[0]) == 1:
            ior_utils.build_ior(self.basepath)

    def tearDown(self):
        try:
            if self.hostfile_clients is not None:
                os.remove(self.hostfile_clients)
            if self.hostfile_servers is not None:
                os.remove(self.hostfile_servers)
            if self.pool is not None and self.pool.attached:
                self.pool.destroy(1)
        finally:
            if self.agent_sessions:
                AgentUtils.stop_agent(self.hostlist_clients,
                                      self.agent_sessions)
            server_utils.stop_server(hosts=self.hostlist_servers)

    def test_multipleclients(self):
        """
        Test ID: DAOS-1263
        Test Description: Test IOR with 16 and 32 clients config.
        Use Cases: Different combinations of 16/32 Clients, 8b/1k/4k
                   record size, 1m/8m stripesize and 16 async io.
        :avocado: tags=ior,twoservers,multipleclients
        """

        # parameters used in pool create
        createmode = self.params.get("mode", '/run/pool/createmode/*/')
        createuid = os.geteuid()
        creategid = os.getegid()
        createsetid = self.params.get("setname", '/run/pool/createset/')
        createsize = self.params.get("size", '/run/pool/createsize/')
        createsvc = self.params.get("svcn", '/run/pool/createsvc/')
        iteration = self.params.get("iter", '/run/ior/iteration/')
        slots = self.params.get("slots", '/run/ior/clientslots/*')
        ior_flags = self.params.get("F", '/run/ior/iorflags/')
        transfer_size = self.params.get("t", '/run/ior/transfersize/')
        record_size = self.params.get("r", '/run/ior/recordsize/*')
        stripe_size = self.params.get("s", '/run/ior/stripesize/*')
        stripe_count = self.params.get("c", '/run/ior/stripecount/')
        async_io = self.params.get("a", '/run/ior/asyncio/')
        object_class = self.params.get("o", '/run/ior/objectclass/')

        try:
            # initialize a python pool object then create the underlying
            # daos storage
            self.pool = DaosPool(self.context)
            self.pool.create(createmode, createuid, creategid, createsize,
                             createsetid, None, None, createsvc)

            with open(self.hostfile_clients) as client_file:
                new_text = client_file.read().replace(
                    'slots=1', 'slots={0}'.format(slots))

            with open(self.hostfile_clients, "w") as client_file:
                client_file.write(new_text)

            pool_uuid = self.pool.get_uuid_str()
            tmp_rank_list = []
            svc_list = ""
            for i in range(createsvc):
                tmp_rank_list.append(int(self.pool.svc.rl_ranks[i]))
                svc_list += str(tmp_rank_list[i]) + ":"
            svc_list = svc_list[:-1]

            if slots == 8:
                block_size = '3g'
            elif slots == 16:
                block_size = '1536m'

            if stripe_size == '8m':
                transfer_size = stripe_size

            ior_utils.run_ior(self.hostfile_clients, ior_flags, iteration,
                              block_size, transfer_size, pool_uuid, svc_list,
                              record_size, stripe_size, stripe_count, async_io,
                              object_class, self.basepath, slots)

        except (DaosApiError, ior_utils.IorFailed) as excep:
            self.fail("<MultipleClients Test run Failed>\n {}".format(excep))
コード例 #48
0
ファイル: GlobalHandle.py プロジェクト: tone-zhang/daos
    def test_global_handle(self):
        """
        Test ID: DAO

        Test Description: Use a pool handle in another process.

        :avocado: tags=container,conthandle,vm,small,regression
        """

        try:

            # use the uid/gid of the user running the test, these should
            # be perfectly valid
            createuid = os.geteuid()
            creategid = os.getegid()

            # parameters used in pool create that are in yaml
            createmode = self.params.get("mode", '/run/testparams/createmode/')
            createsetid = self.params.get("setname",
                                          '/run/testparams/createset/')
            createsize = self.params.get("size", '/run/testparams/createsize/')

            # initialize a python pool object then create the underlying
            # daos storage
            pool = DaosPool(self.Context)
            pool.create(createmode, createuid, creategid, createsize,
                        createsetid, None)
            pool.connect(1 << 1)

            # create a pool global handle
            iov_len, buf_len, buf = pool.local2global()
            buftype = ctypes.c_byte * buf_len
            c_buf = buftype.from_buffer(buf)
            sct_pool_handle = sharedctypes.RawValue(
                IOV, ctypes.cast(c_buf, ctypes.c_void_p), buf_len, iov_len)

            # create a container
            container = DaosContainer(self.Context)
            container.create(pool.handle)
            container.open()

            # create a container global handle
            iov_len, buf_len, buf = container.local2global()
            buftype = ctypes.c_byte * buf_len
            c_buf = buftype.from_buffer(buf)
            sct_cont_handle = sharedctypes.RawValue(
                IOV, ctypes.cast(c_buf, ctypes.c_void_p), buf_len, iov_len)

            sct_pool_uuid = sharedctypes.RawArray(ctypes.c_byte, pool.uuid)
            # this should work in the future but need on-line server addition
            #arg_list = (
            #p = Process(target=CheckHandle, args=arg_list)
            #p.start()
            #p.join()
            # for now verifying global handle in the same process which is not
            # the intended use case
            CheckHandle(sct_pool_handle, sct_pool_uuid, sct_cont_handle, 0)

        except DaosApiError as e:
            print(e)
            print(traceback.format_exc())
            self.fail("Expecting to pass but test has failed.\n")
コード例 #50
0
class ContainerAttributeTest(TestWithServers):
    """
    Tests DAOS container attribute get/set/list.
    :avocado: recursive
    """
    def setUp(self):
        super(ContainerAttributeTest, self).setUp()

        self.large_data_set = {}

        self.pool = DaosPool(self.context)
        self.pool.create(
            self.params.get("mode", '/run/attrtests/createmode/*'),
            os.geteuid(), os.getegid(),
            self.params.get("size", '/run/attrtests/createsize/*'),
            self.params.get("setname", '/run/attrtests/createset/*'), None)
        self.pool.connect(1 << 1)
        poh = self.pool.handle
        self.container = DaosContainer(self.context)
        self.container.create(poh)
        self.container.open()

    def tearDown(self):
        try:
            if self.container:
                self.container.close()
        finally:
            super(ContainerAttributeTest, self).tearDown()

    def create_data_set(self):
        """
        To create the large attribute dictionary
        """
        allchar = string.ascii_letters + string.digits
        for i in range(1024):
            self.large_data_set[str(i)] = ("".join(
                random.choice(allchar) for x in range(random.randint(1, 100))))

    def test_container_attribute(self):
        """
        Test basic container attribute tests.
        :avocado: tags=container,container_attr,attribute,sync_conattribute
        """
        expected_for_param = []
        name = self.params.get("name", '/run/attrtests/name_handles/*/')
        expected_for_param.append(name[1])
        value = self.params.get("value", '/run/attrtests/value_handles/*/')
        expected_for_param.append(value[1])

        attr_dict = {name[0]: value[0]}
        if name[0] is not None:
            if "largenumberofattr" in name[0]:
                self.create_data_set()
                attr_dict = self.large_data_set
                attr_dict[name[0]] = value[0]

        expected_result = 'PASS'
        for result in expected_for_param:
            if result == 'FAIL':
                expected_result = 'FAIL'
                break
        try:
            self.container.set_attr(data=attr_dict)
            size, buf = self.container.list_attr()

            verify_list_attr(attr_dict, size, buf)

            # Request something that doesn't exist
            if name[0] is not None and "Negative" in name[0]:
                name[0] = "rubbish"

            results = {}
            results = self.container.get_attr([name[0]])

            # for this test the dictionary has been altered, need to just
            # set it to what we are expecting to get back
            if name[0] is not None:
                if "largenumberofattr" in name[0]:
                    attr_dict.clear()
                    attr_dict[name[0]] = value[0]

            verify_get_attr(attr_dict, results)

            if expected_result in ['FAIL']:
                self.fail("Test was expected to fail but it passed.\n")

        except (DaosApiError, DaosTestError) as excep:
            print(excep)
            print(traceback.format_exc())
            if expected_result == 'PASS':
                self.fail("Test was expected to pass but it failed.\n")

    def test_container_attribute_asyn(self):
        """
        Test basic container attribute tests.

        :avocado: tags=container,container_attr,attribute,async_conattribute
        """
        global GLOB_SIGNAL
        global GLOB_RC

        expected_for_param = []
        name = self.params.get("name", '/run/attrtests/name_handles/*/')
        expected_for_param.append(name[1])
        value = self.params.get("value", '/run/attrtests/value_handles/*/')
        expected_for_param.append(value[1])

        attr_dict = {name[0]: value[0]}
        if name[0] is not None:
            if "largenumberofattr" in name[0]:
                self.create_data_set()
                attr_dict = self.large_data_set
                attr_dict[name[0]] = value[0]

        expected_result = 'PASS'
        for result in expected_for_param:
            if result == 'FAIL':
                expected_result = 'FAIL'
                break
        try:
            GLOB_SIGNAL = threading.Event()
            self.container.set_attr(data=attr_dict, cb_func=cb_func)
            GLOB_SIGNAL.wait()
            if GLOB_RC != 0 and expected_result in ['PASS']:
                self.fail("RC not as expected after set_attr First {0}".format(
                    GLOB_RC))

            GLOB_SIGNAL = threading.Event()
            size, buf = self.container.list_attr(cb_func=cb_func)
            GLOB_SIGNAL.wait()
            if GLOB_RC != 0 and expected_result in ['PASS']:
                self.fail(
                    "RC not as expected after list_attr First {0}".format(
                        GLOB_RC))
            if expected_result in ['PASS']:
                verify_list_attr(attr_dict, size, buf, mode="async")

            # Request something that doesn't exist
            if name[0] is not None and "Negative" in name[0]:
                name[0] = "rubbish"

            GLOB_SIGNAL = threading.Event()
            self.container.get_attr([name[0]], cb_func=cb_func)

            GLOB_SIGNAL.wait()

            if GLOB_RC != 0 and expected_result in ['PASS']:
                self.fail(
                    "RC not as expected after get_attr {0}".format(GLOB_RC))

            # not verifying the get_attr since it's not available asynchronously

            if value[0] is not None:
                if GLOB_RC == 0 and expected_result in ['FAIL']:
                    self.fail("Test was expected to fail but it passed.\n")

        except DaosApiError as excep:
            print(excep)
            print(traceback.format_exc())
            if expected_result == 'PASS':
                self.fail("Test was expected to pass but it failed.\n")
コード例 #51
0
ファイル: open_close.py プロジェクト: daos-stack/daos
class OpenClose(Test):
    """
    Tests DAOS container open/close function with handle parameter.
    """
    def __init__(self, *args, **kwargs):
        super(OpenClose, self).__init__(*args, **kwargs)
        self.container1 = None
        self.container2 = None

    def setUp(self):
        # these are first since they are referenced in teardown
        self.pool = None
        self.hostlist = None
        self.hostlist = self.params.get("test_servers", '/run/hosts/')

        # get paths from the build_vars generated by build
        with open(os.path.join(os.path.dirname(os.path.realpath(__file__)),
                               '../../../../.build_vars.json')) as build_file:
            build_paths = json.load(build_file)
        self.basepath = os.path.normpath(build_paths['PREFIX'] + "/../")
        self.server_group = self.params.get("name", '/server_config/',
                                            'daos_server')

        # setup the DAOS python API
        self.context = DaosContext(build_paths['PREFIX'] + '/lib/')

        self.hostfile = write_host_file.write_host_file(self.hostlist,
                                                        self.workdir)

        self.agent_sessions = AgentUtils.run_agent(self.basepath, self.hostlist)
        server_utils.run_server(self.hostfile, self.server_group, self.basepath)

    def tearDown(self):
        try:
            if self.pool is not None and self.pool.attached:
                self.pool.destroy(1)
        finally:
            try:
                if self.agent_sessions:
                    AgentUtils.stop_agent(self.hostlist, self.agent_sessions)
                server_utils.stop_server(hosts=self.hostlist)
            except server_utils.ServerFailed:
                pass

    def test_closehandle(self):
        """
        Test container close function with container handle paramter.

        :avocado: tags=container,openclose,closehandle
        """
        saved_coh = None

        # parameters used in pool create
        createmode = self.params.get("mode", '/run/pool/createmode/')
        createuid = os.geteuid()
        creategid = os.getegid()
        createsetid = self.params.get("setname", '/run/pool/createset/')
        createsize = self.params.get("size", '/run/pool/createsize/')
        coh_params = self.params.get("coh",
                                     '/run/container/container_handle/*/')

        expected_result = coh_params[1]

        try:
            # initialize a python pool object then create the underlying
            # daos storage
            self.pool = DaosPool(self.context)
            self.pool.create(createmode, createuid, creategid,
                             createsize, createsetid, None)

            poh = self.pool.handle
            self.pool.connect(1 << 1)

            # Container initialization and creation
            self.container1 = DaosContainer(self.context)
            self.container1.create(poh)
            str_cuuid = self.container1.get_uuid_str()
            cuuid = uuid.UUID(str_cuuid)
            self.container1.open(poh, cuuid, 2, None)

            # Defining 'good' and 'bad' container handles
            saved_coh = self.container1.coh
            if coh_params[0] == 'GOOD':
                coh = self.container1.coh
            else:
                # create a second container, open to get a handle
                # then close & destroy so handle is invalid
                self.container2 = DaosContainer(self.context)
                self.container2.create(poh)
                self.container2.open(poh, cuuid, 2, None)
                coh = self.container2.coh
                self.container2.close()
                self.container2.destroy()

            # close container with either good or bad handle
            self.container1.close(coh)

            if expected_result in ['FAIL']:
                self.fail("Test was expected to fail but it passed.\n")

        except DaosApiError as excep:
            if expected_result == 'PASS':
                print(excep)
                print(traceback.format_exc())
                self.fail("Test was expected to pass but it failed.\n")

            # close above failed so close for real with the right coh
            if saved_coh is not None:
                self.container1.close(saved_coh)

        finally:
            self.container1.destroy(1)
            self.pool.disconnect()
            self.pool.destroy(1)
            self.pool = None
コード例 #52
0
ファイル: bad_create.py プロジェクト: daos-stack/daos
    def test_create(self):
        """
        Pass bad parameters to pool create.

        :avocado: tags=pool,poolcreate,badparam,badcreate
        """
        # Accumulate a list of pass/fail indicators representing what is
        # expected for each parameter then "and" them to determine the
        # expected result of the test

        pool = None
        expected_for_param = []

        modelist = self.params.get("mode", '/run/createtests/modes/*')
        mode = modelist[0]
        expected_for_param.append(modelist[1])

        uidlist = self.params.get("uid", '/run/createtests/uids/*')
        uid = uidlist[0]
        expected_for_param.append(uidlist[1])

        gidlist = self.params.get("gid", '/run/createtests/gids/*')
        gid = gidlist[0]
        expected_for_param.append(gidlist[1])

        setidlist = self.params.get("setname", '/run/createtests/setnames/*')
        if setidlist[0] == 'NULLPTR':
            group = None
            self.cancel("skipping this test until DAOS-1991 is fixed")
        else:
            group = setidlist[0]
        expected_for_param.append(setidlist[1])

        targetlist = self.params.get("rankptr", '/run/createtests/target/*')
        if targetlist[0] == 'NULL':
            targetptr = None
        else:
            targetptr = [0]
        expected_for_param.append(targetlist[1])

        # not ready for this yet
        #devicelist = self.params.get("devptr", '/run/createtests/device/*')
        #if devicelist[0] == 'NULL':
        #    devptr = None
        #else:
        #    devptr = devicelist[0]
        #expected_for_param.append(devicelist[1])

        sizelist = self.params.get("size", '/run/createtests/psize/*')
        size = sizelist[0]
        expected_for_param.append(sizelist[1])

        # parameter not presently supported
        #svclist = self.params.get("rankptr", '/run/createtests/svc/*')
        #if svclist[0] == 'NULL':
        #    svc = None
        #else:
        #    svc = None
        #expected_for_param.append(devicelist[1])

        # if any parameter is FAIL then the test should FAIL, in this test
        # virtually everyone should FAIL since we are testing bad parameters
        expected_result = 'PASS'
        for result in expected_for_param:
            if result == 'FAIL':
                expected_result = 'FAIL'
                break

        try:
            # setup the DAOS python API
            with open('../../../.build_vars.json') as build_file:
                data = json.load(build_file)
            context = DaosContext(data['PREFIX'] + '/lib/')

            # initialize a python pool object then create the underlying
            # daos storage
            pool = DaosPool(context)
            pool.create(mode, uid, gid, size, group, targetptr)

            if expected_result in ['FAIL']:
                self.fail("Test was expected to fail but it passed.\n")

        except DaosApiError as excep:
            print(excep)
            print(traceback.format_exc())
            if expected_result == 'PASS':
                self.fail("Test was expected to pass but it failed.\n")
        finally:
            if pool is not None and pool.attached:
                pool.destroy(1)
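Several of the bad-parameter tests in this collection accumulate per-parameter PASS/FAIL expectations and then walk the list to decide the overall expectation. A compact, hedged equivalent of that accumulation step (illustrative only, not taken from the DAOS sources):

def combine_expected(expected_for_param):
    """Overall expectation is FAIL if any single parameter expects FAIL."""
    return 'FAIL' if 'FAIL' in expected_for_param else 'PASS'

# e.g. expected_result = combine_expected(expected_for_param)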
コード例 #53
0
ファイル: rebuild_with_io.py プロジェクト: daos-stack/daos
    def test_rebuild_with_io(self):
        """
        Test ID: Rebuild-003

        Test Description: Trigger a rebuild while I/O is ongoing.

        Use Cases:
          -- single pool, single client performing continous read/write/verify
             sequence while failure/rebuild is triggered in another process

        :avocado: tags=pool,rebuild,rebuildwithio
        """

        # the rebuild tests need to redo this stuff each time so not in setup
        # as it usually would be
        server_group = self.params.get("name", '/server_config/',
                                       'daos_server')

        basepath = os.path.normpath(self.build_paths['PREFIX'] + "/../")

        self.hostlist = self.params.get("test_machines", '/run/hosts/')
        hostfile = write_host_file.write_host_file(self.hostlist, self.workdir)

        try:
            self.agent_sessions = AgentUtils.run_agent(basepath, self.hostlist)
            server_utils.run_server(hostfile, server_group, basepath)

            # use the uid/gid of the user running the test, these should
            # be perfectly valid
            createuid = os.geteuid()
            creategid = os.getegid()

            # parameters used in pool create that are in yaml
            createmode = self.params.get("mode", '/run/testparams/createmode/')
            createsetid = self.params.get("setname",
                                          '/run/testparams/createset/')
            createsize = self.params.get("size", '/run/testparams/createsize/')

            # initialize a python pool object then create the underlying
            # daos storage
            pool = DaosPool(self.context)
            pool.create(createmode, createuid, creategid,
                        createsize, createsetid, None)
            pool.connect(1 << 1)
            container = DaosContainer(self.context)
            container.create(pool.handle)
            container.open()

            # get pool status and make sure it all looks good before we start
            pool.pool_query()
            if pool.pool_info.pi_ndisabled != 0:
                self.fail("Number of disabled targets reporting incorrectly.\n")
            if pool.pool_info.pi_rebuild_st.rs_errno != 0:
                self.fail("Rebuild error but rebuild hasn't run.\n")
            if pool.pool_info.pi_rebuild_st.rs_done != 1:
                self.fail("Rebuild is running but device hasn't failed yet.\n")
            if pool.pool_info.pi_rebuild_st.rs_obj_nr != 0:
                self.fail("Rebuilt objs not zero.\n")
            if pool.pool_info.pi_rebuild_st.rs_rec_nr != 0:
                self.fail("Rebuilt recs not zero.\n")
            dummy_pool_version = pool.pool_info.pi_rebuild_st.rs_version

            # do I/O for 30 seconds
            dummy_bw = io_utilities.continuous_io(container, 30)

            # trigger the rebuild
            rank = self.params.get("rank", '/run/testparams/ranks/*')
            server = DaosServer(self.context, server_group, rank)
            server.kill(1)
            pool.exclude([rank])

            # do another 30 seconds of I/O,
            # waiting for some improvements in server bootstrap
            # at which point we can move the I/O to a separate client and
            # really pound it with I/O
            dummy_bw = io_utilities.continuous_io(container, 30)

            # wait for the rebuild to finish
            while True:
                pool.pool_query()
                if pool.pool_info.pi_rebuild_st.rs_done == 1:
                    break
                else:
                    time.sleep(2)

            # check rebuild statistics
            if pool.pool_info.pi_ndisabled != 1:
                self.fail("Number of disabled targets reporting incorrectly: {}"
                          .format(pool.pool_info.pi_ndisabled))
            if pool.pool_info.pi_rebuild_st.rs_errno != 0:
                self.fail("Rebuild error reported: {}".format(
                    pool.pool_info.pi_rebuild_st.rs_errno))
            if pool.pool_info.pi_rebuild_st.rs_obj_nr <= 0:
                self.fail("No objects have been rebuilt.")
            if pool.pool_info.pi_rebuild_st.rs_rec_nr <= 0:
                self.fail("No records have been rebuilt.")

        except (ValueError, DaosApiError) as excep:
            print(excep)
            print(traceback.format_exc())
            self.fail("Expecting to pass but test has failed.\n")

        finally:
            # wait for the I/O process to finish
            try:
                server_utils.stop_server(hosts=self.hostlist)
                os.remove(hostfile)
                # really make sure everything is gone
                check_for_pool.cleanup_pools(self.hostlist)
            finally:
                if self.agent_sessions:
                    AgentUtils.stop_agent(self.hostlist, self.agent_sessions)
                server_utils.kill_server(self.hostlist)
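The rebuild test above polls pool_query in an unbounded loop until rs_done is reported. A hedged sketch of the same wait with a timeout, so a stuck rebuild fails the test instead of hanging it (the helper name and timeout value are illustrative):

def wait_for_rebuild(pool, timeout=300, interval=2):
    """Poll the pool until rebuild completes or the timeout expires."""
    deadline = time.time() + timeout
    while time.time() < deadline:
        pool.pool_query()
        if pool.pool_info.pi_rebuild_st.rs_done == 1:
            return
        time.sleep(interval)
    raise RuntimeError("rebuild did not finish within {}s".format(timeout))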
コード例 #54
0
    def test_array_obj(self):
        """
        Test ID: DAOS-961

        Test Description: Writes an array to an object and then reads it
        back and verifies it.

        :avocado: tags=object,arrayobj,regression,vm,small
        """
        try:
            # parameters used in pool create
            createmode = self.params.get("mode", '/run/conttests/createmode/')
            createsetid = self.params.get("setname",
                                          '/run/conttests/createset/')
            createsize = self.params.get("size", '/run/conttests/createsize/')
            createuid = os.geteuid()
            creategid = os.getegid()
            print("uid is {} gid is {}".format(createuid, creategid))

            # initialize a python pool object then create the underlying
            # daos storage
            pool = DaosPool(self.Context)
            pool.create(createmode, createuid, creategid, createsize,
                        createsetid, None)
            self.pl.info("Pool %s created.", pool.get_uuid_str())

            # need a connection to create container
            pool.connect(1 << 1)

            # create a container
            container = DaosContainer(self.Context)
            container.create(pool.handle)
            self.pl.info("Container %s created.", container.get_uuid_str())

            # now open it
            container.open()

            # do a query and compare the UUID returned from create with
            # that returned by query
            container.query()

            if container.get_uuid_str() != c_uuid_to_str(
                    container.info.ci_uuid):
                self.fail("Container UUID did not match the one in info\n")

            # create an object and write some data into it
            thedata = []
            thedata.append("data string one")
            thedata.append("data string two")
            thedata.append("data string tre")
            dkey = "this is the dkey"
            akey = "this is the akey"

            self.pl.info("writing array to dkey >%s< akey >%s<.", dkey, akey)
            oid, epoch = container.write_an_array_value(thedata, dkey, akey)

            # read the data back and make sure it's correct
            length = len(thedata[0])
            thedata2 = container.read_an_array(len(thedata), length + 1, dkey,
                                               akey, oid, epoch)
            if thedata[0][0:length - 1] != thedata2[0][0:length - 1]:
                self.pl.error("Data mismatch")
                self.pl.error("Wrote: >%s<" (thedata[0]))
                self.pl.error("Read: >%s<" (thedata2[0]))
                self.fail("Write data, read it back, didn't match\n")

            if thedata[2][0:length - 1] != thedata2[2][0:length - 1]:
                self.pl.error("Data mismatch")
                self.pl.error("Wrote: >%s<" (thedata[2]))
                self.pl.error("Read: >%s<" (thedata2[2]))
                self.fail("Write data, read it back, didn't match\n")

            container.close()

            # wait a few seconds and then destroy
            time.sleep(5)
            container.destroy()

            # cleanup the pool
            pool.disconnect()
            pool.destroy(1)
            self.pl.info("Test Complete")

        except (ValueError, DaosApiError) as excep:
            self.pl.error("Test Failed, exception was thrown.")
            print(excep)
            print(traceback.format_exc())
            self.fail("Test was expected to pass but it failed.\n")
Code Example #55
File: create.py Project: daos-stack/daos
    def test_container_create(self):
        """
        Test ID: DAOS-689

        Test Description: valid and invalid container creation and close.

        :avocado: tags=regression,cont,contcreate
        """

        pool = None
        contuuid = None
        should_fail = False
        expected_results = []

        try:
            # initialize a python pool object then create the underlying
            # daos storage
            createmode = self.params.get("mode", '/run/poolparams/')
            createuid = os.geteuid()
            creategid = os.getegid()
            createsetid = self.params.get("setname", '/run/poolparams/')
            createsize = self.params.get("size", '/run/poolparams/')

            # setup the pool
            pool = DaosPool(self.context)
            pool.create(createmode, createuid, creategid,
                        createsize, createsetid)
            pool.connect(1 << 1)

            # maybe use the good handle, maybe not
            handleparam = self.params.get("handle", '/run/poolhandle/*')
            if handleparam == 'VALID':
                poh = pool.handle
            else:
                poh = handleparam
                expected_results.append('FAIL')

            # maybe use a good UUID, maybe not
            uuidparam = self.params.get("uuid", "/uuids/*")
            expected_results.append(uuidparam[1])
            if uuidparam[0] == 'NULLPTR':
                self.cancel("skipping this test until DAOS-2043 is fixed")
                contuuid = 'NULLPTR'
            else:
                contuuid = uuid.UUID(uuidparam[0])

            should_fail = False
            for result in expected_results:
                if result == 'FAIL':
                    should_fail = True
                    break

            self.container = DaosContainer(self.context)
            self.container.create(poh, contuuid)

            # check UUID is the specified one
            if (uuidparam[0]).upper() != self.container.get_uuid_str().upper():
                print("uuidparam[0] is {}, uuid_str is {}".format(
                    uuidparam[0], self.container.get_uuid_str()))
                self.fail("Container UUID differs from specified at create\n")

            if should_fail:
                self.fail("Test was expected to fail but it passed.\n")

        except DaosApiError as excep:
            print(excep)
            print(traceback.format_exc())
            if not should_fail:
                self.fail("Test was expected to pass but it failed.\n")
        finally:
            if pool is not None:
                pool.disconnect()
                pool.destroy(1)
Code Example #56
File: array_obj_test.py Project: daos-stack/daos
    def test_array_obj(self):
        """
        Test ID: DAOS-961

        Test Description: Writes an array to an object and then reads it
        back and verifies it.

        :avocado: tags=object,arrayobj,regression,vm,small
        """
        try:
            # parameters used in pool create
            createmode = self.params.get("mode", '/run/pool_params/createmode/')
            createsetid = self.params.get("setname",
                                          '/run/pool_params/createset/')
            createsize = self.params.get("size", '/run/pool_params/createsize/')
            createuid = os.geteuid()
            creategid = os.getegid()

            # initialize a python pool object then create the underlying
            # daos storage
            pool = DaosPool(self.context)
            pool.create(createmode, createuid, creategid,
                        createsize, createsetid, None)
            self.plog.info("Pool %s created.", pool.get_uuid_str())

            # need a connection to create container
            pool.connect(1 << 1)

            # create a container
            container = DaosContainer(self.context)
            container.create(pool.handle)
            self.plog.info("Container %s created.", container.get_uuid_str())

            # now open it
            container.open()

            # do a query and compare the UUID returned from create with
            # that returned by query
            container.query()

            if container.get_uuid_str() != c_uuid_to_str(
                    container.info.ci_uuid):
                self.fail("Container UUID did not match the one in info\n")

            # create an object and write some data into it
            thedata = []
            thedata.append("data string one")
            thedata.append("data string two")
            thedata.append("data string tre")
            dkey = "this is the dkey"
            akey = "this is the akey"

            self.plog.info("writing array to dkey >%s< akey >%s<.", dkey, akey)
            oid, epoch = container.write_an_array_value(thedata, dkey, akey,
                                                        obj_cls=3)

            # read the data back and make sure it's correct
            length = len(thedata[0])
            thedata2 = container.read_an_array(len(thedata), length+1,
                                               dkey, akey, oid, epoch)
            if thedata[0][0:length-1] != thedata2[0][0:length-1]:
                self.plog.error("Data mismatch")
                self.plog.error("Wrote: >%s<", thedata[0])
                self.plog.error("Read: >%s<", thedata2[0])
                self.fail("Write data, read it back, didn't match\n")

            if thedata[2][0:length-1] != thedata2[2][0:length-1]:
                self.plog.error("Data mismatch")
                self.plog.error("Wrote: >%s<", thedata[2])
                self.plog.error("Read: >%s<", thedata2[2])
                self.fail("Write data, read it back, didn't match\n")

            container.close()

            # wait a few seconds and then destroy
            time.sleep(5)
            container.destroy()

            # cleanup the pool
            pool.disconnect()
            pool.destroy(1)
            self.plog.info("Test Complete")

        except DaosApiError as excep:
            self.plog.error("Test Failed, exception was thrown.")
            print(excep)
            print(traceback.format_exc())
            self.fail("Test was expected to pass but it failed.\n")
Code Example #57
File: bad_exclude.py Project: daos-stack/daos
    def test_exclude(self):
        """
        Pass bad parameters to pool exclude.

        :avocado: tags=pool,poolexclude,badparam,badexclude
        """
        # parameters used in pool create
        createmode = self.params.get("mode", '/run/pool/createmode/')
        createsetid = self.params.get("setname", '/run/pool/createset/')
        createsize = self.params.get("size", '/run/pool/createsize/')

        createuid = os.geteuid()
        creategid = os.getegid()

        # Accumulate a list of pass/fail indicators representing what is
        # expected for each parameter then "and" them to determine the
        # expected result of the test
        expected_for_param = []

        tgtlist = self.params.get("ranklist", '/run/testparams/tgtlist/*/')
        targets = []

        if tgtlist[0] == "NULLPTR":
            targets = None
            self.cancel("skipping null pointer test until DAOS-1929 is fixed")
        else:
            targets.append(tgtlist[0])
        expected_for_param.append(tgtlist[1])

        svclist = self.params.get("ranklist", '/run/testparams/svrlist/*/')
        svc = svclist[0]
        expected_for_param.append(svclist[1])

        setlist = self.params.get("setname",
                                  '/run/testparams/connectsetnames/*/')
        connectset = setlist[0]
        expected_for_param.append(setlist[1])

        uuidlist = self.params.get("uuid", '/run/testparams/UUID/*/')
        excludeuuid = uuidlist[0]
        expected_for_param.append(uuidlist[1])

        # if any parameter is FAIL then the test should FAIL, in this test
        # virtually every case should FAIL since we are testing bad parameters
        expected_result = 'PASS'
        for result in expected_for_param:
            if result == 'FAIL':
                expected_result = 'FAIL'
                break

        saved_svc = None
        saved_grp = None
        saved_uuid = None
        pool = None
        try:
            # setup the DAOS python API
            with open('../../../.build_vars.json') as build_file:
                data = json.load(build_file)
            context = DaosContext(data['PREFIX'] + '/lib/')

            # initialize a python pool object then create the underlying
            # daos storage
            pool = DaosPool(context)
            pool.create(createmode, createuid, creategid,
                        createsize, createsetid, None)

            # trash the pool service rank list
            if not svc == 'VALID':
                self.cancel("skipping this test until DAOS-1931 is fixed")
                saved_svc = RankList(pool.svc.rl_ranks, pool.svc.rl_nr)
                pool.svc = None

            # trash the pool group value
            if connectset == 'NULLPTR':
                saved_grp = pool.group
                pool.group = None

            # trash the UUID value in various ways
            if excludeuuid == 'NULLPTR':
                self.cancel("skipping this test until DAOS-1932 is fixed")
                saved_uuid = (ctypes.c_ubyte * 16)(0)
                ctypes.memmove(saved_uuid, pool.uuid, 16)
                pool.uuid = 0
            if excludeuuid == 'CRAP':
                self.cancel("skipping this test until DAOS-1932 is fixed")
                saved_uuid = (ctypes.c_ubyte * 16)(0)
                ctypes.memmove(saved_uuid, pool.uuid, 16)
                pool.uuid[4] = 244

            pool.exclude(targets)

            if expected_result in ['FAIL']:
                self.fail("Test was expected to fail but it passed.\n")

        except DaosApiError as excep:
            print(excep)
            print(traceback.format_exc())
            if expected_result in ['PASS']:
                self.fail("Test was expected to pass but it failed.\n")
        finally:
            if pool is not None:
                if saved_svc is not None:
                    pool.svc = saved_svc
                if saved_grp is not None:
                    pool.group = saved_grp
                if saved_uuid is not None:
                    ctypes.memmove(pool.uuid, saved_uuid, 16)

                pool.destroy(1)
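The accumulate-then-scan pattern used above to decide the expected outcome recurs in several of the bad-parameter tests. Assuming the same 'PASS'/'FAIL' string convention, the loop collapses to a one-liner; this tiny helper is only an illustrative sketch, not part of the original tests.

def expected_outcome(expected_for_param):
    """Any single 'FAIL' expectation makes the whole test expect failure."""
    return 'FAIL' if any(result == 'FAIL' for result in expected_for_param) else 'PASS'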
Code Example #58
File: open_close.py Project: daos-stack/daos
    def test_closehandle(self):
        """
        Test container close function with a container handle parameter.

        :avocado: tags=container,openclose,closehandle
        """
        saved_coh = None

        # parameters used in pool create
        createmode = self.params.get("mode", '/run/pool/createmode/')
        createuid = os.geteuid()
        creategid = os.getegid()
        createsetid = self.params.get("setname", '/run/pool/createset/')
        createsize = self.params.get("size", '/run/pool/createsize/')
        coh_params = self.params.get("coh",
                                     '/run/container/container_handle/*/')

        expected_result = coh_params[1]

        try:
            # initialize a python pool object then create the underlying
            # daos storage
            self.pool = DaosPool(self.context)
            self.pool.create(createmode, createuid, creategid,
                             createsize, createsetid, None)

            poh = self.pool.handle
            self.pool.connect(1 << 1)

            # Container initialization and creation
            self.container1 = DaosContainer(self.context)
            self.container1.create(poh)
            str_cuuid = self.container1.get_uuid_str()
            cuuid = uuid.UUID(str_cuuid)
            self.container1.open(poh, cuuid, 2, None)

            # Defining 'good' and 'bad' container handles
            saved_coh = self.container1.coh
            if coh_params[0] == 'GOOD':
                coh = self.container1.coh
            else:
                # create a second container, open to get a handle
                # then close & destroy so handle is invalid
                self.container2 = DaosContainer(self.context)
                self.container2.create(poh)
                self.container2.open(poh, cuuid, 2, None)
                coh = self.container2.coh
                self.container2.close()
                self.container2.destroy()

            # close container with either good or bad handle
            self.container1.close(coh)

            if expected_result in ['FAIL']:
                self.fail("Test was expected to fail but it passed.\n")

        except DaosApiError as excep:
            if expected_result == 'PASS':
                print(excep)
                print(traceback.format_exc())
                self.fail("Test was expected to pass but it failed.\n")

            # close above failed so close for real with the right coh
            if saved_coh is not None:
                self.container1.close(saved_coh)

        finally:
            self.container1.destroy(1)
            self.pool.disconnect()
            self.pool.destroy(1)
            self.pool = None
Code Example #59
    def test_container_basics(self):
        """
        Test basic container create/destroy/open/close/query.  Nothing fancy
        just making sure they work at a rudimentary level

        :avocado: tags=container,containercreate,containerdestroy,basecont
        """

        pool = None
        hostlist = None

        try:
            hostlist = self.params.get("test_machines", '/run/hosts/*')
            hostfile = write_host_file.write_host_file(hostlist,
                                                       self.workdir)

            self.agent_sessions = AgentUtils.run_agent(self.basepath, hostlist)
            server_utils.run_server(hostfile, self.server_group, self.basepath)

            # give it time to start
            time.sleep(2)

            # parameters used in pool create
            createmode = self.params.get("mode", '/run/conttests/createmode/')
            createuid = self.params.get("uid", '/run/conttests/createuid/')
            creategid = self.params.get("gid", '/run/conttests/creategid/')
            createsetid = self.params.get("setname",
                                          '/run/conttests/createset/')
            createsize = self.params.get("size", '/run/conttests/createsize/')

            # initialize a python pool object then create the underlying
            # daos storage
            pool = DaosPool(self.context)
            pool.create(createmode, createuid, creategid,
                        createsize, createsetid, None)

            # need a connection to create container
            pool.connect(1 << 1)

            # create a container
            container = DaosContainer(self.context)
            container.create(pool.handle)

            # now open it
            container.open()

            # do a query and compare the UUID returned from create with
            # that returned by query
            container.query()

            if container.get_uuid_str() != c_uuid_to_str(
                    container.info.ci_uuid):
                self.fail("Container UUID did not match the one in info'n")

            container.close()

            # wait a few seconds and then destroy
            time.sleep(5)
            container.destroy()

        except DaosApiError as excep:
            print(excep)
            print(traceback.format_exc())
            self.fail("Test was expected to pass but it failed.\n")
        except Exception as excep:
            self.fail("Daos code segfaulted most likely, error: %s" % excep)
        finally:
            # cleanup the pool
            if pool is not None:
                pool.disconnect()
                pool.destroy(1)
            if self.agent_sessions:
                AgentUtils.stop_agent(hostlist, self.agent_sessions)
            server_utils.stop_server(hosts=hostlist)
Code Example #60
File: bad_evict.py Project: daos-stack/daos
    def test_evict(self):
        """
        Test ID: DAOS-427
        Test Description: Pass bad parameters to the pool evict clients call.

        :avocado: tags=pool,poolevict,badparam,badevict
        """

        # parameters used in pool create
        createmode = self.params.get("mode", '/run/evicttests/createmode/')
        createsetid = self.params.get("setname", '/run/evicttests/createset/')
        createsize = self.params.get("size", '/run/evicttests/createsize/')

        createuid = os.geteuid()
        creategid = os.getegid()

        # Accumulate a list of pass/fail indicators representing what is
        # expected for each parameter then "and" them to determine the
        # expected result of the test
        expected_for_param = []

        svclist = self.params.get("ranklist", '/run/evicttests/svrlist/*/')
        svc = svclist[0]
        expected_for_param.append(svclist[1])

        setlist = self.params.get("setname",
                                  '/run/evicttests/connectsetnames/*/')
        evictset = setlist[0]
        expected_for_param.append(setlist[1])

        uuidlist = self.params.get("uuid", '/run/evicttests/UUID/*/')
        excludeuuid = uuidlist[0]
        expected_for_param.append(uuidlist[1])

        # if any parameter is FAIL then the test should FAIL, in this test
        # virtually every case should FAIL since we are testing bad parameters
        expected_result = 'PASS'
        for result in expected_for_param:
            if result == 'FAIL':
                expected_result = 'FAIL'
                break

        saveduuid = None
        savedgroup = None
        savedsvc = None
        pool = None

        try:
            # setup the DAOS python API
            with open('../../../.build_vars.json') as build_file:
                data = json.load(build_file)
            context = DaosContext(data['PREFIX'] + '/lib/')

            # initialize a python pool object then create the underlying
            # daos storage
            pool = DaosPool(context)
            pool.create(createmode, createuid, creategid,
                        createsize, createsetid, None)

            # trash the pool service rank list
            if not svc == 'VALID':
                savedsvc = pool.svc
                rl_ranks = ctypes.POINTER(ctypes.c_uint)()
                pool.svc = RankList(rl_ranks, 1)

            # trash the pool group value
            if evictset is None:
                savedgroup = pool.group
                pool.group = None

            # trash the UUID value in various ways
            if excludeuuid is None:
                saveduuid = (ctypes.c_ubyte * 16)(0)
                for i in range(0, len(saveduuid)):
                    saveduuid[i] = pool.uuid[i]
                pool.uuid[0:] = [0 for i in range(0, len(pool.uuid))]
            if excludeuuid == 'JUNK':
                saveduuid = (ctypes.c_ubyte * 16)(0)
                for i in range(0, len(saveduuid)):
                    saveduuid[i] = pool.uuid[i]
                pool.uuid[4] = 244

            pool.evict()

            if expected_result in ['FAIL']:
                self.fail("Test was expected to fail but it passed.\n")

        except DaosApiError as excep:
            print(excep)
            print(traceback.format_exc())
            if expected_result in ['PASS']:
                self.fail("Test was expected to pass but it failed.\n")
        finally:
            if pool is not None:
                # if the test trashed some pool parameter, put it back the
                # way it was
                if savedgroup is not None:
                    pool.group = savedgroup
                if saveduuid is not None:
                    for i in range(0, len(saveduuid)):
                        pool.uuid[i] = saveduuid[i]
                if savedsvc is not None:
                    pool.svc = savedsvc
                pool.destroy(1)
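Every example above repeats the same try/finally teardown: disconnect if a connection was made, then pool.destroy(1). Below is a hedged sketch of that cleanup wrapped in a context manager; the name temporary_pool and the connect flag argument are assumptions, and it relies only on the DaosPool calls already shown in the examples.

import contextlib
import os

@contextlib.contextmanager
def temporary_pool(context, mode, size, setid, connect=True):
    """Yield a freshly created pool and always destroy it on exit."""
    pool = DaosPool(context)
    pool.create(mode, os.geteuid(), os.getegid(), size, setid, None)
    if connect:
        pool.connect(1 << 1)
    try:
        yield pool
    finally:
        if connect:
            pool.disconnect()
        pool.destroy(1)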