Example #1
    def test_global_handle(self):
        """
        Test ID: DAO

        Test Description: Use a pool handle in another process.

        :avocado: tags=pool,poolhandle,vm,small,regression
        """

        try:

            # use the uid/gid of the user running the test, these should
            # be perfectly valid
            createuid = os.geteuid()
            creategid = os.getegid()

            # parameters used in pool create that are in yaml
            createmode = self.params.get("mode", '/run/testparams/createmode/')
            createsetid = self.params.get("setname",
                                          '/run/testparams/createset/')
            createsize = self.params.get("size", '/run/testparams/createsize/')

            # initialize a python pool object then create the underlying
            # daos storage
            pool = DaosPool(self.context)
            pool.create(createmode, createuid, creategid,
                        createsize, createsetid, None)
            pool.connect(1 << 1)

            # create a container just to make sure handle is good
            container = DaosContainer(self.context)
            container.create(pool.handle)

            # create a global handle
            iov_len, buf_len, buf = pool.local2global()

            # this should work in the future but need on-line server addition
            #arg_list = (buf_len, iov_len, buf, pool.get_uuid_str(), 0)
            #p = Process(target=check_handle, args=arg_list)
            #p.start()
            #p.join()
            # for now verifying global handle in the same process which is not
            # the intended use case
            check_handle(buf_len, iov_len, buf, pool.get_uuid_str(), 0)

        except DaosApiError as excep:
            print(excep)
            print(traceback.format_exc())
            self.fail("Expecting to pass but test has failed.\n")
Example #2
    def test_global_handle(self):
        """
        Test ID: DAO

        Test Description: Use a pool handle in another process.

        :avocado: tags=all,pool,pr,tiny,poolglobalhandle
        """

        try:

            # use the uid/gid of the user running the test, these should
            # be perfectly valid
            createuid = os.geteuid()
            creategid = os.getegid()

            # parameters used in pool create that are in yaml
            createmode = self.params.get("mode", '/run/testparams/createmode/')
            createsetid = self.params.get("setname",
                                          '/run/testparams/createset/')
            createsize = self.params.get("size", '/run/testparams/createsize/')

            # initialize a python pool object then create the underlying
            # daos storage
            pool = DaosPool(self.context)
            pool.create(createmode, createuid, creategid,
                        createsize, createsetid, None)
            pool.connect(1 << 1)

            # create a container just to make sure handle is good
            container = DaosContainer(self.context)
            container.create(pool.handle)

            # create a global handle
            iov_len, buf_len, buf = pool.local2global()

            # this should work in the future but need on-line server addition
            #arg_list = (buf_len, iov_len, buf, pool.get_uuid_str(), 0)
            #p = Process(target=check_handle, args=arg_list)
            #p.start()
            #p.join()
            # for now verifying global handle in the same process which is not
            # the intended use case
            check_handle(buf_len, iov_len, buf, pool.get_uuid_str(), 0)

        except DaosApiError as excep:
            print(excep)
            print(traceback.format_exc())
            self.fail("Expecting to pass but test has failed.\n")
Example #3
class IorSingleServer(Test):
    """
    Tests IOR with Single Server config.

    """
    def setUp(self):
        # get paths from the build_vars generated by build
        with open('../../../.build_vars.json') as build_file:
            build_paths = json.load(build_file)
        self.basepath = os.path.normpath(build_paths['PREFIX'] + "/../")

        self.server_group = self.params.get("name", '/server_config/',
                                            'daos_server')
        self.daosctl = self.basepath + '/install/bin/daosctl'

        # setup the DAOS python API
        self.context = DaosContext(build_paths['PREFIX'] + '/lib/')
        self.pool = None

        self.hostlist_servers = self.params.get("test_servers",
                                                '/run/hosts/test_machines/*')
        self.hostfile_servers = (
            write_host_file.write_host_file(self.hostlist_servers,
                                            self.workdir))
        print("Host file servers is: {}".format(self.hostfile_servers))

        self.hostlist_clients = (
            self.params.get("clients",
                            '/run/hosts/test_machines/diff_clients/*'))
        self.hostfile_clients = (
            write_host_file.write_host_file(self.hostlist_clients,
                                            self.workdir))
        print("Host file clientsis: {}".format(self.hostfile_clients))

        self.agent_sessions = AgentUtils.run_agent(self.basepath,
                                                   self.hostlist_servers,
                                                   self.hostlist_clients)
        server_utils.run_server(self.hostfile_servers, self.server_group,
                                self.basepath)

        if int(str(self.name).split("-")[0]) == 1:
            ior_utils.build_ior(self.basepath)

    def tearDown(self):
        try:
            if self.hostfile_clients is not None:
                os.remove(self.hostfile_clients)
            if self.hostfile_servers is not None:
                os.remove(self.hostfile_servers)
            if self.pool is not None and self.pool.attached:
                self.pool.destroy(1)
        finally:
            if self.agent_sessions:
                AgentUtils.stop_agent(self.hostlist_clients,
                                      self.agent_sessions)
            server_utils.stop_server(hosts=self.hostlist_servers)

    def test_singleserver(self):
        """
        Test IOR with Single Server config.

        :avocado: tags=ior,singleserver
        """

        # parameters used in pool create
        createmode = self.params.get("mode", '/run/createtests/createmode/*/')
        createuid = os.geteuid()
        creategid = os.getegid()
        createsetid = self.params.get("setname", '/run/createtests/createset/')
        createsize = self.params.get("size", '/run/createtests/createsize/')
        createsvc = self.params.get("svcn", '/run/createtests/createsvc/')
        iteration = self.params.get("iter", '/run/ior/iteration/')
        ior_flags = self.params.get("F", '/run/ior/iorflags/')
        transfer_size = self.params.get("t", '/run/ior/transfersize/')
        record_size = self.params.get("r", '/run/ior/recordsize/')
        segment_count = self.params.get("s", '/run/ior/segmentcount/')
        stripe_count = self.params.get("c", '/run/ior/stripecount/')
        async_io = self.params.get("a", '/run/ior/asyncio/')
        object_class = self.params.get("o", '/run/ior/objectclass/')

        try:
            # initialize a python pool object then create the underlying
            # daos storage
            self.pool = DaosPool(self.context)
            self.pool.create(createmode, createuid, creategid,
                             createsize, createsetid, None, None, createsvc)
            pool_uuid = self.pool.get_uuid_str()
            print ("pool_uuid: {}".format(pool_uuid))
            tmp_rank_list = []
            svc_list = ""
            for i in range(createsvc):
                tmp_rank_list.append(int(self.pool.svc.rl_ranks[i]))
                svc_list += str(tmp_rank_list[i]) + ":"
            svc_list = svc_list[:-1]

            if len(self.hostlist_clients) == 1:
                block_size = '12g'
            elif len(self.hostlist_clients) == 2:
                block_size = '6g'
            elif len(self.hostlist_clients) == 4:
                block_size = '3g'
            else:
                self.fail("Unsupported number of client hosts: {}".format(
                    len(self.hostlist_clients)))

            ior_utils.run_ior(self.hostfile_clients, ior_flags, iteration,
                              block_size, transfer_size, pool_uuid, svc_list,
                              record_size, segment_count, stripe_count,
                              async_io, object_class, self.basepath)

        except (DaosApiError, ior_utils.IorFailed) as excep:
            self.fail("<Single Server Test FAILED>\n {}".format(excep))
Example #4
File: test_utils.py  Project: paf-49/daos
class TestPool(TestDaosApiBase):
    """A class for functional testing of DaosPools objects."""
    def __init__(self, context, log, cb_handler=None):
        """[summary].

        Args:
            context (DaosContext): DAOS context for the API calls
            log (logging): logging object used to report the pool status
            cb_handler (CallbackHandler, optional): callback object to use with
                the API methods. Defaults to None.
        """
        super(TestPool, self).__init__("/run/pool/*", cb_handler)
        self.context = context
        self.log = log
        self.uid = os.geteuid()
        self.gid = os.getegid()

        self.mode = BasicParameter(None)
        self.name = BasicParameter(None)  # server group name
        self.svcn = BasicParameter(None)
        self.target_list = BasicParameter(None)
        self.scm_size = BasicParameter(None)
        self.nvme_size = BasicParameter(None)

        self.pool = None
        self.uuid = None
        self.info = None
        self.svc_ranks = None
        self.connected = False

    @fail_on(DaosApiError)
    def create(self):
        """Create a pool.

        Destroys an existing pool if defined and assigns self.pool and
        self.uuid.
        """
        self.destroy()
        self.log.info("Creating a pool{}".format(" on targets {}".format(
            self.target_list.value) if self.target_list.value else ""))
        self.pool = DaosPool(self.context)
        kwargs = {
            "mode": self.mode.value,
            "uid": self.uid,
            "gid": self.gid,
            "scm_size": self.scm_size.value,
            "group": self.name.value
        }
        for key in ("target_list", "svcn", "nvme_size"):
            value = getattr(self, key).value
            if value:
                kwargs[key] = value
        self._call_method(self.pool.create, kwargs)
        self.uuid = self.pool.get_uuid_str()
        self.svc_ranks = [
            int(self.pool.svc.rl_ranks[index])
            for index in range(self.pool.svc.rl_nr)
        ]
        self.log.info("  Pool created with uuid {} and svc ranks {}".format(
            self.uuid, self.svc_ranks))

    @fail_on(DaosApiError)
    def connect(self, permission=1):
        """Connect to the pool.

        Args:
            permission (int, optional): connect permission. Defaults to 1.

        Returns:
            bool: True if the pool has been connected; False if the pool was
                already connected or the pool is not defined.

        """
        if self.pool and not self.connected:
            kwargs = {"flags": 1 << permission}
            self.log.info(
                "Connecting to pool %s with permission %s (flag: %s)",
                self.uuid, permission, kwargs["flags"])
            self._call_method(self.pool.connect, kwargs)
            self.connected = True
            return True
        return False

    @fail_on(DaosApiError)
    def disconnect(self):
        """Disconnect from connected pool.

        Returns:
            bool: True if the pool has been disconnected; False if the pool was
                already disconnected or the pool is not defined.

        """
        if self.pool and self.connected:
            self.log.info("Disonnecting from pool %s", self.uuid)
            self._call_method(self.pool.disconnect, {})
            self.connected = False
            return True
        return False

    @fail_on(DaosApiError)
    def destroy(self, force=1):
        """Destroy the pool.

        Args:
            force (int, optional): force flag. Defaults to 1.

        Returns:
            bool: True if the pool has been destroyed; False if the pool is not
                defined.

        """
        if self.pool:
            self.disconnect()
            self.log.info("Destroying pool %s", self.uuid)
            if self.pool.attached:
                self._call_method(self.pool.destroy, {"force": force})
            self.pool = None
            self.uuid = None
            self.info = None
            self.svc_ranks = None
            return True
        return False

    @fail_on(DaosApiError)
    def get_info(self):
        """Query the pool for information.

        Sets the self.info attribute.
        """
        if self.pool:
            self.connect()
            self._call_method(self.pool.pool_query, {})
            self.info = self.pool.pool_info

    def check_pool_info(self,
                        pi_uuid=None,
                        pi_ntargets=None,
                        pi_nnodes=None,
                        pi_ndisabled=None,
                        pi_map_ver=None,
                        pi_leader=None,
                        pi_bits=None):
        # pylint: disable=unused-argument
        """Check the pool info attributes.

        Args:
            pi_uuid (str, optional): pool uuid. Defaults to None.
            pi_ntargets (int, optional): number of targets. Defaults to None.
            pi_nnodes (int, optional): number of nodes. Defaults to None.
            pi_ndisabled (int, optional): number of disabled. Defaults to None.
            pi_map_ver (int, optional): pool map version. Defaults to None.
            pi_leader (int, optional): pool leader. Defaults to None.
            pi_bits (int, optional): pool bits. Defaults to None.

        Note:
            Arguments may also be provided as a string with a number preceded
            by '<', '<=', '>', or '>=' for comparisons other than the
            default '=='.

        Returns:
            bool: True if at least one expected value is specified and all the
                specified values match; False otherwise

        """
        self.get_info()
        checks = [(key, c_uuid_to_str(getattr(self.info, key))
                   if key == "pi_uuid" else getattr(self.info, key), val)
                  for key, val in locals().items()
                  if key != "self" and val is not None]
        return self._check_info(checks)

    def check_pool_space(self,
                         ps_free_min=None,
                         ps_free_max=None,
                         ps_free_mean=None,
                         ps_ntargets=None,
                         ps_padding=None):
        # pylint: disable=unused-argument
        """Check the pool info space attributes.

        Args:
            ps_free_min (list, optional): minimum free space per device.
                Defaults to None.
            ps_free_max (list, optional): maximum free space per device.
                Defaults to None.
            ps_free_mean (list, optional): mean free space per device.
                Defaults to None.
            ps_ntargets (int, optional): number of targets. Defaults to None.
            ps_padding (int, optional): space padding. Defaults to None.

        Note:
            Arguments may also be provided as a string with a number preceded
            by '<', '<=', '>', or '>=' for comparisons other than the
            default '=='.

        Returns:
            bool: True if at least one expected value is specified and all the
                specified values match; False otherwise

        """
        self.get_info()
        checks = []
        for key in ("ps_free_min", "ps_free_max", "ps_free_mean"):
            val = locals()[key]
            if isinstance(val, list):
                for index, item in enumerate(val):
                    checks.append(("{}[{}]".format(key, index),
                                   getattr(self.info.pi_space,
                                           key)[index], item))
        for key in ("ps_ntargets", "ps_padding"):
            val = locals()[key]
            if val is not None:
                checks.append((key, getattr(self.info.pi_space, key), val))
        return self._check_info(checks)

    def check_pool_daos_space(self, s_total=None, s_free=None):
        # pylint: disable=unused-argument
        """Check the pool info daos space attributes.

        Args:
            s_total (list, optional): total space per device. Defaults to None.
            s_free (list, optional): free space per device. Defaults to None.

        Note:
            Arguments may also be provided as a string with a number preceded
            by '<', '<=', '>', or '>=' for comparisons other than the
            default '=='.

        Returns:
            bool: True if at least one expected value is specified and all the
                specified values match; False otherwise

        """
        self.get_info()
        checks = [("{}_{}".format(key, index),
                   getattr(self.info.pi_space.ps_space, key)[index], item)
                  for key, val in locals().items()
                  if key != "self" and val is not None
                  for index, item in enumerate(val)]
        return self._check_info(checks)

    def check_rebuild_status(self,
                             rs_version=None,
                             rs_pad_32=None,
                             rs_errno=None,
                             rs_done=None,
                             rs_toberb_obj_nr=None,
                             rs_obj_nr=None,
                             rs_rec_nr=None):
        # pylint: disable=unused-argument
        """Check the pool info rebuild attributes.

        Args:
            rs_version (int, optional): rebuild version. Defaults to None.
            rs_pad_32 (int, optional): rebuild pad. Defaults to None.
            rs_errno (int, optional): rebuild error number. Defaults to None.
            rs_done (int, optional): rebuild done flag. Defaults to None.
            rs_toberb_obj_nr (int, optional): number of objects to be rebuilt.
                Defaults to None.
            rs_obj_nr (int, optional): number of rebuilt objects.
                Defaults to None.
            rs_rec_nr (int, optional): number of rebuilt records.
                Defaults to None.

        Note:
            Arguments may also be provided as a string with a number preceded
            by '<', '<=', '>', or '>=' for comparisons other than the
            default '=='.

        Returns:
            bool: True if at least one expected value is specified and all the
                specified values match; False otherwise

        """
        self.get_info()
        checks = [(key, getattr(self.info.pi_rebuild_st, key), val)
                  for key, val in locals().items()
                  if key != "self" and val is not None]
        return self._check_info(checks)

    def _check_info(self, check_list):
        """Verify each pool info attribute value matches an expected value.

        Args:
            check_list (list): a list of tuples containing the name of the pool
                information attribute to check, the current value of the
                attribute, and the expected value of the attribute. If the
                expected value is specified as a string with a number preceded
                by '<', '<=', '>', or '>=' then this comparison will be used
                instead of the default '=='.

        Returns:
            bool: True if at least one check has been specified and all the
            actual and expected values match; False otherwise.

        """
        check_status = len(check_list) > 0
        for check, actual, expect in check_list:
            # Determine which comparison to utilize for this check
            compare = ("==", lambda x, y: x == y, "does not match")
            if isinstance(expect, str):
                comparisons = {
                    "<": (lambda x, y: x < y, "is too large"),
                    ">": (lambda x, y: x > y, "is too small"),
                    "<=":
                    (lambda x, y: x <= y, "is too large or does not match"),
                    ">=":
                    (lambda x, y: x >= y, "is too small or does not match"),
                }
                # test longer operator strings first so that '<=' and '>='
                # are not mistaken for '<' and '>'
                for key in sorted(comparisons, key=len, reverse=True):
                    val = comparisons[key]
                    # If the expected value is preceded by one of the known
                    # comparison keys, use that comparison and remove the key
                    # from the expected value
                    if expect[:len(key)] == key:
                        compare = (key, val[0], val[1])
                        expect = expect[len(key):]
                        try:
                            expect = int(expect)
                        except ValueError:
                            # Allow strings to be strings
                            pass
                        break
            self.log.info("Verifying the pool %s: %s %s %s", check, actual,
                          compare[0], expect)
            if not compare[1](actual, expect):
                msg = "  The {} {}: actual={}, expected={}".format(
                    check, compare[2], actual, expect)
                self.log.error(msg)
                check_status = False
        return check_status

    def rebuild_complete(self):
        """Determine if the pool rebuild is complete.

        Returns:
            bool: True if pool rebuild is complete; False otherwise

        """
        self.get_info()
        return self.info.pi_rebuild_st.rs_done == 1

    def wait_for_rebuild(self, to_start, interval=1):
        """Wait for the rebuild to start or end.

        Args:
            to_start (bool): whether to wait for rebuild to start or end
            interval (int): number of seconds to wait in between rebuild
                completion checks
        """
        self.log.info("Waiting for rebuild to %s ...",
                      "start" if to_start else "complete")
        while self.rebuild_complete() == to_start:
            self.log.info("  Rebuild %s ...",
                          "has not yet started" if to_start else "in progress")
            sleep(interval)
        self.log.info("Rebuild %s detected",
                      "start" if to_start else "completion")

    @fail_on(DaosApiError)
    def start_rebuild(self, ranks, daos_log):
        """Kill the specific server ranks using this pool.

        Args:
            ranks (list): a list of daos server ranks (int) to kill
            daos_log (DaosLog): object for logging messages

        Returns:
            bool: True if the server ranks have been killed and the ranks have
            been excluded from the pool; False if the pool is undefined

        """
        msg = "Killing DAOS ranks {} from server group {}".format(
            ranks, self.name.value)
        self.log.info(msg)
        daos_log.info(msg)
        for rank in ranks:
            server = DaosServer(self.context, self.name.value, rank)
            server.kill(1)
        return self.exclude(ranks, daos_log)

    @fail_on(DaosApiError)
    def exclude(self, ranks, daos_log):
        """Manually exclude a rank from this pool.

        Args:
            ranks (list): a list daos server ranks (int) to exclude
            daos_log (DaosLog): object for logging messages

        Returns:
            bool: True if the ranks were excluded from the pool; False if the
                pool is undefined

        """
        if self.pool:
            msg = "Excluding server ranks {} from pool {}".format(
                ranks, self.uuid)
            self.log.info(msg)
            daos_log.info(msg)
            self._call_method(self.pool.exclude, {"rank_list": ranks})
            return True
        return False

    def check_files(self, hosts):
        """Check if pool files exist on the specified list of hosts.

        Args:
            hosts (list): list of hosts

        Returns:
            bool: True if the files for this pool exist on each host; False
                otherwise

        """
        return check_pool_files(self.log, hosts, self.uuid.lower())

    def write_file(self, orterun, processes, hostfile, size, timeout=60):
        """Write a file to the pool.

        Args:
            orterun (str): full path to the orterun command
            processes (int): number of processes to launch
            hostfile (str): hostfile listing the clients from which to write
                the file
            size (int): size of the file to create in bytes
            timeout (int, optional): number of seconds before timing out the
                command. Defaults to 60 seconds.

        Returns:
            process.CmdResult: command execution result

        """
        self.log.info("Writing {} bytes to pool {}".format(size, self.uuid))
        env = {
            "DAOS_POOL": self.uuid,
            "DAOS_SVCL": "1",
            "DAOS_SINGLETON_CLI": "1",
            "PYTHONPATH": os.getenv("PYTHONPATH", ""),
        }
        current_path = os.path.dirname(os.path.abspath(__file__))
        command = "{} --np {} --hostfile {} {} {} testfile".format(
            orterun, processes, hostfile,
            os.path.join(current_path, "write_some_data.py"), size)
        return process.run(command, timeout, True, False, "both", True, env)

    def get_pool_daos_space(self):
        """Get the pool info daos space attributes as a dictionary.

        Returns:
            dict: a dictionary of lists of the daos space attributes

        """
        self.get_info()
        keys = ("s_total", "s_free")
        return {key: getattr(self.info.pi_space.ps_space, key) for key in keys}

    def display_pool_daos_space(self, msg=None):
        """Display the pool info daos space attributes.

        Args:
            msg (str, optional): optional text to include in the output.
                Defaults to None.
        """
        daos_space = self.get_pool_daos_space()
        sizes = [
            "{}[{}]={}".format(key, index, item)
            for key in sorted(daos_space.keys())
            for index, item in enumerate(daos_space[key])
        ]
        self.log.info("Pool %s space%s:\n  %s", self.uuid,
                      " " + msg if isinstance(msg, str) else "",
                      "\n  ".join(sizes))

    def read_data_during_rebuild(self, container):
        """Read data from the container while rebuild is active.

        Args:
            container (TestContainer): container from which to read data

        Returns:
            bool: True if all the data is read successfully before rebuild
                completes; False otherwise

        """
        container.open()
        self.log.info("Reading objects in container %s during rebuild",
                      self.uuid)

        # Attempt to read all of the data from the container during rebuild
        index = 0
        status = read_incomplete = index < len(container.written_data)
        while not self.rebuild_complete() and read_incomplete:
            try:
                status &= container.written_data[index].read_object(container)
            except DaosTestError as error:
                self.log.error(str(error))
                status = False
            index += 1
            read_incomplete = index < len(container.written_data)

        # Verify that all of the container data was read successfully
        if read_incomplete:
            self.log.error(
                "Rebuild completed before all the written data could be read")
            status = False
        elif not status:
            self.log.error("Errors detected reading data during rebuild")
        return status
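Note: a hedged usage sketch of the check methods above. The surrounding test
object, its logger, and the get_params() helper are assumptions (presumably
provided by TestDaosApiBase), not shown in this listing:

    pool = TestPool(self.context, self.log)
    pool.get_params(self)  # assumed helper that fills in BasicParameter values
    pool.create()
    pool.connect()

    # all specified attributes must match for the check to return True
    if not pool.check_pool_info(pi_uuid=pool.uuid, pi_ndisabled=0):
        self.fail("unexpected pool info")

    # prefix notation selects the comparison: rebuild finished with no error
    if not pool.check_rebuild_status(rs_errno=0, rs_done=1):
        self.fail("unexpected rebuild status")

    # per-index space checks take a list; '>0' uses the '>' comparison
    if not pool.check_pool_daos_space(s_free=[">0", ">0"]):
        self.fail("no free space reported")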
Example #5
class PoolSvc(TestWithServers):
    """
    Tests svc argument while pool create.
    :avocado: recursive
    """
    def setUp(self):
        super(PoolSvc, self).setUp()
        self.pool = None

        self.hostfile_servers = None
        self.hostlist_servers = self.params.get("test_machines",
                                                '/run/hosts/*')
        self.hostfile_servers = write_host_file.write_host_file(
            self.hostlist_servers, self.workdir)
        print("Host file is: {}".format(self.hostfile_servers))

        self.agent_sessions = agent_utils.run_agent(self.basepath,
                                                    self.hostlist_servers)
        server_utils.run_server(self.hostfile_servers, self.server_group,
                                self.basepath)

    def tearDown(self):
        try:
            if self.pool is not None and self.pool.attached:
                self.pool.destroy(1)
        finally:
            super(PoolSvc, self).tearDown()

    def test_poolsvc(self):
        """
        Test svc arg during pool create.

        :avocado: tags=pool,svc
        """

        # parameters used in pool create
        createmode = self.params.get("mode", '/run/createtests/createmode/*/')
        createuid = os.geteuid()
        creategid = os.getegid()
        createsetid = self.params.get("setname", '/run/createtests/createset/')
        createsize = self.params.get("size", '/run/createtests/createsize/')
        createsvc = self.params.get("svc", '/run/createtests/createsvc/*/')

        expected_result = createsvc[1]

        try:
            # initialize a python pool object then create the underlying
            # daos storage
            self.pool = DaosPool(self.context)
            self.pool.create(createmode, createuid, creategid, createsize,
                             createsetid, None, None, createsvc[0])
            self.pool.connect(1 << 1)

            # checking returned rank list for server more than 1
            iterator = 0
            while (int(self.pool.svc.rl_ranks[iterator]) > 0
                   and int(self.pool.svc.rl_ranks[iterator]) <= createsvc[0]
                   and int(self.pool.svc.rl_ranks[iterator]) != 999999):
                iterator += 1
            if iterator != createsvc[0]:
                self.fail("Length of Returned Rank list is not equal to "
                          "the number of Pool Service members.\n")
            rank_list = []
            for iterator in range(createsvc[0]):
                rank_list.append(int(self.pool.svc.rl_ranks[iterator]))
            if len(rank_list) != len(set(rank_list)):
                self.fail("Duplicate values in returned rank list")

            if createsvc[0] == 3:
                self.pool.disconnect()
                cmd = ('{0} kill-leader  --uuid={1}'.format(
                    self.daosctl, self.pool.get_uuid_str()))
                process.system(cmd)
                self.pool.connect(1 << 1)
                self.pool.disconnect()
                server = DaosServer(self.context, self.server_group, 2)
                server.kill(1)
                self.pool.exclude([2])
                self.pool.connect(1 << 1)

            if expected_result in ['FAIL']:
                self.fail("Test was expected to fail but it passed.\n")

        except DaosApiError as excep:
            print(excep)
            print(traceback.format_exc())
            if expected_result == 'PASS':
                self.fail("Test was expected to pass but it failed.\n")
Example #6
class ObjectMetadata(avocado.Test):
    """
    Test Class Description:
        Test the general Metadata operations and boundary conditions.
    """

    def setUp(self):
        self.agent_sessions = None
        self.pool = None
        self.hostlist = None
        self.hostfile_clients = None
        self.hostfile = None
        self.out_queue = None
        self.pool_connect = True

        with open('../../../.build_vars.json') as json_f:
            build_paths = json.load(json_f)

        self.basepath = os.path.normpath(build_paths['PREFIX'] + "/../")
        self.server_group = self.params.get("name",
                                            '/server_config/',
                                            'daos_server')
        self.context = DaosContext(build_paths['PREFIX'] + '/lib/')
        self.d_log = DaosLog(self.context)
        self.hostlist = self.params.get("servers", '/run/hosts/*')
        self.hostfile = write_host_file.write_host_file(self.hostlist,
                                                        self.workdir)
        self.hostlist_clients = self.params.get("clients", '/run/hosts/*')
        self.hostfile_clients = (
            write_host_file.write_host_file(self.hostlist_clients,
                                            self.workdir))
        self.agent_sessions = AgentUtils.run_agent(self.basepath, self.hostlist,
                                                   self.hostlist_clients)
        server_utils.run_server(self.hostfile, self.server_group, self.basepath)

        self.pool = DaosPool(self.context)
        self.pool.create(self.params.get("mode", '/run/pool/createmode/*'),
                         os.geteuid(),
                         os.getegid(),
                         self.params.get("size", '/run/pool/createsize/*'),
                         self.params.get("setname", '/run/pool/createset/*'),
                         nvme_size=self.params.get("size",
                                                   '/run/pool/nvmesize/*'))

    def tearDown(self):
        try:
            if self.pool_connect:
                self.pool.disconnect()
            if self.pool:
                self.pool.destroy(1)
        finally:
            if self.agent_sessions:
                AgentUtils.stop_agent(self.hostlist_clients,
                                      self.agent_sessions)
            server_utils.stop_server(hosts=self.hostlist)

    @avocado.skip("Skipping until DAOS-1936/DAOS-1946 is fixed.")
    def test_metadata_fillup(self):
        """
        Test ID: DAOS-1512
        Test Description: Test to verify no IO happens after metadata is full.
        :avocado: tags=metadata,metadata_fill,nvme,small
        """
        self.pool.connect(2)
        container = DaosContainer(self.context)
        self.d_log.debug("Fillup Metadata....")
        for _cont in range(NO_OF_MAX_CONTAINER):
            container.create(self.pool.handle)
        self.d_log.debug("Metadata Overload...")
        # This should fail with a "no metadata space" error.
        try:
            for _cont in range(250):
                container.create(self.pool.handle)
        except DaosApiError as exe:
            print(exe, traceback.format_exc())
            return

        self.fail("Test was expected to fail but it passed.\n")

    @avocado.skip("Skipping until DAOS-1965 is fixed.")
    @avocado.fail_on(DaosApiError)
    def test_metadata_addremove(self):
        """
        Test ID: DAOS-1512
        Test Description: Verify metadata release the space
                          after container delete.
        :avocado: tags=metadata,metadata_free_space,nvme,small
        """
        self.pool.connect(2)
        for k in range(10):
            container_array = []
            self.d_log.debug("Container Create Iteration {}".format(k))
            for cont in range(NO_OF_MAX_CONTAINER):
                container = DaosContainer(self.context)
                container.create(self.pool.handle)
                container_array.append(container)
            self.d_log.debug("Container Remove Iteration {} ".format(k))
            for cont in container_array:
                cont.destroy()

    def thread_control(self, threads, operation):
        """
        Start threads and wait until all threads have finished executing.
        Check the queue for a "FAIL" message and return "FAIL" so the
        caller can fail the avocado test.
        """
        self.d_log.debug("IOR {0} Threads Started -----".format(operation))
        for thrd in threads:
            thrd.start()
        for thrd in threads:
            thrd.join()

        while not self.out_queue.empty():
            if self.out_queue.get() == "FAIL":
                return "FAIL"
        self.d_log.debug("IOR {0} Threads Finished -----".format(operation))
        return "PASS"

    @avocado.fail_on(DaosApiError)
    def test_metadata_server_restart(self):
        """
        Test ID: DAOS-1512
        Test Description: This test will verify 2000 small IOR containers
                          after a server restart. The test will write IOR in 5
                          different threads for faster execution time. Each
                          thread will create 400 (8 byte) containers in the
                          same pool. Restart the servers, read the IOR
                          container files written previously, and validate
                          data integrity by using the IOR options "-R -G 1".
        :avocado: tags=metadata,metadata_ior,nvme,small
        """
        self.pool_connect = False
        files_per_thread = 400
        total_ior_threads = 5
        threads = []
        ior_args = {}

        createsvc = self.params.get("svcn", '/run/pool/createsvc/')
        svc_list = ""
        for i in range(createsvc):
            svc_list += str(int(self.pool.svc.rl_ranks[i])) + ":"
        svc_list = svc_list[:-1]

        ior_args['client_hostfile'] = self.hostfile_clients
        ior_args['pool_uuid'] = self.pool.get_uuid_str()
        ior_args['svc_list'] = svc_list
        ior_args['basepath'] = self.basepath
        ior_args['server_group'] = self.server_group
        ior_args['tmp_dir'] = self.workdir
        ior_args['iorwriteflags'] = self.params.get("F",
                                                    '/run/ior/iorwriteflags/')
        ior_args['iorreadflags'] = self.params.get("F",
                                                   '/run/ior/iorreadflags/')
        ior_args['iteration'] = self.params.get("iter", '/run/ior/iteration/')
        ior_args['stripe_size'] = self.params.get("s", '/run/ior/stripesize/*')
        ior_args['stripe_count'] = self.params.get("c", '/run/ior/stripecount/')
        ior_args['async_io'] = self.params.get("a", '/run/ior/asyncio/')
        ior_args['object_class'] = self.params.get("o", '/run/ior/objectclass/')
        ior_args['slots'] = self.params.get("slots", '/run/ior/clientslots/*')

        ior_args['files_per_thread'] = files_per_thread
        self.out_queue = Queue.Queue()

        # IOR write threads
        for i in range(total_ior_threads):
            threads.append(threading.Thread(target=ior_runner_thread,
                                            args=(self.out_queue,
                                                  "Thread-{}".format(i),
                                                  "write"),
                                            kwargs=ior_args))
        if self.thread_control(threads, "write") == "FAIL":
            self.d_log.error(" IOR write Thread FAIL")
            self.fail(" IOR write Thread FAIL")

        # Server restart
        if self.agent_sessions:
            AgentUtils.stop_agent(self.hostlist_clients, self.agent_sessions)
        server_utils.stop_server(hosts=self.hostlist)
        self.agent_sessions = AgentUtils.run_agent(self.basepath,
                                                   self.hostlist,
                                                   self.hostlist_clients)
        server_utils.run_server(self.hostfile, self.server_group, self.basepath)

        # Read IOR with verification using the same number of threads
        threads = []
        for i in range(total_ior_threads):
            threads.append(threading.Thread(target=ior_runner_thread,
                                            args=(self.out_queue,
                                                  "Thread-{}".format(i),
                                                  "read"),
                                            kwargs=ior_args))
        if self.thread_control(threads, "read") == "FAIL":
            self.d_log.error(" IOR write Thread FAIL")
            self.fail(" IOR read Thread FAIL")
Example #7
    def test_bad_handle(self):
        """
        Test ID: DAOS-1376

        Test Description: Pass a bogus object handle, should return bad handle.

        :avocado: tags=object,objupdate,objbadhand,regression,vm,small
        """
        try:
            # parameters used in pool create
            createmode = self.params.get("mode", '/run/conttests/createmode/')
            createsetid = self.params.get("setname",
                                          '/run/conttests/createset/')
            createsize = self.params.get("size", '/run/conttests/createsize/')
            createuid = os.geteuid()
            creategid = os.getegid()

            # initialize a python pool object then create the underlying
            # daos storage
            pool = DaosPool(self.context)
            pool.create(createmode, createuid, creategid,
                        createsize, createsetid, None)
            self.plog.info("Pool %s created.", pool.get_uuid_str())

            # need a connection to create container
            pool.connect(1 << 1)

            # create a container
            container = DaosContainer(self.context)
            container.create(pool.handle)
            self.plog.info("Container %s created.", container.get_uuid_str())

            # now open it
            container.open()

            # create an object and write some data into it
            thedata = "a string that I want to stuff into an object"
            thedatasize = len(thedata) + 1
            dkey = "this is the dkey"
            akey = "this is the akey"
            obj, dummy_tx = container.write_an_obj(thedata, thedatasize,
                                                   dkey, akey, None, None, 2)

            saved_oh = obj.obj_handle
            obj.obj_handle = 99999

            obj, dummy_tx = container.write_an_obj(thedata, thedatasize,
                                                   dkey, akey, obj, None, 2)

            obj.obj_handle = saved_oh
            container.close()
            container.destroy()
            pool.disconnect()
            pool.destroy(1)
            self.fail("Test was expected to return a -1002 but it has not.\n")

        except DaosApiError as excep:
            obj.obj_handle = saved_oh
            container.close()
            container.destroy()
            pool.disconnect()
            pool.destroy(1)
            self.plog.info("Test Complete")
            if '-1002' not in str(excep):
                print(excep)
                print(traceback.format_exc())
                self.fail("Test was expected to get -1002 but it has not.\n")
Example #8
class DestroyRebuild(Test):

    """
    Test Class Description:
    This test verifies destruction of a pool that is rebuilding.

    :avocado: tags=pool,pooldestroy,rebuild,desreb
    """

    build_paths = []
    server_group = ""
    CONTEXT = None
    POOL = None
    hostfile = ""

    def setUp(self):
        """ setup for the test """

        # get paths from the build_vars generated by build
        with open('../../../.build_vars.json') as f:
            build_paths = json.load(f)
        self.CONTEXT = DaosContext(build_paths['PREFIX'] + '/lib/')

        # generate a hostfile
        self.hostlist = self.params.get("test_machines",'/run/hosts/')
        tmp = build_paths['PREFIX'] + '/tmp'
        self.hostfile = WriteHostFile.WriteHostFile(self.hostlist, tmp)

        # fire up the DAOS servers
        self.server_group = self.params.get("server_group",'/run/server/',
                                      'daos_server')
        ServerUtils.runServer(self.hostfile, self.server_group,
                             build_paths['PREFIX'] + '/../')
        time.sleep(3)

        # create a pool to test with
        createmode = self.params.get("mode",'/run/pool/createmode/')
        createuid  = self.params.get("uid",'/run/pool/createuid/')
        creategid  = self.params.get("gid",'/run/pool/creategid/')
        createsetid = self.params.get("setname",'/run/pool/createset/')
        createsize  = self.params.get("size",'/run/pool/createsize/')
        self.POOL = DaosPool(self.CONTEXT)
        self.POOL.create(createmode, createuid, creategid, createsize,
                         createsetid)
        uuid = self.POOL.get_uuid_str()

        time.sleep(2)

    def tearDown(self):
        """ cleanup after the test """

        try:
            os.remove(self.hostfile)
            if self.POOL:
                self.POOL.destroy(1)
        finally:
            ServerUtils.stopServer(hosts=self.hostlist)


    def test_destroy_while_rebuilding(self):
        """
        :avocado: tags=pool,pooldestroy,rebuild,desreb
        """
        try:
            print "\nsetup complete, starting test\n"

            # create a server object that references one of our pool target
            # hosts and then kill it
            svr_to_kill = int(self.params.get("rank_to_kill",
                                              '/run/testparams/ranks/'))
            sh = DaosServer(self.CONTEXT, bytes(self.server_group), svr_to_kill)

            print "created server "

            # BUG if you don't connect the rebuild doesn't start correctly
            self.POOL.connect(1 << 1)
            status = self.POOL.pool_query()
            if status.pi_ntargets != len(self.hostlist):
                self.fail("target count wrong.\n")
            if status.pi_ndisabled != 0:
                self.fail("disabled target count wrong.\n")

            print "connect "

            time.sleep(1)
            sh.kill(1)

            print "killed server "

            # exclude the target from the dead server
            self.POOL.exclude([svr_to_kill])

            print "exclude target "

            #self.POOL.disconnect()
            #print "disconnect "

            # the rebuild won't take long since there is no data so do
            # the destroy quickly
            self.POOL.destroy(1)
            print "destroy "

        except DaosApiError as e:
            print(e)
            print(traceback.format_exc())
            self.fail("Expecting to pass but test has failed.\n")
Example #9
class PoolSvc(Test):
    """
    Tests svc argument while pool create.

    """
    def setUp(self):
        # get paths from the build_vars generated by build
        with open('../../../.build_vars.json') as f:
            build_paths = json.load(f)
        self.basepath = os.path.normpath(build_paths['PREFIX'] + "/../")
        self.tmp = build_paths['PREFIX'] + '/tmp'

        self.server_group = self.params.get("server_group", '/server/',
                                            'daos_server')
        self.daosctl = self.basepath + '/install/bin/daosctl'

        # setup the DAOS python API
        self.Context = DaosContext(build_paths['PREFIX'] + '/lib/')
        self.POOL = None

        self.hostfile = None
        self.hostlist = self.params.get("test_machines", '/run/hosts/*')
        self.hostfile = WriteHostFile.WriteHostFile(self.hostlist, self.tmp)
        print("Host file is: {}".format(self.hostfile))

        ServerUtils.runServer(self.hostfile, self.server_group, self.basepath)
        time.sleep(5)

    def tearDown(self):
        try:
            if self.hostfile is not None:
                os.remove(self.hostfile)
            if self.POOL is not None and self.POOL.attached:
                self.POOL.destroy(1)
        finally:
            ServerUtils.stopServer(hosts=self.hostlist)

    def test_poolsvc(self):
        """
        Test svc arg during pool create.

        :avocado: tags=pool,svc
        """

        # parameters used in pool create
        createmode = self.params.get("mode", '/run/createtests/createmode/*/')
        createuid = os.geteuid()
        creategid = os.getegid()
        createsetid = self.params.get("setname", '/run/createtests/createset/')
        createsize = self.params.get("size", '/run/createtests/createsize/')
        createsvc = self.params.get("svc", '/run/createtests/createsvc/*/')

        expected_result = createsvc[1]

        try:
            # initialize a python pool object then create the underlying
            # daos storage
            self.POOL = DaosPool(self.Context)
            self.POOL.create(createmode, createuid, creategid,
                             createsize, createsetid, None, None,
                             createsvc[0])
            self.POOL.connect(1 << 1)
            # checking returned rank list value for single server
            if (len(self.hostlist) == 1 and
                    int(self.POOL.svc.rl_ranks[0]) != 0):
                self.fail("Incorrect returned rank list value for single "
                          "server")
            # checking returned rank list for server more than 1
            i = 0
            while (int(self.POOL.svc.rl_ranks[i]) > 0 and
                   int(self.POOL.svc.rl_ranks[i]) <= createsvc[0] and
                   int(self.POOL.svc.rl_ranks[i]) != 999999):
                i += 1
            if i != createsvc[0]:
                self.fail("Length of Returned Rank list is not equal to "
                          "the number of Pool Service members.\n")
            rank_list = []
            for j in range(createsvc[0]):
                rank_list.append(int(self.POOL.svc.rl_ranks[j]))
            if len(rank_list) != len(set(rank_list)):
                self.fail("Duplicate values in returned rank list")

            if createsvc[0] == 3:
                self.POOL.disconnect()
                cmd = ('{0} kill-leader  --uuid={1}'
                        .format(self.daosctl, self.POOL.get_uuid_str()))
                process.system(cmd)
                self.POOL.connect(1 << 1)
                self.POOL.disconnect()
                server = DaosServer(self.Context, self.server_group, 2)
                server.kill(1)
                self.POOL.exclude([2])
                self.POOL.connect(1 << 1)

            if expected_result in ['FAIL']:
                self.fail("Test was expected to fail but it passed.\n")

        except DaosApiError as e:
            print(e)
            print(traceback.format_exc())
            if expected_result == 'PASS':
                self.fail("Test was expected to pass but it failed.\n")
Example #10
class SegCount(Test):
    """
    Test class Description: Runs IOR with different segment counts.

    """

    def __init__(self, *args, **kwargs):

        super(SegCount, self).__init__(*args, **kwargs)

        self.basepath = None
        self.context = None
        self.pool = None
        self.slots = None
        self.hostlist_servers = None
        self.hostlist_clients = None
        self.hostfile_clients = None

    def setUp(self):
        # get paths from the build_vars generated by build
        with open('../../../.build_vars.json') as build_file:
            build_paths = json.load(build_file)
        self.basepath = os.path.normpath(build_paths['PREFIX'] + "/../")

        self.server_group = self.params.get("name", '/server_config/',
                                            'daos_server')

        # setup the DAOS python API
        self.context = DaosContext(build_paths['PREFIX'] + '/lib/')

        self.hostlist_servers = self.params.get("test_servers", '/run/hosts/*')
        hostfile_servers = (
            write_host_file.write_host_file(self.hostlist_servers,
                                            self.workdir))
        print("Host file servers is: {}".format(hostfile_servers))

        hostlist_clients = self.params.get("test_clients", '/run/hosts/*')
        self.slots = self.params.get("slots", '/run/ior/clientslots/*')
        self.hostfile_clients = (
            write_host_file.write_host_file(hostlist_clients, self.workdir,
                                            self.slots))
        print("Host file clients is: {}".format(self.hostfile_clients))

        self.agent_sessions = AgentUtils.run_agent(self.basepath,
                                                   self.hostlist_servers,
                                                   self.hostlist_clients)
        server_utils.run_server(hostfile_servers, self.server_group,
                                self.basepath)

        if int(str(self.name).split("-")[0]) == 1:
            ior_utils.build_ior(self.basepath)

    def tearDown(self):
        try:
            if self.pool is not None and self.pool.attached:
                self.pool.destroy(1)
        finally:
            if self.agent_sessions:
                AgentUtils.stop_agent(self.hostlist_clients,
                                      self.agent_sessions)
            server_utils.stop_server(hosts=self.hostlist_servers)

    def test_segcount(self):
        """
        Test ID: DAOS-1782
        Test Description: Run IOR with 32,64 and 128 clients with different
                          segment counts.
        Use Cases: Different combinations of 32/64/128 Clients, 8b/1k/4k
                   record size, 1k/4k/1m/8m transfersize and stripesize
                   and 16 async io.
        :avocado: tags=ior,eightservers,ior_segcount,performance
        """

        # parameters used in pool create
        createmode = self.params.get("mode", '/run/pool/createmode/*/')
        createuid = os.geteuid()
        creategid = os.getegid()
        createsetid = self.params.get("setname", '/run/pool/createset/')
        createsize = self.params.get("size", '/run/pool/createsize/')
        createsvc = self.params.get("svcn", '/run/pool/createsvc/')
        iteration = self.params.get("iter", '/run/ior/iteration/')
        ior_flags = self.params.get("F", '/run/ior/iorflags/')
        stripe_count = self.params.get("c", '/run/ior/stripecount/')
        async_io = self.params.get("a", '/run/ior/asyncio/')
        object_class = self.params.get("o", '/run/ior/objectclass/*/')
        record_size = self.params.get("r", '/run/ior/recordsize/*')
        block_size = (
            self.params.get("b",
                            '/run/ior/blocksize_transfersize_stripesize/*/'))
        transfer_size = (
            self.params.get("t",
                            '/run/ior/blocksize_transfersize_stripesize/*/'))
        stripe_size = (
            self.params.get("s",
                            '/run/ior/blocksize_transfersize_stripesize/*/'))


        # segment counts pair block size with client slots so that the data
        # written per client node stays roughly constant
        segment_counts = {
            ('4k', 16): 491500, ('4k', 32): 245750, ('4k', 64): 122875,
            ('1m', 16): 1920, ('1m', 32): 960, ('1m', 64): 480,
            ('4m', 16): 480, ('4m', 32): 240, ('4m', 64): 120,
        }
        if (block_size, self.slots) not in segment_counts:
            self.fail("Unsupported block size / slots combination: "
                      "{} / {}".format(block_size, self.slots))
        segment_count = segment_counts[(block_size, self.slots)]

        try:
            # initialize a python pool object then create the underlying
            # daos storage
            self.pool = DaosPool(self.context)
            self.pool.create(createmode, createuid, creategid,
                             createsize, createsetid, None, None, createsvc)

            pool_uuid = self.pool.get_uuid_str()
            svc_list = ""
            for i in range(createsvc):
                svc_list += str(int(self.pool.svc.rl_ranks[i])) + ":"
            svc_list = svc_list[:-1]

            ior_utils.run_ior(self.hostfile_clients, ior_flags, iteration,
                              block_size, transfer_size, pool_uuid, svc_list,
                              record_size, stripe_size, stripe_count, async_io,
                              object_class, self.basepath, self.slots,
                              segment_count)

        except (ior_utils.IorFailed, DaosApiError) as excep:
            self.fail("<SegCount Test FAILED>.{}".format(excep))
Example #11
File: soak.py  Project: daos-stack/daos
class Soak(Test):
    """
    Test class Description: DAOS Soak test cases
    """

    def job_done(self, args):
        """
        This is a callback function called when a job is done

        handle --which job, i.e. the job ID
        state  --string indicating job completion status
        """

        self.soak_results[args["handle"]] = args["state"]


    def create_pool(self):
        """
        Creates a pool that the various tests use for storage.
        """

        createmode = self.params.get("mode", '/run/pool1/createmode/*/')
        createuid = os.geteuid()
        creategid = os.getegid()
        createsetid = self.params.get("setname", '/run/pool1/createset/')
        createsize = self.params.get("size", '/run/pool1/createsize/')
        self.createsvc = self.params.get("svcn", '/run/pool1/createsvc/')

        self.pool = DaosPool(self.context)
        self.pool.create(createmode, createuid, creategid,
                         createsize, createsetid, None, None,
                         self.createsvc)

    def build_ior_script(self, job):
        """
        Builds an IOR command string which is then added to slurm script

        job --which job to read in the yaml file

        """

        # building IOR here is disabled for the moment
        #IorUtils.build_ior(self.basepath)

        # read job info
        job_params = "/run/" + job + "/"
        job_name = self.params.get("name", job_params)
        job_nodes = self.params.get("nodes", job_params)
        job_processes = self.params.get("process_per_node",
                                        job_params)
        job_spec = self.params.get("jobspec", job_params)

        # read ior cmd info
        spec = "/run/" + job_spec + "/"
        iteration = self.params.get("iter", spec + 'iteration/')
        ior_flags = self.params.get("F", spec + 'iorflags/')
        transfer_size = self.params.get("t", spec + 'transfersize/')
        record_size = self.params.get("r", spec + 'recordsize/*')
        stripe_size = self.params.get("s", spec + 'stripesize/*')
        stripe_count = self.params.get("c", spec + 'stripecount/')
        async_io = self.params.get("a", spec + 'asyncio/')
        object_class = self.params.get("o", spec + 'objectclass/')

        self.partition = self.params.get("partition",
                                         '/run/hosts/test_machines/')

        pool_uuid = self.pool.get_uuid_str()
        tmplist = []
        svc_list = ""
        for i in range(self.createsvc):
            tmplist.append(int(self.pool.svc.rl_ranks[i]))
            svc_list += str(tmplist[i]) + ":"
        svc_list = svc_list[:-1]

        block_size = '1536m'

        if stripe_size == '8m':
            transfer_size = stripe_size

        hostfile = os.path.join(self.tmpdir, "ior_hosts_" + job_name)

        cmd = ior_utils.get_ior_cmd(ior_flags, iteration, block_size,
                                    transfer_size, pool_uuid, svc_list,
                                    record_size, stripe_size, stripe_count,
                                    async_io, object_class, self.basepath,
                                    hostfile, job_processes)

        output = os.path.join(self.tmpdir, job_name + "_results.out")
        script = slurm_utils.write_slurm_script(self.tmpdir, job_name,
                                                output, int(job_nodes), [cmd])
        return script

    def setUp(self):

        # intermediate results are stored in this global
        # start off with it empty
        self.soak_results = {}

        self.partition = None

        # initialize anything we rely on existing
        self.pool = None
        self.hostlist_servers = None

        # get paths from the build_vars generated by build
        with open('../../../.build_vars.json') as thefile:
            build_paths = json.load(thefile)
        self.basepath = os.path.normpath(build_paths['PREFIX']  + "/../")

        # using workdir was not successful for reasons unknown right now,
        # so fall back to a tmp directory under basepath
        self.tmpdir = self.basepath + "/install/tmp"
        try:
            os.makedirs(self.tmpdir)
        except OSError:
            # the directory may already exist
            pass

        # setup the DAOS python API
        self.context = DaosContext(build_paths['PREFIX'] + '/lib/')

        # start the servers
        self.hostlist_servers = self.params.get("daos_servers",
                                                '/run/hosts/test_machines/*')
        filename = write_host_file.write_host_file(self.hostlist_servers,
                                                   self.workdir)
        self.server_group = self.params.get("name", '/server_config/',
                                            'daos_server')
        print("Servers {} group {} basepath {}".format(self.hostlist_servers,
                                                       self.server_group,
                                                       self.basepath))
        server_utils.run_server(filename, self.server_group, self.basepath)

        # setup the storage
        self.create_pool()

    def tearDown(self):
        server_utils.stop_server(hosts=self.hostlist_servers)

    def test_soak_1(self):
        """
        Test ID: DAOS-2192
        Test Description: This test runs 2 DAOS API IOR jobs.
        :avocado: tags=soak1
        """

        script1 = None
        script2 = None
        try:
            # turn job parameters into slurm script
            script1 = self.build_ior_script('job1')

            # queue it up to run and register a callback to retrieve results
            job_id1 = slurm_utils.run_slurm_script(script1)
            slurm_utils.register_for_job_results(job_id1, self, maxwait=3600)

            # queue up a second job
            script2 = self.build_ior_script('job2')
            job_id2 = slurm_utils.run_slurm_script(script2)
            slurm_utils.register_for_job_results(job_id2, self, maxwait=3600)

            # wait for all the jobs to finish
            while len(self.soak_results) < 2:
                time.sleep(10)

            for job, result in self.soak_results.items():
                if result != "COMPLETED":
                    self.fail("Soak job: {} didn't complete as expected: {}".
                              format(job, result))

        except (DaosApiError, ior_utils.IorFailed) as error:
            self.fail("<Soak Test 1 Failed>\n {}".format(error))
        finally:
            try:
                if script1:
                    os.remove(script1)
            except OSError:
                pass
            try:
                if script2:
                    os.remove(script2)
            except OSError:
                pass

    def test_soak_2(self):
        """
        Test ID: DAOS-2192
        Test Description: This test verifies that a dmg script can be submitted.
        :avocado: tags=soak2
        """

        script = None
        try:
            dmgcmds = dmg_utils.get_dmg_script("dmg1", self.params,
                                               self.basepath)

            s2_job1_name = self.params.get("name", '/run/job3/')
            s2_job1_nodes = self.params.get("nodes", '/run/job3/')

            output = os.path.join(self.tmpdir, s2_job1_name + "_results.out")

            script = slurm_utils.write_slurm_script(self.tmpdir, s2_job1_name,
                                                    output,
                                                    s2_job1_nodes, dmgcmds)
            job_id = slurm_utils.run_slurm_script(script)
            slurm_utils.register_for_job_results(job_id, self, maxwait=3600)

            # wait for all the jobs to finish
            while len(self.soak_results) < 1:
                time.sleep(10)

            for job, result in self.soak_results.items():
                if result != "COMPLETED":
                    self.fail("Soak job: {} didn't complete as expected: {}".
                              format(job, result))

        except (DaosApiError, ior_utils.IorFailed) as error:
            self.fail("Soak Test 2 Failed/n {}".format(error))
        finally:
            try:
                if script:
                    os.remove(script)
            except OSError:
                pass

    def test_soak_3(self):
        """
        Test ID: DAOS-2192
        Test Description: this time try a dmg command combined with IOR run
        Use Cases:
        :avocado: tags=soak3
        """

        script1 = None
        script2 = None
        try:
            # retrieve IOR job parameters
            script1 = self.build_ior_script('job1')
            job_id1 = slurm_utils.run_slurm_script(script1)
            slurm_utils.register_for_job_results(job_id1, self, maxwait=3600)

            # now do the dmg job
            dmgcmds = dmg_utils.get_dmg_script("dmg1", self.params,
                                               self.basepath)

            s3_job2_name = self.params.get("name", '/run/job3/')
            s3_job2_nodes = self.params.get("nodes", '/run/job3/')
            output = os.path.join(self.tmpdir, s3_job2_name + "_results.out")
            script2 = slurm_utils.write_slurm_script(self.tmpdir, s3_job2_name,
                                                     output, s3_job2_nodes,
                                                     dmgcmds)
            job_id2 = slurm_utils.run_slurm_script(script2)
            slurm_utils.register_for_job_results(job_id2, self, maxwait=3600)

            # wait for all the jobs to finish
            while len(self.soak_results) < 2:
                time.sleep(10)

            for job, result in self.soak_results.items():
                if result != "COMPLETED":
                    self.fail("Soak job: {} didn't complete as expected: {}".
                              format(job, result))

        except (DaosApiError, ior_utils.IorFailed) as error:
            self.fail("Soak Test 3 Failed\n {}".format(error))
        finally:
            try:
                if script1:
                    os.remove(script1)
            except OSError:
                pass
            try:
                if script2:
                    os.remove(script2)
            except OSError:
                pass
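
All three soak tests above poll self.soak_results in an unbounded "while ... time.sleep(10)" loop, which hangs forever if a job callback is lost. A bounded variant is sketched here; wait_for_results is an illustrative name, not a helper from the repo.

import time

def wait_for_results(results, expected, timeout=3600, interval=10):
    """Poll a shared results dict until `expected` entries arrive or time out."""
    deadline = time.time() + timeout
    while len(results) < expected:
        if time.time() > deadline:
            raise RuntimeError("timed out with {}/{} job results".format(
                len(results), expected))
        time.sleep(interval)
    return results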
Example #12
class DestroyRebuild(Test):

    """
    Test Class Description:
    This test verifies destruction of a pool that is rebuilding.

    :avocado: tags=pool,pooldestroy,rebuild,desreb
    """

    build_paths = []
    server_group = ""
    context = None
    pool = None
    hostfile = ""

    def setUp(self):
        """ setup for the test """
        self.agent_sessions = None
        # get paths from the build_vars generated by build
        with open('../../../.build_vars.json') as build_file:
            build_paths = json.load(build_file)
        self.context = DaosContext(build_paths['PREFIX'] + '/lib/')
        self.basepath = os.path.normpath(build_paths['PREFIX'] + "/../")

        # generate a hostfile
        self.hostlist = self.params.get("test_machines", '/run/hosts/')
        self.hostfile = write_host_file.write_host_file(self.hostlist,
                                                        self.workdir)

        # fire up the DAOS servers
        self.server_group = self.params.get("name", '/run/server_config/',
                                            'daos_server')
        self.agent_sessions = AgentUtils.run_agent(self.basepath, self.hostlist)
        server_utils.run_server(self.hostfile, self.server_group,
                                build_paths['PREFIX'] + '/../')

        # create a pool to test with
        createmode = self.params.get("mode", '/run/pool/createmode/')
        createuid = self.params.get("uid", '/run/pool/createuid/')
        creategid = self.params.get("gid", '/run/pool/creategid/')
        createsetid = self.params.get("setname", '/run/pool/createset/')
        createsize = self.params.get("size", '/run/pool/createsize/')
        self.pool = DaosPool(self.context)
        self.pool.create(createmode, createuid, creategid, createsize,
                         createsetid)
        self.pool.get_uuid_str()

        time.sleep(2)

    def tearDown(self):
        """ cleanup after the test """

        try:
            os.remove(self.hostfile)
            if self.pool:
                self.pool.destroy(1)
        finally:
            if self.agent_sessions:
                AgentUtils.stop_agent(self.hostlist, self.agent_sessions)
            server_utils.stop_server(hosts=self.hostlist)


    def test_destroy_while_rebuilding(self):
        """
        :avocado: tags=pool,pooldestroy,rebuild,desreb
        """
        try:
            print("\nsetup complete, starting test\n")

            # create a server object that references one of our pool target
            # hosts and then kill it
            svr_to_kill = int(self.params.get("rank_to_kill",
                                              '/run/testparams/ranks/'))
            server = DaosServer(self.context, bytes(self.server_group),
                                svr_to_kill)

            print("created server ")

            # BUG if you don't connect the rebuild doesn't start correctly
            self.pool.connect(1 << 1)
            status = self.pool.pool_query()
            if not status.pi_ntargets == len(self.hostlist):
                self.fail("target count wrong.\n")
            if not status.pi_ndisabled == 0:
                self.fail("disabled target count wrong.\n")

            print("connect ")

            time.sleep(1)
            server.kill(1)

            print("killed server ")

            # exclude the target from the dead server
            self.pool.exclude([svr_to_kill])

            print("exclude target ")

            #self.pool.disconnect()
            #print "disconnect "

            # the rebuild won't take long since there is no data so do
            # the destroy quickly
            self.pool.destroy(1)
            print("destroy ")

        except DaosApiError as excep:
            print(excep)
            print(traceback.format_exc())
            self.fail("Expecting to pass but test has failed.\n")
Example #13
class PoolSvc(Test):
    """
    Tests svc argument while pool create.

    """
    def setUp(self):
        # get paths from the build_vars generated by build
        with open('../../../.build_vars.json') as f:
            build_paths = json.load(f)
        self.basepath = os.path.normpath(build_paths['PREFIX'] + "/../")
        self.tmp = build_paths['PREFIX'] + '/tmp'

        self.server_group = self.params.get("server_group", '/server/',
                                            'daos_server')
        self.daosctl = self.basepath + '/install/bin/daosctl'

        # setup the DAOS python API
        self.Context = DaosContext(build_paths['PREFIX'] + '/lib/')
        self.POOL = None

        self.hostfile = None
        hostlist = self.params.get("test_machines", '/run/hosts/*')
        self.hostfile = WriteHostFile.WriteHostFile(hostlist, self.tmp)
        print("Host file is: {}".format(self.hostfile))

        ServerUtils.runServer(self.hostfile, self.server_group, self.basepath)
        time.sleep(5)

    def tearDown(self):
        if self.hostfile is not None:
            os.remove(self.hostfile)
        if self.POOL is not None and self.POOL.attached:
            self.POOL.destroy(1)

        ServerUtils.stopServer()

    def test_poolsvc(self):
        """
        Test svc arg during pool create.

        :avocado: tags=pool,svc
        """

        # parameters used in pool create
        createmode = self.params.get("mode", '/run/createtests/createmode/*/')
        createuid = os.geteuid()
        creategid = os.getegid()
        createsetid = self.params.get("setname", '/run/createtests/createset/')
        createsize = self.params.get("size", '/run/createtests/createsize/')
        createsvc = self.params.get("svc", '/run/createtests/createsvc/*/')

        expected_result = createsvc[1]

        try:
            # initialize a python pool object then create the underlying
            # daos storage
            self.POOL = DaosPool(self.Context)
            self.POOL.create(createmode, createuid, creategid, createsize,
                             createsetid, None, None, createsvc[0])
            self.POOL.connect(1 << 1)

            if createsvc[0] == 3:
                self.POOL.disconnect()
                cmd = ('{0} kill-leader  --uuid={1}'.format(
                    self.daosctl, self.POOL.get_uuid_str()))
                process.system(cmd)
                time.sleep(5)
                self.POOL.connect(1 << 1)
                self.POOL.disconnect()
                server = DaosServer(self.Context, self.server_group, 1)
                server.kill(1)
                time.sleep(5)
                self.POOL.connect(1 << 1)

            if expected_result in ['FAIL']:
                self.fail("Test was expected to fail but it passed.\n")

            # cleanup the pool
            self.POOL.disconnect()
            self.POOL.destroy(1)
            self.POOL = None

        except ValueError as e:
            print(e)
            print(traceback.format_exc())
            if expected_result == 'PASS':
                self.fail("Test was expected to pass but it failed.\n")
Example #14
class PoolSvc(Test):
    """
    Tests svc argument while pool create.
    """
    def setUp(self):
        # get paths from the build_vars generated by build
        with open('../../../.build_vars.json') as build_file:
            build_paths = json.load(build_file)
        self.basepath = os.path.normpath(build_paths['PREFIX'] + "/../")

        self.server_group = self.params.get("name", '/server_config/',
                                            'daos_server')
        self.daosctl = self.basepath + '/install/bin/daosctl'

        # setup the DAOS python API
        self.context = DaosContext(build_paths['PREFIX'] + '/lib/')
        self.pool = None

        self.hostfile = None
        self.hostlist = self.params.get("test_machines", '/run/hosts/*')
        self.hostfile = write_host_file.write_host_file(self.hostlist,
                                                        self.workdir)
        print("Host file is: {}".format(self.hostfile))

        self.agent_sessions = AgentUtils.run_agent(self.basepath, self.hostlist)
        server_utils.run_server(self.hostfile, self.server_group, self.basepath)

    def tearDown(self):
        try:
            if self.pool is not None and self.pool.attached:
                self.pool.destroy(1)
        finally:
            if self.agent_sessions:
                AgentUtils.stop_agent(self.hostlist, self.agent_sessions)
            server_utils.stop_server(hosts=self.hostlist)

    def test_poolsvc(self):
        """
        Test svc arg during pool create.

        :avocado: tags=pool,svc
        """

        # parameters used in pool create
        createmode = self.params.get("mode", '/run/createtests/createmode/*/')
        createuid = os.geteuid()
        creategid = os.getegid()
        createsetid = self.params.get("setname", '/run/createtests/createset/')
        createsize = self.params.get("size", '/run/createtests/createsize/')
        createsvc = self.params.get("svc", '/run/createtests/createsvc/*/')

        expected_result = createsvc[1]

        try:
            # initialize a python pool object then create the underlying
            # daos storage
            self.pool = DaosPool(self.context)
            self.pool.create(createmode, createuid, creategid,
                             createsize, createsetid, None, None, createsvc[0])
            self.pool.connect(1 << 1)

            # checking returned rank list for server more than 1
            i = 0
            while (
                    int(self.pool.svc.rl_ranks[i]) > 0 and
                    int(self.pool.svc.rl_ranks[i]) <= createsvc[0] and
                    int(self.pool.svc.rl_ranks[i]) != 999999
            ):
                i += 1
            if i != createsvc[0]:
                self.fail("Length of Returned Rank list is not equal to "
                          "the number of Pool Service members.\n")
            rank_list = []
            for j in range(createsvc[0]):
                rank_list.append(int(self.pool.svc.rl_ranks[j]))
                if len(rank_list) != len(set(rank_list)):
                    self.fail("Duplicate values in returned rank list")

            if createsvc[0] == 3:
                self.pool.disconnect()
                cmd = ('{0} kill-leader  --uuid={1}'
                       .format(self.daosctl, self.pool.get_uuid_str()))
                process.system(cmd)
                self.pool.connect(1 << 1)
                self.pool.disconnect()
                server = DaosServer(self.context, self.server_group, 2)
                server.kill(1)
                self.pool.exclude([2])
                self.pool.connect(1 << 1)

            if expected_result in ['FAIL']:
                self.fail("Test was expected to fail but it passed.\n")

        except DaosApiError as excep:
            print(excep)
            print(traceback.format_exc())
            if expected_result == 'PASS':
                self.fail("Test was expected to pass but it failed.\n")
Example #15
class EightServers(Test):
    """
    Test class Description: Runs IOR with 8 servers.

    """
    def __init__(self, *args, **kwargs):

        super(EightServers, self).__init__(*args, **kwargs)

        self.basepath = None
        self.server_group = None
        self.context = None
        self.pool = None
        self.num_procs = None
        self.hostlist_servers = None
        self.hostfile_servers = None
        self.hostlist_clients = None
        self.hostfile_clients = None
        self.mpio = None

    def setUp(self):
        # get paths from the build_vars generated by build
        with open('../../../.build_vars.json') as build_file:
            build_paths = json.load(build_file)
        self.basepath = os.path.normpath(build_paths['PREFIX'] + "/../")
        print("<<{}>>".format(self.basepath))
        self.server_group = self.params.get("name", '/server_config/',
                                            'daos_server')

        # setup the DAOS python API
        self.context = DaosContext(build_paths['PREFIX'] + '/lib/')

        self.hostlist_servers = self.params.get("test_servers",
                                                '/run/hosts/test_machines/*')
        self.hostfile_servers = (write_host_file.write_host_file(
            self.hostlist_servers, self.workdir))
        print("Host file servers is: {}".format(self.hostfile_servers))

        self.hostlist_clients = self.params.get("test_clients",
                                                '/run/hosts/test_machines/*')
        self.num_procs = self.params.get("np", '/run/ior/client_processes/*')
        self.hostfile_clients = (write_host_file.write_host_file(
            self.hostlist_clients, self.workdir, None))
        print("Host file clients is: {}".format(self.hostfile_clients))

        server_utils.run_server(self.hostfile_servers, self.server_group,
                                self.basepath)

    def tearDown(self):
        try:
            if self.pool is not None and self.pool.attached:
                self.pool.destroy(1)
        finally:
            server_utils.stop_server(hosts=self.hostlist_servers)

    def executable(self, iorflags=None):
        """
        Executable function to run ior for ssf and fpp
        """

        # parameters used in pool create
        createmode = self.params.get("mode", '/run/pool/createmode/*/')
        createuid = os.geteuid()
        creategid = os.getegid()
        createsetid = self.params.get("setname", '/run/pool/createset/')
        createscm_size = self.params.get("scm_size", '/run/pool/createsize/')
        createnvme_size = self.params.get("nvme_size", '/run/pool/createsize/')
        createsvc = self.params.get("svcn", '/run/pool/createsvc/')
        iteration = self.params.get("iter", '/run/ior/iteration/')
        block_size = self.params.get("b", '/run/ior/transfersize_blocksize/*/')
        transfer_size = self.params.get("t",
                                        '/run/ior/transfersize_blocksize/*/')

        try:
            # initialize MpioUtils
            self.mpio = MpioUtils()
            if self.mpio.mpich_installed(self.hostlist_clients) is False:
                self.fail("Exiting Test: Mpich not installed")

            #print self.mpio.mpichinstall
            # initialize a python pool object then create the underlying
            # daos storage
            self.pool = DaosPool(self.context)
            self.pool.create(createmode, createuid, creategid, createscm_size,
                             createsetid, None, None, createsvc,
                             createnvme_size)

            pool_uuid = self.pool.get_uuid_str()
            svc_list = ""
            for i in range(createsvc):
                svc_list += str(int(self.pool.svc.rl_ranks[i])) + ":"
            svc_list = svc_list[:-1]

            print("svc_list: {}".format(svc_list))

            ior_utils.run_ior_mpiio(self.basepath, self.mpio.mpichinstall,
                                    pool_uuid, svc_list, self.num_procs,
                                    self.hostfile_clients, iorflags, iteration,
                                    transfer_size, block_size, True)

        except (DaosApiError, MpioFailed) as excep:
            print(excep)

    def test_ssf(self):
        """
        Test ID: DAOS-2121
        Test Description: Run IOR with 1,64 and 128 clients config in ssf mode.
        Use Cases: Different combinations of 1/64/128 Clients,
                   1K/4K/32K/128K/512K/1M transfersize and block size of 32M
                   for 1K transfer size and 128M for rest.
        :avocado: tags=ior,mpiio,eightservers,ior_ssf
        """
        ior_flags = self.params.get("F", '/run/ior/iorflags/ssf/')
        self.executable(ior_flags)

    def test_fpp(self):
        """
        Test ID: DAOS-2121
        Test Description: Run IOR with 1,64 and 128 clients config in fpp mode.
        Use Cases: Different combinations of 1/64/128 Clients,
                   1K/4K/32K/128K/512K/1M transfersize and block size of 32M
                   for 1K transfer size and 128M for rest.
        :avocado: tags=ior,mpiio,eightservers,ior_fpp
        """
        ior_flags = self.params.get("F", '/run/ior/iorflags/fpp/')
        self.executable(ior_flags)
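
The colon-separated svc_list string is rebuilt by hand in most of these examples. A small shared helper would avoid the trailing-separator trim; svc_ranks_to_str is an illustrative name, not an existing utility.

def svc_ranks_to_str(svc, count):
    """Render the first `count` pool service ranks as 'r0:r1:...'."""
    return ":".join(str(int(svc.rl_ranks[i])) for i in range(count))

# svc_list = svc_ranks_to_str(self.pool.svc, createsvc)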
Example #16
class FourServers(TestWithServers):
    """
    Test class Description: Runs IOR with four servers.
    :avocado: recursive
    """
    def setUp(self):
        super(FourServers, self).setUp()

        #set client variables
        self.hostfile_clients = (
            write_host_file.write_host_file(self.hostlist_clients,
                                            self.workdir, None))

    def test_fourservers(self):
        """
        Jira ID: DAOS-1263
        Test Description: Test IOR with four servers.
        Use Cases: Different combinations of 1/64/128 Clients,
                   1K/4K/32K/128K/512K/1M transfer size.
        :avocado: tags=ior,fourservers
        """

        # parameters used in pool create
        createmode = self.params.get("mode_RW", '/run/pool/createmode/')
        createuid = os.geteuid()
        creategid = os.getegid()
        createsetid = self.params.get("setname", '/run/pool/createset/')
        createsize = self.params.get("size", '/run/pool/createsize/')
        createsvc = self.params.get("svcn", '/run/pool/createsvc/')

        # ior parameters
        iteration = self.params.get("iter", '/run/ior/iteration/')
        client_processes = self.params.get("np", '/run/ior/clientslots/*')
        ior_flags = self.params.get("F", '/run/ior/iorflags/')
        transfer_size = self.params.get("t",
                                        '/run/ior/transfersize_blocksize/*/')
        block_size = self.params.get("b",
                                     '/run/ior/transfersize_blocksize/*/')
        object_class = self.params.get("o", '/run/ior/objectclass/')

        try:
            # initialize a python pool object then create the underlying
            # daos storage
            self.pool = DaosPool(self.context)
            self.pool.create(createmode, createuid, creategid, createsize,
                             createsetid, None, None, createsvc)

            pool_uuid = self.pool.get_uuid_str()
            tmp_rank_list = []
            svc_list = ""
            for i in range(createsvc):
                tmp_rank_list.append(int(self.pool.svc.rl_ranks[i]))
                svc_list += str(tmp_rank_list[i]) + ":"
            svc_list = svc_list[:-1]

            ior_utils.run_ior_daos(self.hostfile_clients, ior_flags, iteration,
                                   block_size, transfer_size, pool_uuid,
                                   svc_list, object_class, self.basepath,
                                   client_processes)

        except (DaosApiError, ior_utils.IorFailed) as excep:
            self.fail("<FourServers Test run Failed>\n {}".format(excep))
Example #17
class MultipleClients(Test):
    """
    Test class Description: Runs IOR with multiple clients.

    """
    def setUp(self):
        self.agent_sessions = None
        # get paths from the build_vars generated by build
        with open('../../../.build_vars.json') as build_file:
            build_paths = json.load(build_file)
        self.basepath = os.path.normpath(build_paths['PREFIX'] + "/../")

        self.server_group = self.params.get("name", '/server_config/',
                                            'daos_server')
        self.daosctl = self.basepath + '/install/bin/daosctl'

        # setup the DAOS python API
        self.context = DaosContext(build_paths['PREFIX'] + '/lib/')
        self.pool = None

        self.hostlist_servers = self.params.get("test_servers",
                                                '/run/hosts/test_machines/*')
        self.hostfile_servers = (
            write_host_file.write_host_file(self.hostlist_servers,
                                            self.workdir))
        print("Host file servers is: {}".format(self.hostfile_servers))

        self.hostlist_clients = (
            self.params.get("clients",
                            '/run/hosts/test_machines/test_clients/*'))
        self.hostfile_clients = (
            write_host_file.write_host_file(self.hostlist_clients,
                                            self.workdir))
        print("Host file clientsis: {}".format(self.hostfile_clients))

        self.agent_sessions = AgentUtils.run_agent(self.basepath,
                                                   self.hostlist_servers,
                                                   self.hostlist_clients)
        server_utils.run_server(self.hostfile_servers, self.server_group,
                                self.basepath)

        if int(str(self.name).split("-")[0]) == 1:
            ior_utils.build_ior(self.basepath)

    def tearDown(self):
        try:
            if self.hostfile_clients is not None:
                os.remove(self.hostfile_clients)
            if self.hostfile_servers is not None:
                os.remove(self.hostfile_servers)
            if self.pool is not None and self.pool.attached:
                self.pool.destroy(1)
        finally:
            if self.agent_sessions:
                AgentUtils.stop_agent(self.hostlist_clients,
                                      self.agent_sessions)
            server_utils.stop_server(hosts=self.hostlist_servers)

    def test_multipleclients(self):
        """
        Test ID: DAOS-1263
        Test Description: Test IOR with 16 and 32 clients config.
        Use Cases: Different combinations of 16/32 Clients, 8b/1k/4k
                   record size, 1m/8m stripesize and 16 async io.
        :avocado: tags=ior,twoservers,multipleclients
        """

        # parameters used in pool create
        createmode = self.params.get("mode", '/run/pool/createmode/*/')
        createuid = os.geteuid()
        creategid = os.getegid()
        createsetid = self.params.get("setname", '/run/pool/createset/')
        createsize = self.params.get("size", '/run/pool/createsize/')
        createsvc = self.params.get("svcn", '/run/pool/createsvc/')
        iteration = self.params.get("iter", '/run/ior/iteration/')
        slots = self.params.get("slots", '/run/ior/clientslots/*')
        ior_flags = self.params.get("F", '/run/ior/iorflags/')
        transfer_size = self.params.get("t", '/run/ior/transfersize/')
        record_size = self.params.get("r", '/run/ior/recordsize/*')
        stripe_size = self.params.get("s", '/run/ior/stripesize/*')
        stripe_count = self.params.get("c", '/run/ior/stripecount/')
        async_io = self.params.get("a", '/run/ior/asyncio/')
        object_class = self.params.get("o", '/run/ior/objectclass/')

        try:
            # initialize a python pool object then create the underlying
            # daos storage
            self.pool = DaosPool(self.context)
            self.pool.create(createmode, createuid, creategid, createsize,
                             createsetid, None, None, createsvc)

            with open(self.hostfile_clients) as client_file:
                new_text = client_file.read().replace(
                    'slots=1', 'slots={}'.format(slots))

            with open(self.hostfile_clients, "w") as client_file:
                client_file.write(new_text)

            pool_uuid = self.pool.get_uuid_str()
            tmp_rank_list = []
            svc_list = ""
            for i in range(createsvc):
                tmp_rank_list.append(int(self.pool.svc.rl_ranks[i]))
                svc_list += str(tmp_rank_list[i]) + ":"
            svc_list = svc_list[:-1]

            if slots == 8:
                block_size = '3g'
            elif slots == 16:
                block_size = '1536m'
            else:
                self.fail("Unsupported client slots value: {}".format(slots))

            if stripe_size == '8m':
                transfer_size = stripe_size

            ior_utils.run_ior(self.hostfile_clients, ior_flags, iteration,
                              block_size, transfer_size, pool_uuid, svc_list,
                              record_size, stripe_size, stripe_count, async_io,
                              object_class, self.basepath, slots)

        except (DaosApiError, ior_utils.IorFailed) as excep:
            self.fail("<MultipleClients Test run Failed>\n {}".format(excep))
Example #18
class TestPool(TestDaosApiBase):
    """A class for functional testing of DaosPools objects."""

    def __init__(self, context, log, cb_handler=None):
        """Initialize a TestPool object.

        Args:
            context (DaosContext): the DAOS context used for the pool API calls
            log (logging): logging object used to report the pool status
            cb_handler (CallbackHandler, optional): callback object to use with
                the API methods. Defaults to None.
        """
        super(TestPool, self).__init__(cb_handler)
        self.context = context
        self.log = log
        self.uid = os.geteuid()
        self.gid = os.getegid()

        self.mode = TestParameter(None)
        self.name = TestParameter(None)
        self.group = TestParameter(None)
        self.svcn = TestParameter(None)
        self.target_list = TestParameter(None)
        self.scm_size = TestParameter(None)
        self.nvme_size = TestParameter(None)

        self.pool = None
        self.uuid = None
        self.info = None
        self.connected = False

    def get_params(self, test, path="/run/pool/*"):
        """Get the pool parameters from the yaml file.

        Args:
            test (Test): avocado Test object
            path (str, optional): yaml namespace. Defaults to "/run/pool/*".
        """
        super(TestPool, self).get_params(test, path)

    @fail_on(DaosApiError)
    def create(self):
        """Create a pool.

        Destroys an existing pool if defined and assigns self.pool and
        self.uuid.
        """
        self.destroy()
        self.log.info("Creating a pool")
        self.pool = DaosPool(self.context)
        kwargs = {
            "mode": self.mode.value, "uid": self.uid, "gid": self.gid,
            "scm_size": self.scm_size.value, "group": self.name.value}
        for key in ("target_list", "svcn", "nvme_size"):
            value = getattr(self, key).value
            if value:
                kwargs[key] = value
        self._call_method(self.pool.create, kwargs)
        self.uuid = self.pool.get_uuid_str()

    @fail_on(DaosApiError)
    def connect(self, permission=1):
        """Connect to the pool.

        Args:
            permission (int, optional): connect permission. Defaults to 1.

        Returns:
            bool: True if the pool has been connected; False if the pool was
                already connected or the pool is not defined.

        """
        if self.pool and not self.connected:
            kwargs = {"flags": 1 << permission}
            self.log.info(
                "Connecting to pool %s with permission %s (flag: %s)",
                self.uuid, permission, kwargs["flags"])
            self._call_method(self.pool.connect, kwargs)
            self.connected = True
            return True
        return False

    @fail_on(DaosApiError)
    def disconnect(self):
        """Disconnect from connected pool.

        Returns:
            bool: True if the pool has been disconnected; False if the pool was
                already disconnected or the pool is not defined.

        """
        if self.pool and self.connected:
            self.log.info("Disonnecting from pool %s", self.uuid)
            self._call_method(self.pool.disconnect, {})
            self.connected = False
            return True
        return False

    @fail_on(DaosApiError)
    def destroy(self, force=1):
        """Destroy the pool.

        Args:
            force (int, optional): force flag. Defaults to 1.

        Returns:
            bool: True if the pool has been destroyed; False if the pool is not
                defined.

        """
        if self.pool:
            self.disconnect()
            self.log.info("Destroying pool %s", self.uuid)
            self._call_method(self.pool.destroy, {"force": force})
            self.pool = None
            self.uuid = None
            self.info = None
            return True
        return False

    @fail_on(DaosApiError)
    def get_info(self):
        """Query the pool for information.

        Sets the self.info attribute.
        """
        if self.pool:
            self.connect()
            self._call_method(self.pool.pool_query, {})
            self.info = self.pool.pool_info

    def check_pool_info(self, pi_uuid=None, pi_ntargets=None, pi_nnodes=None,
                        pi_ndisabled=None, pi_map_ver=None, pi_leader=None,
                        pi_bits=None):
        # pylint: disable=unused-argument
        """Check the pool info attributes.

        Args:
            pi_uuid (str, optional): pool uuid. Defaults to None.
            pi_ntargets (int, optional): number of targets. Defaults to None.
            pi_nnodes (int, optional): number of nodes. Defaults to None.
            pi_ndisabled (int, optional): number of disabled. Defaults to None.
            pi_map_ver (int, optional): pool map version. Defaults to None.
            pi_leader (int, optional): pool leader. Defaults to None.
            pi_bits (int, optional): pool bits. Defaults to None.

        Returns:
            bool: True if at least one expected value is specified and all the
                specified values match; False otherwise

        """
        self.get_info()
        checks = [
            (key,
             c_uuid_to_str(getattr(self.info, key))
             if key == "pi_uuid" else getattr(self.info, key),
             val)
            for key, val in locals().items()
            if key != "self" and val is not None]
        return self._check_info(checks)

    def check_pool_space(self, ps_free_min=None, ps_free_max=None,
                         ps_free_mean=None, ps_ntargets=None, ps_padding=None):
        # pylint: disable=unused-argument
        """Check the pool info space attributes.

        Args:
            ps_free_min (list, optional): minimum free space per device.
                Defaults to None.
            ps_free_max (list, optional): maximum free space per device.
                Defaults to None.
            ps_free_mean (list, optional): mean free space per device.
                Defaults to None.
            ps_ntargets (int, optional): number of targets. Defaults to None.
            ps_padding (int, optional): space padding. Defaults to None.

        Returns:
            bool: True if at least one expected value is specified and all the
                specified values match; False otherwise

        """
        self.get_info()
        checks = []
        for key in ("ps_free_min", "ps_free_max", "ps_free_mean"):
            val = locals()[key]
            if isinstance(val, list):
                for index, item in enumerate(val):
                    checks.append((
                        "{}[{}]".format(key, index),
                        getattr(self.info.pi_space, key)[index],
                        item))
        for key in ("ps_ntargets", "ps_padding"):
            val = locals()[key]
            if val is not None:
                checks.append((key, getattr(self.info.pi_space, key), val))
        return self._check_info(checks)

    def check_pool_daos_space(self, s_total=None, s_free=None):
        # pylint: disable=unused-argument
        """Check the pool info daos space attributes.

        Args:
            s_total (list, optional): total space per device. Defaults to None.
            s_free (list, optional): free space per device. Defaults to None.

        Returns:
            bool: True if at least one expected value is specified and all the
                specified values match; False otherwise

        """
        self.get_info()
        checks = [
            ("{}_{}".format(key, index),
             getattr(self.info.pi_space.ps_space, key)[index],
             item)
            for key, val in locals().items()
            if key != "self" and val is not None
            for index, item in enumerate(val)]
        return self._check_info(checks)

    def check_rebuild_status(self, rs_version=None, rs_pad_32=None,
                             rs_errno=None, rs_done=None,
                             rs_toberb_obj_nr=None, rs_obj_nr=None,
                             rs_rec_nr=None):
        # pylint: disable=unused-argument
        """Check the pool info rebuild attributes.

        Args:
            rs_version (int, optional): rebuild version. Defaults to None.
            rs_pad_32 (int, optional): rebuild pad. Defaults to None.
            rs_errno (int, optional): rebuild error number. Defaults to None.
            rs_done (int, optional): rebuild done flag. Defaults to None.
            rs_toberb_obj_nr (int, optional): number of objects to be rebuilt.
                Defaults to None.
            rs_obj_nr (int, optional): number of rebuilt objects.
                Defaults to None.
            rs_rec_nr (int, optional): number of rebuilt records.
                Defaults to None.

        Returns:
            bool: True if at least one expected value is specified and all the
                specified values match; False otherwise

        """
        self.get_info()
        checks = [
            (key, getattr(self.info.pi_rebuild_st, key), val)
            for key, val in locals().items()
            if key != "self" and val is not None]
        return self._check_info(checks)

    def _check_info(self, check_list):
        """Verify each pool info attribute value matches an expected value.

        Args:
            check_list (list): a list of tuples containing the name of the pool
                information attribute to check, the current value of the
                attribute, and the expected value of the attribute.

        Returns:
            bool: True if at least one check has been specified and all the
            actual and expected values match; False otherwise.

        """
        check_status = len(check_list) > 0
        for check, actual, expect in check_list:
            self.log.info(
                "Verifying the pool %s: %s ?= %s", check, actual, expect)
            if actual != expect:
                msg = "The {} does not match: actual: {}, expected: {}".format(
                    check, actual, expect)
                self.log.error(msg)
                check_status = False
        return check_status

    def rebuild_complete(self):
        """Determine if the pool rebuild is complete.

        Returns:
            bool: True if pool rebuild is complete; False otherwise

        """
        self.get_info()
        return self.info.pi_rebuild_st.rs_done == 1

    def wait_for_rebuild(self, to_start, interval=1):
        """Wait for the rebuild to start or end.

        Args:
            to_start (bool): whether to wait for rebuild to start or end
            interval (int): number of seconds to wait in between rebuild
                completion checks
        """
        self.log.info(
            "Waiting for rebuild to %s ...",
            "start" if to_start else "complete")
        while self.rebuild_complete() == to_start:
            self.log.info(
                "  Rebuild %s ...",
                "has not yet started" if to_start else "in progress")
            sleep(interval)
        self.log.info(
            "Rebuild %s detected", "start" if to_start else "completion")

    @fail_on(DaosApiError)
    def start_rebuild(self, server_group, rank, daos_log):
        """Kill a specific server rank using this pool.

        Args:
            server_group (str): daos server group name
            rank (int): daos server rank to kill
            daos_log (DaosLog): object for logging messages
        """
        msg = "Killing DAOS server {} (rank {})".format(server_group, rank)
        self.log.info(msg)
        daos_log.info(msg)
        server = DaosServer(self.context, server_group, rank)
        server.kill(1)
        msg = "Excluding server rank {} from pool {}".format(rank, self.uuid)
        self.log.info(msg)
        daos_log.info(msg)
        self.pool.exclude([rank])

    def check_files(self, hosts):
        """Check if pool files exist on the specified list of hosts.

        Args:
            hosts (list): list of hosts

        Returns:
            bool: True if the files for this pool exist on each host; False
                otherwise

        """
        return check_pool_files(self.log, hosts, self.uuid.lower())

    def write_file(self, orterun, processes, hostfile, size, timeout=60):
        """Write a file to the pool.

        Args:
            orterun (str): full path to the orterun command
            processes (int): number of processes to launch
            hostfile (str): hostfile listing the clients that write the file
            size (int): size of the file to create in bytes
            timeout (int, optional): number of seconds before timing out the
                command. Defaults to 60 seconds.

        Returns:
            process.CmdResult: command execution result

        """
        self.log.info("Writing {} bytes to pool {}".format(size, self.uuid))
        env = {
            "DAOS_POOL": self.uuid,
            "DAOS_SVCL": "1",
            "DAOS_SINGLETON_CLI": "1",
        }
        current_path = os.path.dirname(os.path.abspath(__file__))
        command = "{} --np {} --hostfile {} {} {} testfile".format(
            orterun, processes, hostfile,
            os.path.join(current_path, "write_some_data.py"), size)
        return process.run(command, timeout, True, False, "both", True, env)
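
For reference, a typical call sequence for the TestPool helper above might look like the following sketch, assuming an avocado Test that provides self.context and self.log plus a /run/pool/* yaml namespace:

    pool = TestPool(self.context, self.log)
    pool.get_params(self)   # pull mode/scm_size/svcn/... from the test yaml
    pool.create()           # destroys any earlier pool, then creates a new one
    pool.connect()          # permission=1 maps to the 1 << 1 read-write flag
    if not pool.check_pool_info(pi_ndisabled=0):
        self.fail("unexpected number of disabled targets")
    pool.destroy()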
Example #19
class RebuildNoCap(Test):
    """
    Test Class Description:
    This class contains tests for pool rebuild.

    :avocado: tags=pool,rebuild,nocap
    """

    build_paths = []
    server_group = ""
    CONTEXT = None
    POOL = None
    hostfile = ""

    def setUp(self):
        """ setup for the test """

        # get paths from the build_vars generated by build
        with open('../../../.build_vars.json') as f:
            build_paths = json.load(f)
        self.CONTEXT = DaosContext(build_paths['PREFIX'] + '/lib/')

        # generate a hostfile
        self.host_list = self.params.get("test_machines", '/run/hosts/')
        tmp = build_paths['PREFIX'] + '/tmp'
        self.hostfile = WriteHostFile.WriteHostFile(self.host_list, tmp)

        # fire up the DAOS servers
        self.server_group = self.params.get("server_group", '/run/server/',
                                            'daos_server')
        ServerUtils.runServer(self.hostfile, self.server_group,
                              build_paths['PREFIX'] + '/../')
        time.sleep(3)

        # create a pool to test with
        createmode = self.params.get("mode", '/run/pool/createmode/')
        createuid = self.params.get("uid", '/run/pool/createuid/')
        creategid = self.params.get("gid", '/run/pool/creategid/')
        createsetid = self.params.get("setname", '/run/pool/createset/')
        createsize = self.params.get("size", '/run/pool/createsize/')
        self.POOL = DaosPool(self.CONTEXT)
        self.POOL.create(createmode, createuid, creategid, createsize,
                         createsetid)
        uuid = self.POOL.get_uuid_str()

        time.sleep(2)

        # stuff some bogus data into the pool
        how_many_bytes = int(
            self.params.get("datasize", '/run/testparams/datatowrite/'))
        exepath = build_paths['PREFIX'] +\
                  "/../src/tests/ftest/util/WriteSomeData.py"
        cmd = "export DAOS_POOL={0}; export DAOS_SVCL=1; mpirun"\
              " --np 1 --host {1} {2} {3} testfile".format(
                  uuid, self.host_list[0], exepath, how_many_bytes)
        subprocess.call(cmd, shell=True)

    def tearDown(self):
        """ cleanup after the test """

        os.remove(self.hostfile)
        self.POOL.destroy(1)
        ServerUtils.stopServer()

    def test_rebuild_no_capacity(self):
        """
        :avocado: tags=pool,rebuild,nocap
        """
        try:
            print "\nsetup complete, starting test\n"

            # create a server object that references one of our pool target
            # hosts and then kill it
            svr_to_kill = int(
                self.params.get("rank_to_kill", '/run/testparams/ranks/'))
            sh = DaosServer(self.CONTEXT, bytes(self.server_group),
                            svr_to_kill)

            time.sleep(1)
            sh.kill(1)

            # exclude the target from the dead server
            self.POOL.exclude([svr_to_kill])

            # exclude should trigger rebuild, check
            self.POOL.connect(1 << 1)
            status = self.POOL.pool_query()
            if not status.pi_ntargets == len(self.host_list):
                self.fail("target count wrong.\n")
            if not status.pi_ndisabled == 1:
                self.fail("disabled target count wrong.\n")

            # the pool should be too full to start a rebuild so
            # expecting an error
            # not sure yet specifically what error
            if status.pi_rebuild_st[2] == 0:
                self.fail("expecting rebuild to fail but it didn't.\n")

        except ValueError as e:
            print(e)
            print(traceback.format_exc())
            self.fail("Expecting to pass but test has failed.\n")
Example #20
class NvmeIo(avocado.Test):
    """
    Test Class Description:
        Test the general Metadata operations and boundary conditions.
    """

    def setUp(self):
        self.pool = None
        self.hostlist = None
        self.hostfile_clients = None
        self.hostfile = None
        self.out_queue = None
        self.pool_connect = False

        with open('../../../.build_vars.json') as json_f:
            build_paths = json.load(json_f)

        self.basepath = os.path.normpath(build_paths['PREFIX']  + "/../")
        self.server_group = self.params.get("name", '/server_config/',
                                            'daos_server')
        self.context = DaosContext(build_paths['PREFIX'] + '/lib/')
        self.d_log = DaosLog(self.context)
        self.hostlist = self.params.get("servers", '/run/hosts/*')
        self.hostfile = write_host_file.write_host_file(self.hostlist,
                                                        self.workdir)
        #Start Server
        server_utils.run_server(self.hostfile, self.server_group, self.basepath)

    def tearDown(self):
        try:
            if self.pool_connect:
                self.pool.disconnect()
                self.pool.destroy(1)
        finally:
            server_utils.stop_server(hosts=self.hostlist)

    def verify_pool_size(self, original_pool_info, ior_args):
        """
        Function is to validate the pool size
        original_pool_info: Pool info prior to IOR
        ior_args: IOR args to calculate the file size
        """
        #Get the current pool size for comparison
        current_pool_info = self.pool.pool_query()
        #if transfer size is >= 4K, pool size will be verified against NVMe,
        #else it will be checked against SCM
        if ior_args['stripe_size'] >= 4096:
            print("Size is > 4K,Size verification will be done with NVMe size")
            storage_index = 1
        else:
            print("Size is < 4K,Size verification will be done with SCM size")
            storage_index = 0

        free_pool_size = (
            original_pool_info.pi_space.ps_space.s_free[storage_index]
            - current_pool_info.pi_space.ps_space.s_free[storage_index])

        obj_multiplier = 1
        replica_number = re.findall(r'\d+', ior_args['object_class'])
        if replica_number:
            obj_multiplier = int(replica_number[0])
        expected_pool_size = (ior_args['slots'] * ior_args['block_size'] *
                              obj_multiplier)

        if free_pool_size < expected_pool_size:
            raise DaosTestError(
                'Pool Free Size did not match Actual = {} Expected = {}'
                .format(free_pool_size, expected_pool_size))

    @avocado.fail_on(DaosApiError)
    def test_nvme_io(self):
        """
        Test ID: DAOS-2082
        Test Description: Test will run IOR with standard and non-standard
        sizes. IOR will be run for all supported object types. The purpose is
        to verify the pool size (SCM and NVMe) for the IOR file.
        This test runs multiple IOR jobs on the same server start instance.
        :avocado: tags=nvme,nvme_io,large
        """
        ior_args = {}

        hostlist_clients = self.params.get("clients", '/run/hosts/*')
        tests = self.params.get("ior_sequence", '/run/ior/*')
        object_type = self.params.get("object_type", '/run/ior/*')
        #Loop for every IOR object type
        for obj_type in object_type:
            for ior_param in tests:
                self.hostfile_clients = write_host_file.write_host_file(
                    hostlist_clients,
                    self.workdir,
                    ior_param[4])
                #There is an issue with NVMe if transfer size > 64M; skip
                #these sizes for now
                if ior_param[2] > 67108864:
                    print("Xfersize > 64M fails, skipping (DAOS-1264)")
                    continue

                self.pool = DaosPool(self.context)
                self.pool.create(self.params.get("mode",
                                                 '/run/pool/createmode/*'),
                                 os.geteuid(),
                                 os.getegid(),
                                 ior_param[0],
                                 self.params.get("setname",
                                                 '/run/pool/createset/*'),
                                 nvme_size=ior_param[1])
                self.pool.connect(1 << 1)
                self.pool_connect = True
                createsvc = self.params.get("svcn", '/run/pool/createsvc/')
                svc_list = ""
                for i in range(createsvc):
                    svc_list += str(int(self.pool.svc.rl_ranks[i])) + ":"
                svc_list = svc_list[:-1]

                ior_args['client_hostfile'] = self.hostfile_clients
                ior_args['pool_uuid'] = self.pool.get_uuid_str()
                ior_args['svc_list'] = svc_list
                ior_args['basepath'] = self.basepath
                ior_args['server_group'] = self.server_group
                ior_args['tmp_dir'] = self.workdir
                ior_args['iorflags'] = self.params.get("iorflags",
                                                       '/run/ior/*')
                ior_args['iteration'] = self.params.get("iteration",
                                                        '/run/ior/*')
                ior_args['stripe_size'] = ior_param[2]
                ior_args['block_size'] = ior_param[3]
                ior_args['stripe_count'] = self.params.get("stripecount",
                                                           '/run/ior/*')
                ior_args['async_io'] = self.params.get("asyncio",
                                                       '/run/ior/*')
                ior_args['object_class'] = obj_type
                ior_args['slots'] = ior_param[4]

                # IOR is going to use the same --daos.stripeSize,
                # --daos.recordSize and transfer size.
                try:
                    size_before_ior = self.pool.pool_query()
                    ior_utils.run_ior(ior_args['client_hostfile'],
                                      ior_args['iorflags'],
                                      ior_args['iteration'],
                                      ior_args['block_size'],
                                      ior_args['stripe_size'],
                                      ior_args['pool_uuid'],
                                      ior_args['svc_list'],
                                      ior_args['stripe_size'],
                                      ior_args['stripe_size'],
                                      ior_args['stripe_count'],
                                      ior_args['async_io'],
                                      ior_args['object_class'],
                                      ior_args['basepath'],
                                      ior_args['slots'],
                                      filename=str(uuid.uuid4()),
                                      display_output=True)
                    self.verify_pool_size(size_before_ior, ior_args)
                except ior_utils.IorFailed as exe:
                    print(exe)
                    print(traceback.format_exc())
                    self.fail()
                try:
                    if self.pool_connect:
                        self.pool.disconnect()
                        self.pool_connect = False
                    if self.pool:
                        self.pool.destroy(1)
                except DaosApiError as exe:
                    print(exe)
                    self.fail("Failed to Destroy/Disconnect the Pool")
Example #21
File: seg_count.py Project: morsiee/daos
class SegCount(Test):
    """
    Test class Description: Runs IOR with different segment counts.

    :avocado: recursive
    """
    def __init__(self, *args, **kwargs):

        super(SegCount, self).__init__(*args, **kwargs)

        self.basepath = None
        self.context = None
        self.pool = None
        self.slots = None
        self.hostlist_servers = None
        self.hostfile_clients = None

    def setUp(self):
        # get paths from the build_vars generated by build
        with open('../../../.build_vars.json') as build_file:
            build_paths = json.load(build_file)
        self.basepath = os.path.normpath(build_paths['PREFIX'] + "/../")

        self.server_group = self.params.get("name", '/server_config/',
                                            'daos_server')

        # setup the DAOS python API
        self.context = DaosContext(build_paths['PREFIX'] + '/lib/')

        self.hostlist_servers = self.params.get("test_servers", '/run/hosts/*')
        hostfile_servers = (write_host_file.write_host_file(
            self.hostlist_servers, self.workdir))
        print("Host file servers is: {}".format(hostfile_servers))

        self.hostlist_clients = self.params.get("test_clients", '/run/hosts/*')
        self.slots = self.params.get("slots", '/run/ior/clientslots/*')
        self.hostfile_clients = (write_host_file.write_host_file(
            self.hostlist_clients, self.workdir, self.slots))
        print("Host file clients is: {}".format(self.hostfile_clients))

        self.agent_sessions = agent_utils.run_agent(self.basepath,
                                                    self.hostlist_servers,
                                                    self.hostlist_clients)
        server_utils.run_server(hostfile_servers, self.server_group,
                                self.basepath)

    def tearDown(self):
        try:
            if self.pool is not None and self.pool.attached:
                self.pool.destroy(1)
        finally:
            if self.agent_sessions:
                agent_utils.stop_agent(self.agent_sessions,
                                       self.hostlist_clients)
            server_utils.stop_server(hosts=self.hostlist_servers)

    def test_segcount(self):
        """
        Test ID: DAOS-1782
        Test Description: Run IOR with 32,64 and 128 clients with different
                          segment counts.
        Use Cases: Different combinations of 32/64/128 Clients, 8b/1k/4k
                   record size, 1k/4k/1m/8m transfersize and stripesize
                   and 16 async io.
        :avocado: tags=ior,eightservers,ior_segcount,performance
        """

        # parameters used in pool create
        createmode = self.params.get("mode", '/run/pool/createmode/*/')
        createuid = os.geteuid()
        creategid = os.getegid()
        createsetid = self.params.get("setname", '/run/pool/createset/')
        createsize = self.params.get("size", '/run/pool/createsize/')
        createsvc = self.params.get("svcn", '/run/pool/createsvc/')
        iteration = self.params.get("iter", '/run/ior/iteration/')
        ior_flags = self.params.get("F", '/run/ior/iorflags/')
        stripe_count = self.params.get("c", '/run/ior/stripecount/')
        async_io = self.params.get("a", '/run/ior/asyncio/')
        object_class = self.params.get("o", '/run/ior/objectclass/*/')
        record_size = self.params.get("r", '/run/ior/recordsize/*')
        block_size = (self.params.get(
            "b", '/run/ior/blocksize_transfersize_stripesize/*/'))
        transfer_size = (self.params.get(
            "t", '/run/ior/blocksize_transfersize_stripesize/*/'))
        stripe_size = (self.params.get(
            "s", '/run/ior/blocksize_transfersize_stripesize/*/'))

        if block_size == '4k' and self.slots == 16:
            segment_count = 491500
        elif block_size == '4k' and self.slots == 32:
            segment_count = 245750
        elif block_size == '4k' and self.slots == 64:
            segment_count = 122875
        elif block_size == '1m' and self.slots == 16:
            segment_count = 1920
        elif block_size == '1m' and self.slots == 32:
            segment_count = 960
        elif block_size == '1m' and self.slots == 64:
            segment_count = 480
        elif block_size == '4m' and self.slots == 16:
            segment_count = 480
        elif block_size == '4m' and self.slots == 32:
            segment_count = 240
        elif block_size == '4m' and self.slots == 64:
            segment_count = 120
        else:
            self.fail("No segment count defined for block_size={} "
                      "slots={}".format(block_size, self.slots))

        try:
            # initialize a python pool object then create the underlying
            # daos storage
            self.pool = DaosPool(self.context)
            self.pool.create(createmode, createuid, creategid, createsize,
                             createsetid, None, None, createsvc)

            pool_uuid = self.pool.get_uuid_str()
            svc_list = ""
            for item in range(createsvc):
                svc_list += str(int(self.pool.svc.rl_ranks[item])) + ":"
            svc_list = svc_list[:-1]

            ior_utils.run_ior(self.hostfile_clients, ior_flags, iteration,
                              block_size, transfer_size, pool_uuid, svc_list,
                              record_size, stripe_size, stripe_count, async_io,
                              object_class, self.basepath, self.slots,
                              segment_count)

        except (ior_utils.IorFailed, DaosApiError) as excep:
            self.fail("<SegCount Test FAILED>.{}".format(excep))
Example #22
File: soak.py Project: morsiee/daos
class Soak(Test):
    """
    Test class Description: DAOS Soak test cases
    """
    def job_done(self, args):
        """
        This is a callback function called when a job is done

        handle --which job, i.e. the job ID
        state  --string indicating job completion status
        """

        self.soak_results[args["handle"]] = args["state"]

    def create_pool(self):
        """
        Creates a pool that the various tests use for storage.
        """

        createmode = self.params.get("mode", '/run/pool1/createmode/*/')
        createuid = os.geteuid()
        creategid = os.getegid()
        createsetid = self.params.get("setname", '/run/pool1/createset/')
        createsize = self.params.get("size", '/run/pool1/createsize/')
        self.createsvc = self.params.get("svcn", '/run/pool1/createsvc/')

        self.pool = DaosPool(self.context)
        self.pool.create(createmode, createuid, creategid, createsize,
                         createsetid, None, None, self.createsvc)

    def build_ior_script(self, job):
        """
        Builds an IOR command string, which is then added to a slurm script

        job --which job to read in the yaml file

        """

        # read job info
        job_params = "/run/" + job + "/"
        job_name = self.params.get("name", job_params)
        job_nodes = self.params.get("nodes", job_params)
        job_processes = self.params.get("process_per_node", job_params)
        job_spec = self.params.get("jobspec", job_params)

        # read ior cmd info
        spec = "/run/" + job_spec + "/"
        iteration = self.params.get("iter", spec + 'iteration/')
        ior_flags = self.params.get("F", spec + 'iorflags/')
        transfer_size = self.params.get("t", spec + 'transfersize/')
        record_size = self.params.get("r", spec + 'recordsize/*')
        stripe_size = self.params.get("s", spec + 'stripesize/*')
        stripe_count = self.params.get("c", spec + 'stripecount/')
        async_io = self.params.get("a", spec + 'asyncio/')
        object_class = self.params.get("o", spec + 'objectclass/')

        self.partition = self.params.get("partition",
                                         '/run/hosts/test_machines/')

        pool_uuid = self.pool.get_uuid_str()
        tmplist = []
        svc_list = ""
        for i in range(self.createsvc):
            tmplist.append(int(self.pool.svc.rl_ranks[i]))
            svc_list += str(tmplist[i]) + ":"
        svc_list = svc_list[:-1]

        block_size = '1536m'

        if stripe_size == '8m':
            transfer_size = stripe_size

        hostfile = os.path.join(self.tmpdir, "ior_hosts_" + job_name)

        cmd = ior_utils.get_ior_cmd(ior_flags, iteration, block_size,
                                    transfer_size, pool_uuid, svc_list,
                                    record_size, stripe_size, stripe_count,
                                    async_io, object_class, self.basepath,
                                    hostfile, job_processes)

        output = os.path.join(self.tmpdir, job_name + "_results.out")
        script = slurm_utils.write_slurm_script(self.tmpdir, job_name, output,
                                                int(job_nodes), [cmd])
        return script

    def setUp(self):

        # intermediate results are stored in this dict;
        # start off with it empty
        self.soak_results = {}

        self.partition = None

        # initialize anything we rely on existing
        self.pool = None
        self.hostlist_servers = None

        # get paths from the build_vars generated by build
        with open('../../../.build_vars.json') as thefile:
            build_paths = json.load(thefile)
        self.basepath = os.path.normpath(build_paths['PREFIX'] + "/../")

        # using workdir was not successful; reason unclear for now
        self.tmpdir = self.basepath + "/install/tmp"
        try:
            os.makedirs(self.tmpdir)
        except OSError:
            # the directory may already exist
            pass

        # setup the DAOS python API
        self.context = DaosContext(build_paths['PREFIX'] + '/lib/')

        # start the servers
        self.hostlist_servers = self.params.get("daos_servers",
                                                '/run/hosts/test_machines/*')
        filename = write_host_file.write_host_file(self.hostlist_servers,
                                                   self.workdir)
        self.server_group = self.params.get("name", '/server_config/',
                                            'daos_server')
        print("Servers {} group {} basepath {}".format(self.hostlist_servers,
                                                       self.server_group,
                                                       self.basepath))
        server_utils.run_server(filename, self.server_group, self.basepath)

        # setup the storage
        self.create_pool()

    def tearDown(self):
        server_utils.stop_server(hosts=self.hostlist_servers)

    def test_soak_1(self):
        """
        Test ID: DAOS-2192
        Test Description: This test runs 2 DAOS API IOR jobs.
        :avocado: tags=soak1
        """

        try:
            # turn job parameters into slurm script
            script1 = self.build_ior_script('job1')

            # queue it up to run and register a callback to retrieve results
            job_id1 = slurm_utils.run_slurm_script(script1)
            slurm_utils.register_for_job_results(job_id1, self, maxwait=3600)

            # queue up a second job
            script2 = self.build_ior_script('job2')
            job_id2 = slurm_utils.run_slurm_script(script2)
            slurm_utils.register_for_job_results(job_id2, self, maxwait=3600)

            # wait for all the jobs to finish
            while len(self.soak_results) < 2:
                time.sleep(10)

            for job, result in self.soak_results.iteritems():
                if result != "COMPLETED":
                    self.fail(
                        "Soak job: {} didn't complete as expected: {}".format(
                            job, result))

        except (DaosApiError, ior_utils.IorFailed) as error:
            self.fail("<Soak Test 1 Failed>\n {}".format(error))
        finally:
            try:
                os.remove(script1)
            except StandardError:
                pass
            try:
                os.remove(script2)
            except StandardError:
                pass

    def test_soak_2(self):
        """
        Test ID: DAOS-2192
        Test Description: This test verifies that a dmg script can be submitted.
        :avocado: tags=soak2
        """

        script = None
        try:
            dmgcmds = dmg_utils.get_dmg_script("dmg1", self.params,
                                               self.basepath)

            s2_job1_name = self.params.get("name", '/run/job3/')
            s2_job1_nodes = self.params.get("nodes", '/run/job3/')

            output = os.path.join(self.tmpdir, s2_job1_name + "_results.out")

            script = slurm_utils.write_slurm_script(self.tmpdir, s2_job1_name,
                                                    output, s2_job1_nodes,
                                                    dmgcmds)
            job_id = slurm_utils.run_slurm_script(script)
            slurm_utils.register_for_job_results(job_id, self, maxwait=3600)

            # wait for all the jobs to finish
            while len(self.soak_results) < 1:
                time.sleep(10)

            for job, result in self.soak_results.iteritems():
                if result != "COMPLETED":
                    self.fail(
                        "Soak job: {} didn't complete as expected: {}".format(
                            job, result))

        except (DaosApiError, ior_utils.IorFailed) as error:
            self.fail("Soak Test 2 Failed/n {}".format(error))
        finally:
            try:
                os.remove(script)
            except StandardError:
                pass

    def test_soak_3(self):
        """
        Test ID: DAOS-2192
        Test Description: This time try a dmg command combined with an IOR run
        Use Cases:
        :avocado: tags=soak3
        """

        script1 = None
        script2 = None
        try:
            # retrieve IOR job parameters
            script1 = self.build_ior_script('job1')
            job_id1 = slurm_utils.run_slurm_script(script1)
            slurm_utils.register_for_job_results(job_id1, self, maxwait=3600)

            # now do the dmg job
            dmgcmds = dmg_utils.get_dmg_script("dmg1", self.params,
                                               self.basepath)

            s3_job2_name = self.params.get("name", '/run/job3/')
            s3_job2_nodes = self.params.get("nodes", '/run/job3/')
            output = os.path.join(self.tmpdir, s3_job2_name + "_results.out")
            script2 = slurm_utils.write_slurm_script(self.tmpdir, s3_job2_name,
                                                     output, s3_job2_nodes,
                                                     dmgcmds)
            job_id2 = slurm_utils.run_slurm_script(script2)
            slurm_utils.register_for_job_results(job_id2, self, maxwait=3600)

            # wait for all the jobs to finish
            while len(self.soak_results) < 2:
                time.sleep(10)

            for job, result in self.soak_results.iteritems():
                if result != "COMPLETED":
                    self.fail(
                        "Soak job: {} didn't complete as expected: {}".format(
                            job, result))

        except (DaosApiError, ior_utils.IorFailed) as error:
            self.fail("Soak Test 3 Failed\n {}".format(error))
        finally:
            try:
                os.remove(script1)
            except StandardError:
                pass
            try:
                os.remove(script2)
            except StandardError:
                pass
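The polling loops in the soak tests spin indefinitely if a job never reports back; a hedged sketch of the same wait with a deadline (wait_for_results is an illustrative helper, not part of slurm_utils).

import time

def wait_for_results(results, expected, timeout=3600, interval=10):
    # Poll a shared results dict until `expected` entries arrive;
    # return False if the deadline passes first.
    deadline = time.time() + timeout
    while len(results) < expected:
        if time.time() > deadline:
            return False
        time.sleep(interval)
    return True

With this helper the wait in test_soak_1 could read: if not wait_for_results(self.soak_results, 2): self.fail("soak jobs timed out").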
Example #23
class EightServers(Test):
    """
    Test class Description: Runs IOR with 8 servers.
    """
    def __init__(self, *args, **kwargs):

        super(EightServers, self).__init__(*args, **kwargs)

        self.basepath = None
        self.server_group = None
        self.context = None
        self.pool = None
        self.slots = None
        self.hostlist_servers = None
        self.hostfile_servers = None
        self.hostlist_clients = None
        self.hostfile_clients = None

    def setUp(self):
        self.agent_sessions = None
        # get paths from the build_vars generated by build
        with open('../../../.build_vars.json') as build_file:
            build_paths = json.load(build_file)
        self.basepath = os.path.normpath(build_paths['PREFIX'] + "/../")

        self.server_group = self.params.get("name", '/server_config/',
                                            'daos_server')

        # setup the DAOS python API
        self.context = DaosContext(build_paths['PREFIX'] + '/lib/')

        self.hostlist_servers = self.params.get("test_servers",
                                                '/run/hosts/test_machines/*')
        self.hostfile_servers = (write_host_file.write_host_file(
            self.hostlist_servers, self.workdir))
        print("Host file servers is: {}".format(self.hostfile_servers))

        self.hostlist_clients = self.params.get("test_clients",
                                                '/run/hosts/test_machines/*')
        self.slots = self.params.get("slots", '/run/ior/clientslots/*')
        self.hostfile_clients = (write_host_file.write_host_file(
            self.hostlist_clients, self.workdir, self.slots))
        print("Host file clients is: {}".format(self.hostfile_clients))

        self.agent_sessions = agent_utils.run_agent(self.basepath,
                                                    self.hostlist_servers,
                                                    self.hostlist_clients)
        server_utils.run_server(self.hostfile_servers, self.server_group,
                                self.basepath)

        if not distutils.spawn.find_executable("ior") and \
           int(str(self.name).split("-")[0]) == 1:
            ior_utils.build_ior(self.basepath)

    def tearDown(self):
        try:
            if self.pool is not None and self.pool.attached:
                self.pool.destroy(1)
        finally:
            if self.agent_sessions:
                agent_utils.stop_agent(self.agent_sessions,
                                       self.hostlist_clients)
            server_utils.stop_server(hosts=self.hostlist_servers)

    def executable(self, iorflags=None):
        """
        Executable function to run ior for sequential and random order
        """

        # parameters used in pool create
        createmode = self.params.get("mode", '/run/pool/createmode/*/')
        createuid = os.geteuid()
        creategid = os.getegid()
        createsetid = self.params.get("setname", '/run/pool/createset/')
        createsize = self.params.get("size", '/run/pool/createsize/')
        createsvc = self.params.get("svcn", '/run/pool/createsvc/')
        iteration = self.params.get("iter", '/run/ior/iteration/')
        block_size = self.params.get("blocksize", '/run/ior/clientslots/*')
        record_size = self.params.get("r", '/run/ior/recordsize/*')
        stripe_count = self.params.get("c", '/run/ior/stripecount/')
        async_io = self.params.get("a", '/run/ior/asyncio/')
        object_class = self.params.get("o", '/run/ior/objectclass/*/')
        transfer_size = self.params.get("t",
                                        '/run/ior/transfersize_stripesize/*/')
        stripe_size = self.params.get("s",
                                      '/run/ior/transfersize_stripesize/*/')
        expected_result = 'PASS'

        if record_size == '4k' and transfer_size == '1k':
            expected_result = 'FAIL'

        try:
            # initialize a python pool object then create the underlying
            # daos storage
            self.pool = DaosPool(self.context)
            self.pool.create(createmode, createuid, creategid, createsize,
                             createsetid, None, None, createsvc)

            pool_uuid = self.pool.get_uuid_str()
            svc_list = ""
            for item in range(createsvc):
                svc_list += str(int(self.pool.svc.rl_ranks[item])) + ":"
            svc_list = svc_list[:-1]

            print("svc_list: {}".format(svc_list))

            ior_utils.run_ior(self.hostfile_clients, iorflags, iteration,
                              block_size, transfer_size, pool_uuid, svc_list,
                              record_size, stripe_size, stripe_count, async_io,
                              object_class, self.basepath, self.slots)

            if expected_result == 'FAIL':
                self.fail("Test was expected to fail but it passed.\n")

        except (DaosApiError, ior_utils.IorFailed) as excep:
            print(excep)
            if expected_result != 'FAIL':
                self.fail("Test was expected to pass but it failed.\n")

    def test_sequential(self):
        """
        Test ID: DAOS-1264
        Test Description: Run IOR with 32,64 and 128 clients config
                          sequentially.
        Use Cases: Different combinations of 32/64/128 Clients, 8b/1k/4k
                   record size, 1k/4k/1m/8m transfersize and stripesize
                   and 16 async io.
        :avocado: tags=ior,eightservers,ior_sequential
        """
        ior_flags = self.params.get("F", '/run/ior/iorflags/sequential/')
        self.executable(ior_flags)

    def test_random(self):
        """
        Test ID: DAOS-1264
        Test Description: Run IOR with 32,64 and 128 clients config in random
                          order.
        Use Cases: Different combinations of 32/64/128 Clients, 8b/1k/4k
                   record size, 1k/4k/1m/8m transfersize and stripesize
                   and 16 async io.
        :avocado: tags=ior,eightservers,ior_random
        """
        ior_flags = self.params.get("F", '/run/ior/iorflags/random/')
        self.executable(ior_flags)
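The pass/fail expectation in executable() is a single hard-coded record-size/transfer-size pair; a small table-driven predicate (names illustrative) keeps any future known-bad combinations in one place.

KNOWN_BAD = {('4k', '1k')}  # (record_size, transfer_size) pairs expected to fail

def expected_result(record_size, transfer_size):
    return 'FAIL' if (record_size, transfer_size) in KNOWN_BAD else 'PASS'

assert expected_result('4k', '1k') == 'FAIL'
assert expected_result('8b', '1m') == 'PASS'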
Example #24
    def test_connect(self):
        """
        Pass bad parameters to pool connect

        :avocado: tags=pool,poolconnect,badparam,badconnect
        """

        # parameters used in pool create
        createmode = self.params.get("mode", '/run/connecttests/createmode/')
        createuid = self.params.get("uid", '/run/connecttests/uids/createuid/')
        creategid = self.params.get("gid", '/run/connecttests/gids/creategid/')
        createsetid = self.params.get("setname",
                                      '/run/connecttests/setnames/createset/')
        createsize = self.params.get("size",
                                     '/run/connecttests/psize/createsize/')

        # Accumulate a list of pass/fail indicators representing what is
        # expected for each parameter then "and" them to determine the
        # expected result of the test
        expected_for_param = []

        modelist = self.params.get("mode", '/run/connecttests/connectmode/*/')
        connectmode = modelist[0]
        expected_for_param.append(modelist[1])

        svclist = self.params.get("ranklist", '/run/connecttests/svrlist/*/')
        svc = svclist[0]
        expected_for_param.append(svclist[1])

        setlist = self.params.get("setname",
                                  '/run/connecttests/connectsetnames/*/')
        connectset = setlist[0]
        expected_for_param.append(setlist[1])

        uuidlist = self.params.get("uuid", '/run/connecttests/UUID/*/')
        connectuuid = uuidlist[0]
        expected_for_param.append(uuidlist[1])

        # if any parameter is FAIL then the test should FAIL; in this test
        # virtually every case should FAIL since we are testing bad parameters
        expected_result = 'PASS'
        for result in expected_for_param:
            if result == 'FAIL':
                expected_result = 'FAIL'
                break

        puuid = (ctypes.c_ubyte * 16)()
        psvc = RankList()
        pgroup = ctypes.create_string_buffer(0)
        pool = None
        try:
            # setup the DAOS python API
            with open('../../../.build_vars.json') as build_file:
                data = json.load(build_file)
            context = DaosContext(data['PREFIX'] + '/lib/')

            # initialize a python pool object then create the underlying
            # daos storage
            pool = DaosPool(context)
            pool.create(createmode, createuid, creategid,
                        createsize, createsetid, None)
            # save this uuid since we might trash it as part of the test
            ctypes.memmove(puuid, pool.uuid, 16)

            # trash the pool service rank list
            psvc.rl_ranks = pool.svc.rl_ranks
            psvc.rl_nr = pool.svc.rl_nr
            if svc != 'VALID':
                rl_ranks = ctypes.POINTER(ctypes.c_uint)()
                pool.svc = RankList(rl_ranks, 1)

            # trash the pool group value
            pgroup = pool.group
            if connectset == 'NULLPTR':
                pool.group = None

            # trash the UUID value in various ways
            if connectuuid == 'NULLPTR':
                pool.uuid = None
            if connectuuid == 'JUNK':
                pool.uuid[4] = 244

            pool.connect(connectmode)

            if expected_result in ['FAIL']:
                self.fail("Test was expected to fail but it passed.\n")

        except DaosApiError as excep:
            print(excep)
            print(traceback.format_exc())
            if expected_result in ['PASS']:
                self.fail("Test was expected to pass but it failed.\n")

        # cleanup the pool
        finally:
            if pool is not None and pool.attached == 1:
                # restore values in case we trashed them during test
                pool.svc.rl_ranks = psvc.rl_ranks
                pool.svc.rl_nr = psvc.rl_nr
                pool.group = pgroup
                ctypes.memmove(pool.uuid, puuid, 16)
                print("pool uuid after restore {}".format(
                    pool.get_uuid_str()))
                pool.destroy(1)
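The bad-connect test saves, trashes, and restores pool.uuid, pool.svc and pool.group by hand; the same save/restore discipline can be packaged as a context manager. A sketch, assuming the ctypes field layout used above and that pool.uuid still points at a 16-byte buffer on exit (preserve_pool_identity is hypothetical):

import ctypes
from contextlib import contextmanager

@contextmanager
def preserve_pool_identity(pool):
    # Snapshot the fields the test trashes and restore them on exit.
    saved_uuid = (ctypes.c_ubyte * 16)()
    ctypes.memmove(saved_uuid, pool.uuid, 16)
    saved_ranks, saved_nr = pool.svc.rl_ranks, pool.svc.rl_nr
    saved_group = pool.group
    try:
        yield pool
    finally:
        pool.svc.rl_ranks, pool.svc.rl_nr = saved_ranks, saved_nr
        pool.group = saved_group
        ctypes.memmove(pool.uuid, saved_uuid, 16)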
Example #25
class MultipleClients(Test):
    """
    Test class Description: Runs IOR with multiple clients.

    """
    def setUp(self):
        # get paths from the build_vars generated by build
        with open('../../../.build_vars.json') as f:
            build_paths = json.load(f)
        self.basepath = os.path.normpath(build_paths['PREFIX'] + "/../")

        self.server_group = self.params.get("server_group", '/server/',
                                            'daos_server')
        self.daosctl = self.basepath + '/install/bin/daosctl'

        # setup the DAOS python API
        self.Context = DaosContext(build_paths['PREFIX'] + '/lib/')
        self.pool = None

        self.hostlist_servers = self.params.get("test_servers",
                                                '/run/hosts/test_machines/*')
        self.hostfile_servers = WriteHostFile.WriteHostFile(
            self.hostlist_servers, self.workdir)
        print("Host file servers is: {}".format(self.hostfile_servers))

        self.hostlist_clients = self.params.get(
            "clients", '/run/hosts/test_machines/test_clients/*')
        self.hostfile_clients = WriteHostFile.WriteHostFile(
            self.hostlist_clients, self.workdir)
        print("Host file clients is: {}".format(self.hostfile_clients))

        ServerUtils.runServer(self.hostfile_servers, self.server_group,
                              self.basepath)

        if int(str(self.name).split("-")[0]) == 1:
            IorUtils.build_ior(self.basepath)

    def tearDown(self):
        try:
            if self.hostfile_clients is not None:
                os.remove(self.hostfile_clients)
            if self.hostfile_servers is not None:
                os.remove(self.hostfile_servers)
            if self.pool is not None and self.pool.attached:
                self.pool.destroy(1)
        finally:
            ServerUtils.stopServer(hosts=self.hostlist_servers)

    def test_multipleclients(self):
        """
        Test ID: DAOS-1263
        Test Description: Test IOR with 16 and 32 clients config.
        Use Cases: Different combinations of 16/32 Clients, 8b/1k/4k
                   record size, 1m/8m stripesize and 16 async io.
        :avocado: tags=ior,twoservers,multipleclients
        """

        # parameters used in pool create
        createmode = self.params.get("mode", '/run/pool/createmode/*/')
        createuid = os.geteuid()
        creategid = os.getegid()
        createsetid = self.params.get("setname", '/run/pool/createset/')
        createsize = self.params.get("size", '/run/pool/createsize/')
        createsvc = self.params.get("svcn", '/run/pool/createsvc/')
        iteration = self.params.get("iter", '/run/ior/iteration/')
        slots = self.params.get("slots", '/run/ior/clientslots/*')
        ior_flags = self.params.get("F", '/run/ior/iorflags/')
        transfer_size = self.params.get("t", '/run/ior/transfersize/')
        record_size = self.params.get("r", '/run/ior/recordsize/*')
        stripe_size = self.params.get("s", '/run/ior/stripesize/*')
        stripe_count = self.params.get("c", '/run/ior/stripecount/')
        async_io = self.params.get("a", '/run/ior/asyncio/')
        object_class = self.params.get("o", '/run/ior/objectclass/')

        try:
            # initialize a python pool object then create the underlying
            # daos storage
            self.pool = DaosPool(self.Context)
            self.pool.create(createmode, createuid, creategid, createsize,
                             createsetid, None, None, createsvc)

            with open(self.hostfile_clients) as f:
                new_text = f.read().replace('slots=1',
                                            'slots={0}'.format(slots))

            with open(self.hostfile_clients, "w") as f:
                f.write(new_text)

            pool_uuid = self.pool.get_uuid_str()
            rank_list = []
            svc_list = ""
            for i in range(createsvc):
                rank_list.append(int(self.pool.svc.rl_ranks[i]))
                svc_list += str(rank_list[i]) + ":"
            svc_list = svc_list[:-1]

            if slots == 8:
                block_size = '3g'
            elif slots == 16:
                block_size = '1536m'
            else:
                self.fail("No block size defined for slots={}".format(slots))

            if stripe_size == '8m':
                transfer_size = stripe_size

            IorUtils.run_ior(self.hostfile_clients, ior_flags, iteration,
                             block_size, transfer_size, pool_uuid, svc_list,
                             record_size, stripe_size, stripe_count,
                             async_io, object_class, self.basepath, slots)

        except (DaosApiError, IorUtils.IorFailed) as e:
            self.fail("<MultipleClients Test run Failed>\n {}".format(e))
Example #26
class ObjectMetadata(avocado.Test):
    """
    Test Class Description:
        Test the general Metadata operations and boundary conditions.
    """
    def setUp(self):
        self.pool = None
        self.hostlist = None
        self.hostfile_clients = None
        self.hostfile = None
        self.out_queue = None
        self.pool_connect = True

        with open('../../../.build_vars.json') as json_f:
            build_paths = json.load(json_f)

        self.basepath = os.path.normpath(build_paths['PREFIX'] + "/../")
        self.server_group = self.params.get("server_group", '/server/',
                                            'daos_server')
        self.context = DaosContext(build_paths['PREFIX'] + '/lib/')
        self.d_log = DaosLog(self.context)
        self.hostlist = self.params.get("servers", '/run/hosts/*')
        self.hostfile = WriteHostFile.WriteHostFile(self.hostlist,
                                                    self.workdir)
        hostlist_clients = self.params.get("clients", '/run/hosts/*')
        self.hostfile_clients = WriteHostFile.WriteHostFile(
            hostlist_clients, self.workdir)
        ServerUtils.runServer(self.hostfile, self.server_group, self.basepath)

        self.pool = DaosPool(self.context)
        self.pool.create(self.params.get("mode", '/run/pool/createmode/*'),
                         os.geteuid(),
                         os.getegid(),
                         self.params.get("size", '/run/pool/createsize/*'),
                         self.params.get("setname", '/run/pool/createset/*'),
                         nvme_size=self.params.get("size",
                                                   '/run/pool/nvmesize/*'))

    def tearDown(self):
        try:
            if self.pool_connect:
                self.pool.disconnect()
            if self.pool:
                self.pool.destroy(1)
        finally:
            ServerUtils.stopServer(hosts=self.hostlist)

    @avocado.skip("Skipping until DAOS-1936/DAOS-1946 is fixed.")
    def test_metadata_fillup(self):
        """
        Test ID: DAOS-1512
        Test Description: Test to verify no IO happens after metadata is full.
        :avocado: tags=metadata,metadata_fill,nvme,small
        """
        self.pool.connect(2)
        container = DaosContainer(self.context)
        self.d_log.debug("Fillup Metadata....")
        for _cont in range(NO_OF_MAX_CONTAINER):
            container.create(self.pool.handle)
        self.d_log.debug("Metadata Overload...")
        # This should fail with a "no metadata space" error.
        try:
            for _cont in range(250):
                container.create(self.pool.handle)
        except DaosApiError as exe:
            print(exe, traceback.format_exc())
            return

        self.fail("Test was expected to fail but it passed.\n")

    @avocado.skip("Skipping until DAOS-1965 is fixed.")
    @avocado.fail_on(DaosApiError)
    def test_metadata_addremove(self):
        """
        Test ID: DAOS-1512
        Test Description: Verify metadata release the space
                          after container delete.
        :avocado: tags=metadata,metadata_free_space,nvme,small
        """
        self.pool.connect(2)
        for k in range(10):
            container_array = []
            self.d_log.debug("Container Create Iteration {}".format(k))
            for cont in range(NO_OF_MAX_CONTAINER):
                container = DaosContainer(self.context)
                container.create(self.pool.handle)
                container_array.append(container)
            self.d_log.debug("Container Remove Iteration {} ".format(k))
            for cont in container_array:
                cont.destroy()

    def thread_control(self, threads, operation):
        """
        Start threads and wait until all thread execution is finished.
        Check the queue for a "FAIL" message and fail the avocado test.
        """
        self.d_log.debug("IOR {0} Threads Started -----".format(operation))
        for thrd in threads:
            thrd.start()
        for thrd in threads:
            thrd.join()

        while not self.out_queue.empty():
            if self.out_queue.get() == "FAIL":
                return "FAIL"
        self.d_log.debug("IOR {0} Threads Finished -----".format(operation))
        return "PASS"

    @avocado.fail_on(DaosApiError)
    def test_metadata_server_restart(self):
        """
        Test ID: DAOS-1512
        Test Description: This test will verify 2000 small-size IOR
                          containers after server restart. The test will
                          write IOR in 5 different threads for faster
                          execution time. Each thread will create 400
                          (8-byte) containers in the same pool. Restart the
                          servers, read the IOR container files written
                          previously, and validate data integrity by using
                          the IOR options "-R -G 1".
        :avocado: tags=metadata,metadata_ior,nvme,small
        """
        self.pool_connect = False
        files_per_thread = 400
        total_ior_threads = 5
        threads = []
        ior_args = {}

        createsvc = self.params.get("svcn", '/run/pool/createsvc/')
        svc_list = ""
        for i in range(createsvc):
            svc_list += str(int(self.pool.svc.rl_ranks[i])) + ":"
        svc_list = svc_list[:-1]

        ior_args['client_hostfile'] = self.hostfile_clients
        ior_args['pool_uuid'] = self.pool.get_uuid_str()
        ior_args['svc_list'] = svc_list
        ior_args['basepath'] = self.basepath
        ior_args['server_group'] = self.server_group
        ior_args['tmp_dir'] = self.workdir
        ior_args['iorwriteflags'] = self.params.get("F",
                                                    '/run/ior/iorwriteflags/')
        ior_args['iorreadflags'] = self.params.get("F",
                                                   '/run/ior/iorreadflags/')
        ior_args['iteration'] = self.params.get("iter", '/run/ior/iteration/')
        ior_args['stripe_size'] = self.params.get("s", '/run/ior/stripesize/*')
        ior_args['stripe_count'] = self.params.get("c",
                                                   '/run/ior/stripecount/')
        ior_args['async_io'] = self.params.get("a", '/run/ior/asyncio/')
        ior_args['object_class'] = self.params.get("o",
                                                   '/run/ior/objectclass/')
        ior_args['slots'] = self.params.get("slots", '/run/ior/clientslots/*')

        ior_args['files_per_thread'] = files_per_thread
        self.out_queue = Queue.Queue()

        # IOR write threads
        for i in range(total_ior_threads):
            threads.append(
                threading.Thread(target=ior_runner_thread,
                                 args=(self.out_queue, "Thread-{}".format(i),
                                       "write"),
                                 kwargs=ior_args))
        if self.thread_control(threads, "write") == "FAIL":
            self.d_log.error(" IOR write Thread FAIL")
            self.fail(" IOR write Thread FAIL")

        # Server restart
        ServerUtils.stopServer(hosts=self.hostlist)
        ServerUtils.runServer(self.hostfile, self.server_group, self.basepath)

        # Read IOR with verification using the same number of threads
        threads = []
        for i in range(total_ior_threads):
            threads.append(
                threading.Thread(target=ior_runner_thread,
                                 args=(self.out_queue, "Thread-{}".format(i),
                                       "read"),
                                 kwargs=ior_args))
        if self.thread_control(threads, "read") == "FAIL":
            self.d_log.error(" IOR write Thread FAIL")
            self.fail(" IOR read Thread FAIL")
Example #27
    def test_connect(self):
        """
        Pass bad parameters to pool connect

        :avocado: tags=all,pool,full_regression,tiny,badconnect
        """

        # parameters used in pool create
        createmode = self.params.get("mode", '/run/connecttests/createmode/')
        createuid = self.params.get("uid", '/run/connecttests/uids/createuid/')
        creategid = self.params.get("gid", '/run/connecttests/gids/creategid/')
        createsetid = self.params.get("setname",
                                      '/run/connecttests/setnames/createset/')
        createsize = self.params.get("size",
                                     '/run/connecttests/psize/createsize/')

        # Accumulate a list of pass/fail indicators representing what is
        # expected for each parameter then "and" them to determine the
        # expected result of the test
        expected_for_param = []

        modelist = self.params.get("mode", '/run/connecttests/connectmode/*/')
        connectmode = modelist[0]
        expected_for_param.append(modelist[1])

        svclist = self.params.get("ranklist", '/run/connecttests/svrlist/*/')
        svc = svclist[0]
        expected_for_param.append(svclist[1])

        setlist = self.params.get("setname",
                                  '/run/connecttests/connectsetnames/*/')
        connectset = setlist[0]
        expected_for_param.append(setlist[1])

        uuidlist = self.params.get("uuid", '/run/connecttests/UUID/*/')
        connectuuid = uuidlist[0]
        expected_for_param.append(uuidlist[1])

        # if any parameter is FAIL then the test should FAIL; in this test
        # virtually every case should FAIL since we are testing bad parameters
        expected_result = 'PASS'
        for result in expected_for_param:
            if result == 'FAIL':
                expected_result = 'FAIL'
                break

        puuid = (ctypes.c_ubyte * 16)()
        psvc = RankList()
        pgroup = ctypes.create_string_buffer(0)
        pool = None
        try:
            # setup the DAOS python API
            with open('../../../.build_vars.json') as build_file:
                data = json.load(build_file)
            context = DaosContext(data['PREFIX'] + '/lib/')

            # initialize a python pool object then create the underlying
            # daos storage
            pool = DaosPool(context)
            pool.create(createmode, createuid, creategid, createsize,
                        createsetid, None)
            # save this uuid since we might trash it as part of the test
            ctypes.memmove(puuid, pool.uuid, 16)

            # trash the pool service rank list
            psvc.rl_ranks = pool.svc.rl_ranks
            psvc.rl_nr = pool.svc.rl_nr
            if svc != 'VALID':
                rl_ranks = ctypes.POINTER(ctypes.c_uint)()
                pool.svc = RankList(rl_ranks, 1)

            # trash the pool group value
            pgroup = pool.group
            if connectset == 'NULLPTR':
                pool.group = None

            # trash the UUID value in various ways
            if connectuuid == 'NULLPTR':
                pool.uuid = None
            if connectuuid == 'JUNK':
                pool.uuid[4] = 244

            pool.connect(connectmode)

            if expected_result in ['FAIL']:
                self.fail("Test was expected to fail but it passed.\n")

        except DaosApiError as excep:
            print(excep)
            print(traceback.format_exc())
            if expected_result in ['PASS']:
                self.fail("Test was expected to pass but it failed.\n")

        # cleanup the pool
        finally:
            if pool is not None and pool.attached == 1:
                # restore values in case we trashed them during test
                pool.svc.rl_ranks = psvc.rl_ranks
                pool.svc.rl_nr = psvc.rl_nr
                pool.group = pgroup
                ctypes.memmove(pool.uuid, puuid, 16)
                print("pool uuid after restore {}".format(pool.get_uuid_str()))
                pool.destroy(1)
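Nearly every example assembles the service-rank string with manual concatenation and a trailing-colon strip; the same string can be built in one expression. A sketch, assuming rl_ranks is indexable as in the tests above (format_svc_list is illustrative):

def format_svc_list(rl_ranks, count):
    # Render the first `count` service ranks as 'r0:r1:...:rN'.
    return ":".join(str(int(rl_ranks[i])) for i in range(count))

assert format_svc_list([0, 1, 2], 3) == "0:1:2"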
Example #28
class EightServers(TestWithServers):
    """
    Test class Description: Runs IOR with 8 servers.
    :avocado: recursive
    """
    def setUp(self):
        super(EightServers, self).setUp()

        # setting client variables
        self.hostfile_clients = (write_host_file.write_host_file(
            self.hostlist_clients, self.workdir, None))

    def executable(self, iorflags=None):
        """
        Executable function to run ior for sequential and random order
        """

        # parameters used in pool create
        createmode = self.params.get("mode_RW", '/run/pool/createmode/')
        createuid = os.geteuid()
        creategid = os.getegid()
        createsetid = self.params.get("setname", '/run/pool/createset/')
        createsize = self.params.get("size", '/run/pool/createsize/')
        createsvc = self.params.get("svcn", '/run/pool/createsvc/')

        # ior parameters
        client_processes = self.params.get("np", '/run/ior/clientslots/*')
        iteration = self.params.get("iter", '/run/ior/iteration/')
        block_size = self.params.get("b", '/run/ior/transfersize_blocksize/*/')
        object_class = self.params.get("o", '/run/ior/objectclass/*/')
        transfer_size = self.params.get("t",
                                        '/run/ior/transfersize_blocksize/*/')

        try:
            # initialize a python pool object then create the underlying
            # daos storage
            self.pool = DaosPool(self.context)
            #print("self.context:{}".format(self.context))
            self.pool.create(createmode, createuid, creategid, createsize,
                             createsetid, None, None, createsvc)

            pool_uuid = self.pool.get_uuid_str()
            svc_list = ""
            for item in range(createsvc):
                svc_list += str(int(self.pool.svc.rl_ranks[item])) + ":"
            svc_list = svc_list[:-1]

            print("svc_list: {}".format(svc_list))

            ior_utils.run_ior_daos(self.hostfile_clients, iorflags, iteration,
                                   block_size, transfer_size, pool_uuid,
                                   svc_list, object_class, self.basepath,
                                   client_processes)

        except (DaosApiError, ior_utils.IorFailed) as excep:
            print(excep)
            self.fail("Test was expected to pass but it failed.\n")

    def test_sequential(self):
        """
        Jira ID: DAOS-1264
        Test Description: Run IOR with 1,64 and 128 clients config
                          sequentially.
        Use Cases: Different combinations of 1/64/128 Clients and
                   1K/4K/32K/128K/512K/1M transfersize.
        :avocado: tags=ior,eightservers,ior_sequential
        """
        ior_flags = self.params.get("F", '/run/ior/iorflags/sequential/')
        self.executable(ior_flags)

    def test_random(self):
        """
        Jira ID: DAOS-1264
        Test Description: Run IOR with 1,64 and 128 clients config in random
                          order.
        Use Cases: Different combinations of 1/64/128 Clients and
                   1K/4K/32K/128K/512K/1M transfersize.
        :avocado: tags=ior,eightservers,ior_random
        """
        ior_flags = self.params.get("F", '/run/ior/iorflags/random/')
        self.executable(ior_flags)
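Every example re-reads the same half-dozen pool-create parameters from the yaml; a hedged sketch of collecting them once (gather_pool_params is hypothetical, and the paths mirror the ones used above).

import os

def gather_pool_params(params):
    # Read the standard pool-create inputs from an avocado params object.
    return {
        'createmode': params.get("mode", '/run/pool/createmode/*'),
        'createuid': os.geteuid(),
        'creategid': os.getegid(),
        'createsetid': params.get("setname", '/run/pool/createset/'),
        'createsize': params.get("size", '/run/pool/createsize/'),
        'createsvc': params.get("svcn", '/run/pool/createsvc/'),
    }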
Example #29
class IorSingleServer(Test):
    """
    Tests IOR with Single Server config.

    """
    def setUp(self):
        # get paths from the build_vars generated by build
        with open('../../../.build_vars.json') as f:
            build_paths = json.load(f)
        self.basepath = os.path.normpath(build_paths['PREFIX'] + "/../")

        self.server_group = self.params.get("server_group", '/server/',
                                            'daos_server')
        self.daosctl = self.basepath + '/install/bin/daosctl'

        # setup the DAOS python API
        self.Context = DaosContext(build_paths['PREFIX'] + '/lib/')
        self.POOL = None

        self.hostlist_servers = self.params.get("test_servers",
                                                '/run/hosts/test_machines/*')
        self.hostfile_servers = WriteHostFile.WriteHostFile(
            self.hostlist_servers, self.workdir)
        print("Host file servers is: {}".format(self.hostfile_servers))

        self.hostlist_clients = self.params.get(
            "clients", '/run/hosts/test_machines/diff_clients/*')
        self.hostfile_clients = WriteHostFile.WriteHostFile(
            self.hostlist_clients, self.workdir)
        print("Host file clientsis: {}".format(self.hostfile_clients))

        ServerUtils.runServer(self.hostfile_servers, self.server_group,
                              self.basepath)

        if int(str(self.name).split("-")[0]) == 1:
            IorUtils.build_ior(self.basepath)

    def tearDown(self):
        try:
            if self.hostfile_clients is not None:
                os.remove(self.hostfile_clients)
            if self.hostfile_servers is not None:
                os.remove(self.hostfile_servers)
            if self.POOL is not None and self.POOL.attached:
                self.POOL.destroy(1)
        finally:
            ServerUtils.stopServer(hosts=self.hostlist_servers)

    def test_singleserver(self):
        """
        Test IOR with Single Server config.

        :avocado: tags=ior,singleserver
        """

        # parameters used in pool create
        createmode = self.params.get("mode", '/run/createtests/createmode/*/')
        createuid = os.geteuid()
        creategid = os.getegid()
        createsetid = self.params.get("setname", '/run/createtests/createset/')
        createsize = self.params.get("size", '/run/createtests/createsize/')
        createsvc = self.params.get("svcn", '/run/createtests/createsvc/')
        iteration = self.params.get("iter", '/run/ior/iteration/')
        ior_flags = self.params.get("F", '/run/ior/iorflags/')
        transfer_size = self.params.get("t", '/run/ior/transfersize/')
        record_size = self.params.get("r", '/run/ior/recordsize/')
        segment_count = self.params.get("s", '/run/ior/segmentcount/')
        stripe_count = self.params.get("c", '/run/ior/stripecount/')
        async_io = self.params.get("a", '/run/ior/asyncio/')
        object_class = self.params.get("o", '/run/ior/objectclass/')

        try:
            # initialize a python pool object then create the underlying
            # daos storage
            self.POOL = DaosPool(self.Context)
            self.POOL.create(createmode, createuid, creategid, createsize,
                             createsetid, None, None, createsvc)
            pool_uuid = self.POOL.get_uuid_str()
            print("pool_uuid: {}".format(pool_uuid))
            rank_list = []
            svc_list = ""
            for i in range(createsvc):
                rank_list.append(int(self.POOL.svc.rl_ranks[i]))
                svc_list += str(rank_list[i]) + ":"
            svc_list = svc_list[:-1]

            if len(self.hostlist_clients) == 1:
                block_size = '12g'
            elif len(self.hostlist_clients) == 2:
                block_size = '6g'
            elif len(self.hostlist_clients) == 4:
                block_size = '3g'
            else:
                self.fail("No block size defined for {} clients".format(
                    len(self.hostlist_clients)))

            IorUtils.run_ior(self.hostfile_clients, ior_flags, iteration,
                             block_size, transfer_size, pool_uuid, svc_list,
                             record_size, segment_count, stripe_count,
                             async_io, object_class, self.basepath)

        except (DaosApiError, IorUtils.IorFailed) as e:
            self.fail("<Single Server Test FAILED>\n {}".format(e))
Example #30
    def test_null_values(self):
        """
        Test ID: DAOS-1376

        Test Description: Pass a dkey and an akey that is null.

        :avocado: tags=all,object,full_regression,small,objupdatenull
        """
        try:
            # parameters used in pool create
            createmode = self.params.get("mode", '/run/conttests/createmode/')
            createsetid = self.params.get("setname",
                                          '/run/conttests/createset/')
            createsize = self.params.get("size", '/run/conttests/createsize/')
            createuid = os.geteuid()
            creategid = os.getegid()

            # initialize a python pool object then create the underlying
            # daos storage
            pool = DaosPool(self.context)
            pool.create(createmode, createuid, creategid, createsize,
                        createsetid, None)
            self.plog.info("Pool %s created.", pool.get_uuid_str())

            # need a connection to create container
            pool.connect(1 << 1)

            # create a container
            container = DaosContainer(self.context)
            container.create(pool.handle)
            self.plog.info("Container %s created.", container.get_uuid_str())

            # now open it
            container.open()

            # data used in the test
            thedata = "a string that I want to stuff into an object"
            thedatasize = len(thedata) + 1

        except DaosApiError as excep:
            print(excep)
            print(traceback.format_exc())
            self.fail("Test failed during setup .\n")

        try:
            # try using a null dkey
            dkey = None
            akey = "this is the akey"

            container.write_an_obj(thedata, thedatasize, dkey, akey, None,
                                   None, 2)

            container.close()
            container.destroy()
            pool.disconnect()
            pool.destroy(1)
            self.plog.error("Didn't get expected return code.")
            self.fail("Test was expected to return a -1003 but it has not.\n")

        except DaosApiError as excep:
            if '-1003' not in str(excep):
                container.close()
                container.destroy()
                pool.disconnect()
                pool.destroy(1)
                self.plog.error("Didn't get expected return code.")
                print(excep)
                print(traceback.format_exc())
                self.fail("Test was expected to get -1003 but it has not.\n")

        try:
            # try using a null akey/io descriptor
            dkey = "this is the dkey"
            akey = None
            container.write_an_obj(thedata, thedatasize, dkey, akey, None,
                                   None, 2)
            self.fail("Test was expected to return a -1003 but it has not.\n")

        except DaosApiError as excep:
            if '-1003' not in str(excep):
                self.plog.error("Didn't get expected return code.")
                print(excep)
                print(traceback.format_exc())
                self.fail("Test was expected to get -1003 but it has not.\n")

        try:
            # lastly try passing no data
            thedata = None
            thedatasize = 0
            dkey = "this is the dkey"
            akey = "this is the akey"

            container.write_an_obj(thedata, thedatasize, dkey, akey, None,
                                   None, 2)
            self.plog.info("Update with no data worked")

        except DaosApiError as excep:
            container.close()
            container.destroy()
            pool.disconnect()
            pool.destroy(1)
            print(excep)
            print(traceback.format_exc())
            self.plog.error("Update with no data failed")
            self.fail("Update with no data failed.\n")

        container.close()
        container.destroy()
        pool.disconnect()
        pool.destroy(1)
        self.plog.info("Test Complete")
Example #31
class IorSingleServer(Test):
    """
    Tests IOR with Single Server config.
    :avocado: recursive
    """
    def setUp(self):
        # get paths from the build_vars generated by build
        with open('../../../.build_vars.json') as build_file:
            build_paths = json.load(build_file)
        self.basepath = os.path.normpath(build_paths['PREFIX'] + "/../")

        self.server_group = self.params.get("name", '/server_config/',
                                            'daos_server')
        self.daosctl = self.basepath + '/install/bin/daosctl'

        # setup the DAOS python API
        self.context = DaosContext(build_paths['PREFIX'] + '/lib/')
        self.pool = None

        self.hostlist_servers = self.params.get("test_servers",
                                                '/run/hosts/test_machines/*')
        self.hostfile_servers = (write_host_file.write_host_file(
            self.hostlist_servers, self.workdir))
        print("Host file servers is: {}".format(self.hostfile_servers))

        self.hostlist_clients = self.params.get("test_clients",
                                                '/run/hosts/test_machines/*')
        self.hostfile_clients = (write_host_file.write_host_file(
            self.hostlist_clients, self.workdir, None))
        print("Host file clientsis: {}".format(self.hostfile_clients))

        self.agent_sessions = agent_utils.run_agent(self.basepath,
                                                    self.hostlist_servers,
                                                    self.hostlist_clients)
        server_utils.run_server(self.hostfile_servers, self.server_group,
                                self.basepath)

    def tearDown(self):
        try:
            if self.hostfile_clients is not None:
                os.remove(self.hostfile_clients)
            if self.hostfile_servers is not None:
                os.remove(self.hostfile_servers)
            if self.pool is not None and self.pool.attached:
                self.pool.destroy(1)
        finally:
            if self.agent_sessions:
                agent_utils.stop_agent(self.agent_sessions,
                                       self.hostlist_clients)
            server_utils.stop_server(hosts=self.hostlist_servers)

    def test_singleserver(self):
        """
        Test IOR with Single Server config.

        :avocado: tags=ior,singleserver
        """

        # parameters used in pool create
        createmode = self.params.get("mode", '/run/createtests/createmode/*/')
        createuid = os.geteuid()
        creategid = os.getegid()
        createsetid = self.params.get("setname", '/run/createtests/createset/')
        createsize = self.params.get("size", '/run/createtests/createsize/')
        createsvc = self.params.get("svcn", '/run/createtests/createsvc/')

        # ior parameters
        client_processes = self.params.get("np",
                                           '/run/ior/client_processes/*/')
        iteration = self.params.get("iter", '/run/ior/iteration/')
        ior_flags = self.params.get("F", '/run/ior/iorflags/')
        transfer_size = self.params.get("t",
                                        '/run/ior/transfersize_blocksize/*/')
        block_size = self.params.get("b", '/run/ior/transfersize_blocksize/*/')
        object_class = self.params.get("o", '/run/ior/objectclass/')

        try:
            # initialize a python pool object then create the underlying
            # daos storage
            self.pool = DaosPool(self.context)
            self.pool.create(createmode, createuid, creategid, createsize,
                             createsetid, None, None, createsvc)
            pool_uuid = self.pool.get_uuid_str()
            print("pool_uuid: {}".format(pool_uuid))
            tmp_rank_list = []
            svc_list = ""
            for item in range(createsvc):
                tmp_rank_list.append(int(self.pool.svc.rl_ranks[item]))
                svc_list += str(tmp_rank_list[item]) + ":"
            svc_list = svc_list[:-1]

            ior_utils.run_ior_daos(self.hostfile_clients, ior_flags, iteration,
                                   block_size, transfer_size, pool_uuid,
                                   svc_list, object_class, self.basepath,
                                   client_processes)

        except (DaosApiError, ior_utils.IorFailed) as excep:
            self.fail("<Single Server Test FAILED>\n {}".format(excep))
Example #32
    def test_bad_handle(self):
        """
        Test ID: DAOS-1376

        Test Description: Pass a bogus object handle, should return bad handle.

        :avocado: tags=all,object,full_regression,small,objbadhand
        """
        try:
            # parameters used in pool create
            createmode = self.params.get("mode", '/run/conttests/createmode/')
            createsetid = self.params.get("setname",
                                          '/run/conttests/createset/')
            createsize = self.params.get("size", '/run/conttests/createsize/')
            createuid = os.geteuid()
            creategid = os.getegid()

            # initialize a python pool object then create the underlying
            # daos storage
            pool = DaosPool(self.context)
            pool.create(createmode, createuid, creategid, createsize,
                        createsetid, None)
            self.plog.info("Pool %s created.", pool.get_uuid_str())

            # need a connection to create container
            pool.connect(1 << 1)

            # create a container
            container = DaosContainer(self.context)
            container.create(pool.handle)
            self.plog.info("Container %s created.", container.get_uuid_str())

            # now open it
            container.open()

            # create an object and write some data into it
            thedata = "a string that I want to stuff into an object"
            thedatasize = len(thedata) + 1
            dkey = "this is the dkey"
            akey = "this is the akey"
            obj, dummy_tx = container.write_an_obj(thedata, thedatasize, dkey,
                                                   akey, None, None, 2)

            saved_oh = obj.obj_handle
            obj.obj_handle = 99999

            obj, dummy_tx = container.write_an_obj(thedata, thedatasize, dkey,
                                                   akey, obj, None, 2)

            obj.obj_handle = saved_oh
            container.close()
            container.destroy()
            pool.disconnect()
            pool.destroy(1)
            self.fail("Test was expected to return a -1002 but it has not.\n")

        except DaosApiError as excep:
            obj.obj_handle = saved_oh
            container.close()
            container.destroy()
            pool.disconnect()
            pool.destroy(1)
            self.plog.info("Test Complete")
            if '-1002' not in str(excep):
                print(excep)
                print(traceback.format_exc())
                self.fail("Test was expected to get -1002 but it has not.\n")
Example #33
class RebuildNoCap(TestWithServers):

    """
    Test Class Description:
    This class contains tests for pool rebuild.

    :avocado: recursive
    """

    def setUp(self):
        super(RebuildNoCap, self).setUp()
        # create a pool to test with
        createmode = self.params.get("mode", '/run/pool/createmode/')
        createuid = self.params.get("uid", '/run/pool/createuid/')
        creategid = self.params.get("gid", '/run/pool/creategid/')
        createsetid = self.params.get("setname", '/run/pool/createset/')
        createsize = self.params.get("size", '/run/pool/createsize/')
        self.pool = DaosPool(self.context)
        self.pool.create(createmode, createuid, creategid, createsize,
                         createsetid)
        uuid = self.pool.get_uuid_str()

        time.sleep(2)

        # stuff some bogus data into the pool
        how_many_bytes = long(self.params.get("datasize",
                                              '/run/testparams/datatowrite/'))
        exepath = self.prefix +\
                 "/../src/tests/ftest/util/write_some_data.py"
        cmd = "export DAOS_POOL={0}; export DAOS_SVCL=1; mpirun"\
              " --np 1 --host {1} {2} {3} testfile".format(
                  uuid, self.hostlist_servers[0], exepath, how_many_bytes)
        subprocess.call(cmd, shell=True)

    def tearDown(self):
        """ cleanup after the test """

        try:
            if self.pool:
                self.pool.destroy(1)
        finally:
            super(RebuildNoCap, self).tearDown()


    def test_rebuild_no_capacity(self):
        """
        :avocado: tags=pool,rebuild,nocap
        """
        try:
            print("\nsetup complete, starting test\n")

            # create a server object that references one of our pool target hosts
            # and then kill it
            svr_to_kill = int(self.params.get("rank_to_kill",
                                              '/run/testparams/ranks/'))
            d_server = DaosServer(self.context, bytes(self.server_group),
                                  svr_to_kill)

            time.sleep(1)
            d_server.kill(1)

            # exclude the target from the dead server
            self.pool.exclude([svr_to_kill])

            # exclude should trigger rebuild, check
            self.pool.connect(1 << 1)
            status = self.pool.pool_query()
            if status.pi_ntargets != len(self.hostlist_servers):
                self.fail("target count wrong.\n")
            if status.pi_ndisabled != 1:
                self.fail("disabled target count wrong.\n")

            # the pool should be too full to start a rebuild so
            # expecting an error
            # not sure yet specifically what error
            if status.pi_rebuild_st.rs_errno == 0:
                self.fail("expecting rebuild to fail but it didn't.\n")

        except DaosApiError as excep:
            print(excep)
            print(traceback.format_exc())
            self.fail("Expecting to pass but test has failed.\n")
Example #34
File: nvme_io.py Project: morsiee/daos
class NvmeIo(TestWithServers):
    """
    Test Class Description:
        Test the general Metadata operations and boundary conditions.
    :avocado: recursive
    """

    def setUp(self):
        super(NvmeIo, self).setUp()

        # initialize variables
        self.out_queue = None
        self.pool_connect = False

    def verify_pool_size(self, original_pool_info, ior_args):
        """
        Function is to validate the pool size
        original_pool_info: Pool info prior to IOR
        ior_args: IOR args to calculate the file size
        """
        #Get the current pool size for comparison
        current_pool_info = self.pool.pool_query()
        #if transfer size is >= 4K, pool size will be verified against NVMe,
        #else it will be checked against SCM
        if ior_args['transfer_size'] >= 4096:
            print("Size is >= 4K, size verification will be done with NVMe size")
            storage_index = 1
        else:
            print("Size is < 4K, size verification will be done with SCM size")
            storage_index = 0

        free_pool_size = (
            original_pool_info.pi_space.ps_space.s_free[storage_index]
            - current_pool_info.pi_space.ps_space.s_free[storage_index])

        obj_multiplier = 1
        replica_number = re.findall(r'\d+', ior_args['object_class'])
        if replica_number:
            obj_multiplier = int(replica_number[0])
        expected_pool_size = (ior_args['client_processes'] *
                              ior_args['block_size'] * obj_multiplier)

        if free_pool_size < expected_pool_size:
            raise DaosTestError(
                'Pool Free Size did not match Actual = {} Expected = {}'
                .format(free_pool_size, expected_pool_size))

    @avocado.fail_on(DaosApiError)
    def test_nvme_io(self):
        """
        Test ID: DAOS-2082
        Test Description: Test will run IOR with standard and non standard
        sizes.IOR will be run for all Object type supported. Purpose is to
        verify pool size (SCM and NVMe) for IOR file.
        This test is running multiple IOR on same server start instance.
        :avocado: tags=nvme,nvme_io,large
        """
        ior_args = {}

        tests = self.params.get("ior_sequence", '/run/ior/*')
        object_type = self.params.get("object_type", '/run/ior/*')
        #Loop for every IOR object type
        for obj_type in object_type:
            for ior_param in tests:
                #there is an issue with NVMe if transfer size > 64M; skip
                #these sizes for now
                if ior_param[2] > 67108864:
                    print("Transfer sizes > 64M currently fail, see DAOS-1264")
                    continue

                self.pool = DaosPool(self.context)
                self.pool.create(self.params.get("mode",
                                                 '/run/pool/createmode/*'),
                                 os.geteuid(),
                                 os.getegid(),
                                 ior_param[0],
                                 self.params.get("setname",
                                                 '/run/pool/createset/*'),
                                 nvme_size=ior_param[1])
                self.pool.connect(1 << 1)
                self.pool_connect = True
                createsvc = self.params.get("svcn", '/run/pool/createsvc/')
                svc_list = ""
                for i in range(createsvc):
                    svc_list += str(int(self.pool.svc.rl_ranks[i])) + ":"
                svc_list = svc_list[:-1]

                ior_args['client_hostfile'] = self.hostfile_clients
                ior_args['pool_uuid'] = self.pool.get_uuid_str()
                ior_args['svc_list'] = svc_list
                ior_args['basepath'] = self.basepath
                ior_args['server_group'] = self.server_group
                ior_args['tmp_dir'] = self.workdir
                ior_args['iorflags'] = self.params.get("iorflags",
                                                       '/run/ior/*')
                ior_args['iteration'] = self.params.get("iteration",
                                                        '/run/ior/*')
                ior_args['transfer_size'] = ior_param[2]
                ior_args['block_size'] = ior_param[3]
                ior_args['object_class'] = obj_type
                ior_args['client_processes'] = ior_param[4]

                try:
                    size_before_ior = self.pool.pool_query()
                    ior_utils.run_ior_daos(ior_args['client_hostfile'],
                                           ior_args['iorflags'],
                                           ior_args['iteration'],
                                           ior_args['block_size'],
                                           ior_args['transfer_size'],
                                           ior_args['pool_uuid'],
                                           ior_args['svc_list'],
                                           ior_args['object_class'],
                                           ior_args['basepath'],
                                           ior_args['client_processes'],
                                           cont_uuid=str(uuid.uuid4()),
                                           display_output=True)
                    self.verify_pool_size(size_before_ior, ior_args)
                except ior_utils.IorFailed as exe:
                    print(exe)
                    print(traceback.format_exc())
                    self.fail("IOR run failed: {}".format(exe))
                try:
                    if self.pool_connect:
                        self.pool.disconnect()
                        self.pool_connect = False
                    if self.pool:
                        self.pool.destroy(1)
                except DaosApiError as exe:
                    print(exe)
                    self.fail("Failed to Destroy/Disconnect the Pool")
Example #35
class LlnlMpi4py(Test):
    """
    Runs LLNL and MPI4PY test suites.
    """
    def setUp(self):
        # initialising variables
        self.basepath = None
        self.server_group = None
        self.context = None
        self.pool = None
        self.mpio = None
        self.agent_sessions = None
        self.hostlist_servers = None
        self.hostfile_servers = None
        self.hostlist_clients = None
        self.hostfile_clients = None
        self.createsvc = None

        # get paths from the build_vars generated by build
        with open('../../../.build_vars.json') as var_file:
            build_paths = json.load(var_file)
        self.basepath = os.path.normpath(build_paths['PREFIX'] + "/../")

        self.server_group = self.params.get("name", '/server_config/',
                                            'daos_server')

        # setup the DAOS python API
        self.context = DaosContext(build_paths['PREFIX'] + '/lib/')

        self.hostlist_servers = self.params.get("test_servers", '/run/hosts/')
        self.hostfile_servers = write_host_file.write_host_file(
            self.hostlist_servers, self.workdir)
        print("Host file servers is: {}".format(self.hostfile_servers))

        self.hostlist_clients = self.params.get("test_clients", '/run/hosts/')
        self.hostfile_clients = write_host_file.write_host_file(
            self.hostlist_clients, self.workdir, None)
        print("Host file clients is: {}".format(self.hostfile_clients))

        self.agent_sessions = AgentUtils.run_agent(self.basepath,
                                                   self.hostlist_servers,
                                                   self.hostlist_clients)
        # start servers
        server_utils.run_server(self.hostfile_servers, self.server_group,
                                self.basepath)
        try:
            # parameters used in pool create
            createmode = self.params.get("mode", '/run/pool/createmode/*/')
            createuid = os.geteuid()
            creategid = os.getegid()
            createsetid = self.params.get("setname", '/run/pool/createset/')
            createsize = self.params.get("size", '/run/pool/createsize/')
            self.createsvc = self.params.get("svcn", '/run/pool/createsvc/')

            # initialize a python pool object then create the underlying
            # daos storage
            self.pool = DaosPool(self.context)
            self.pool.create(createmode, createuid, creategid, createsize,
                             createsetid, None, None, self.createsvc)
        except (DaosApiError) as excep:
            self.fail("<Test Failed at pool create> \n{1}".format(excep))

    def tearDown(self):
        try:
            if self.pool is not None:
                self.pool.destroy(1)
        finally:
            if self.agent_sessions:
                AgentUtils.stop_agent(self.hostlist_clients,
                                      self.agent_sessions)
        server_utils.stop_server(hosts=self.hostlist_servers)

    def run_test(self, test_repo, test_name):
        """
        Executable function to be used by test functions below
        test_repo       --location of test repository
        test_name       --name of the test to be run
        """
        # initialize MpioUtils
        self.mpio = MpioUtils()
        if not self.mpio.mpich_installed(self.hostlist_clients):
            self.fail("Exiting Test: Mpich not installed")

        try:
            # initialise test specific variables
            client_processes = self.params.get("np", '/run/client_processes/')

            # obtaining pool uuid and svc list
            pool_uuid = self.pool.get_uuid_str()
            svc_list = ""
            for i in range(self.createsvc):
                svc_list += str(int(self.pool.svc.rl_ranks[i])) + ":"
            svc_list = svc_list[:-1]

            # running tests
            self.mpio.run_llnl_mpi4py(self.basepath, self.hostfile_clients,
                                      pool_uuid, test_repo, test_name,
                                      client_processes)

            # Parsing output to look for failures
            # stderr directed to stdout
            stdout = self.logdir + "/stdout"
            searchfile = open(stdout, "r")
            error_message = [
                "non-zero exit code", "MPI_Abort", "MPI_ABORT", "ERROR"
            ]

            for line in searchfile:
                for i in range(len(error_message)):
                    if error_message[i] in line:
                        self.fail("Test Failed with error_message: {}".format(
                            error_message[i]))

        except (MpioFailed, DaosApiError) as excep:
            self.fail("<{0} Test Failed> \n{1}".format(test_name, excep))

    def test_llnl(self):
        """
        Test ID: DAOS-2231
        Run LLNL test provided in mpich package
        Testing various I/O functions provided in llnl test suite
        :avocado: tags=mpio,llnlmpi4py,llnl
        """
        test_repo = self.params.get("llnl", '/run/test_repo/')
        self.run_test(test_repo, "llnl")

    def test_mpi4py(self):
        """
        Test ID: DAOS-2231
        Run mpi4py io test provided in mpi4py package
        Testing various I/O functions provided in mpi4py test suite
        :avocado: tags=mpio,llnlmpi4py,mpi4py
        """
        test_repo = self.params.get("mpi4py", '/run/test_repo/')
        self.run_test(test_repo, "mpi4py")
Example #36
class EightServers(Test):
    """
    Test class Description: Runs IOR with 8 servers.

    """

    def __init__(self, *args, **kwargs):

        super(EightServers, self).__init__(*args, **kwargs)

        self.basepath = None
        self.server_group = None
        self.context = None
        self.pool = None
        self.num_procs = None
        self.hostlist_servers = None
        self.hostfile_servers = None
        self.hostlist_clients = None
        self.hostfile_clients = None
        self.mpio = None

    def setUp(self):
        # get paths from the build_vars generated by build
        with open('../../../.build_vars.json') as build_file:
            build_paths = json.load(build_file)
        self.basepath = os.path.normpath(build_paths['PREFIX'] + "/../")
        print("<<{}>>".format(self.basepath))
        self.server_group = self.params.get("name", '/server_config/',
                                            'daos_server')

        # setup the DAOS python API
        self.context = DaosContext(build_paths['PREFIX'] + '/lib/')

        self.hostlist_servers = self.params.get("test_servers",
                                                '/run/hosts/test_machines/*')
        self.hostfile_servers = (
            write_host_file.write_host_file(self.hostlist_servers,
                                            self.workdir))
        print("Host file servers is: {}".format(self.hostfile_servers))

        self.hostlist_clients = self.params.get("test_clients",
                                                '/run/hosts/test_machines/*')
        self.num_procs = self.params.get("np", '/run/ior/client_processes/*')
        self.hostfile_clients = (
            write_host_file.write_host_file(self.hostlist_clients, self.workdir,
                                            None))
        print("Host file clients is: {}".format(self.hostfile_clients))

        server_utils.run_server(self.hostfile_servers, self.server_group,
                                self.basepath)

    def tearDown(self):
        try:
            if self.pool is not None and self.pool.attached:
                self.pool.destroy(1)
        finally:
            server_utils.stop_server(hosts=self.hostlist_servers)

    def executable(self, iorflags=None):
        """
        Executable function to run ior for ssf and fpp
        """

        # parameters used in pool create
        createmode = self.params.get("mode", '/run/pool/createmode/*/')
        createuid = os.geteuid()
        creategid = os.getegid()
        createsetid = self.params.get("setname", '/run/pool/createset/')
        createscm_size = self.params.get("scm_size", '/run/pool/createsize/')
        createnvme_size = self.params.get("nvme_size", '/run/pool/createsize/')
        createsvc = self.params.get("svcn", '/run/pool/createsvc/')
        iteration = self.params.get("iter", '/run/ior/iteration/')
        block_size = self.params.get("b", '/run/ior/transfersize_blocksize/*/')
        transfer_size = self.params.get("t",
                                        '/run/ior/transfersize_blocksize/*/')

        try:
            # initialize MpioUtils
            self.mpio = MpioUtils()
            if not self.mpio.mpich_installed(self.hostlist_clients):
                self.fail("Exiting Test: Mpich not installed")

            #print self.mpio.mpichinstall
            # initialize a python pool object then create the underlying
            # daos storage
            self.pool = DaosPool(self.context)
            self.pool.create(createmode, createuid, creategid,
                             createscm_size, createsetid, None, None, createsvc,
                             createnvme_size)

            pool_uuid = self.pool.get_uuid_str()
            svc_list = ""
            for i in range(createsvc):
                svc_list += str(int(self.pool.svc.rl_ranks[i])) + ":"
            svc_list = svc_list[:-1]

            print ("svc_list: {}".format(svc_list))

            ior_utils.run_ior_mpiio(self.basepath, self.mpio.mpichinstall,
                                    pool_uuid, svc_list, self.num_procs,
                                    self.hostfile_clients, iorflags, iteration,
                                    transfer_size, block_size, True)

        except (DaosApiError, MpioFailed) as excep:
            print(excep)
            self.fail("<EightServers Test FAILED>\n {}".format(excep))

    def test_ssf(self):
        """
        Test ID: DAOS-2121
        Test Description: Run IOR with 1,64 and 128 clients config in ssf mode.
        Use Cases: Different combinations of 1/64/128 Clients,
                   1K/4K/32K/128K/512K/1M transfersize and block size of 32M
                   for 1K transfer size and 128M for rest.
        :avocado: tags=ior,mpiio,eightservers,ior_ssf
        """
        ior_flags = self.params.get("F", '/run/ior/iorflags/ssf/')
        self.executable(ior_flags)

    def test_fpp(self):
        """
        Test ID: DAOS-2121
        Test Description: Run IOR with 1,64 and 128 clients config in fpp mode.
        Use Cases: Different combinations of 1/64/128 Clients,
                   1K/4K/32K/128K/512K/1M transfersize and block size of 32M
                   for 1K transfer size and 128M for rest.
        :avocado: tags=ior,mpiio,eightservers,ior_fpp
        """
        ior_flags = self.params.get("F", '/run/ior/iorflags/fpp/')
        self.executable(ior_flags)
Example #37
    def test_array_obj(self):
        """
        Test ID: DAOS-961

        Test Description: Writes an array to an object and then reads it
        back and verifies it.

        :avocado: tags=object,arrayobj,regression,vm,small
        """
        try:
            # parameters used in pool create
            createmode = self.params.get("mode", '/run/pool_params/createmode/')
            createsetid = self.params.get("setname",
                                          '/run/pool_params/createset/')
            createsize = self.params.get("size", '/run/pool_params/createsize/')
            createuid = os.geteuid()
            creategid = os.getegid()

            # initialize a python pool object then create the underlying
            # daos storage
            pool = DaosPool(self.context)
            pool.create(createmode, createuid, creategid,
                        createsize, createsetid, None)
            self.plog.info("Pool %s created.", pool.get_uuid_str())

            # need a connection to create container
            pool.connect(1 << 1)

            # create a container
            container = DaosContainer(self.context)
            container.create(pool.handle)
            self.plog.info("Container %s created.", container.get_uuid_str())

            # now open it
            container.open()

            # do a query and compare the UUID returned from create with
            # that returned by query
            container.query()

            if container.get_uuid_str() != c_uuid_to_str(
                    container.info.ci_uuid):
                self.fail("Container UUID did not match the one in info\n")

            # create an object and write some data into it
            thedata = []
            thedata.append("data string one")
            thedata.append("data string two")
            thedata.append("data string tre")
            dkey = "this is the dkey"
            akey = "this is the akey"

            self.plog.info("writing array to dkey >%s< akey >%s<.", dkey, akey)
            oid, epoch = container.write_an_array_value(thedata, dkey, akey,
                                                        obj_cls=3)

            # read the data back and make sure its correct
            length = len(thedata[0])
            thedata2 = container.read_an_array(len(thedata), length+1,
                                               dkey, akey, oid, epoch)
            if thedata[0][0:length-1] != thedata2[0][0:length-1]:
                self.plog.error("Data mismatch")
                self.plog.error("Wrote: >%s<", thedata[0])
                self.plog.error("Read: >%s<", thedata2[0])
                self.fail("Write data, read it back, didn't match\n")

            if thedata[2][0:length-1] != thedata2[2][0:length-1]:
                self.plog.error("Data mismatch")
                self.plog.error("Wrote: >%s<", thedata[2])
                self.plog.error("Read: >%s<", thedata2[2])
                self.fail("Write data, read it back, didn't match\n")

            container.close()

            # wait a few seconds and then destroy
            time.sleep(5)
            container.destroy()

            # cleanup the pool
            pool.disconnect()
            pool.destroy(1)
            self.plog.info("Test Complete")

        except DaosApiError as excep:
            self.plog.error("Test Failed, exception was thrown.")
            print(excep)
            print(traceback.format_exc())
            self.fail("Test was expected to pass but it failed.\n")
Example #38
class LlnlMpi4pyHdf5(TestWithServers):
    """
    Runs LLNL, MPI4PY and HDF5 test suites.
    :avocado: recursive
    """

    def setUp(self):
        super(LlnlMpi4pyHdf5, self).setUp()
        # initialising variables
        self.mpio = None
        self.hostfile_clients = None

        # setting client variables
        self.hostfile_clients = write_host_file.write_host_file(
            self.hostlist_clients, self.workdir, None)
        try:
            # parameters used in pool create
            createmode = self.params.get("mode", '/run/pool/createmode/*/')
            createuid = os.geteuid()
            creategid = os.getegid()
            createsetid = self.params.get("setname", '/run/pool/createset/')
            createsize = self.params.get("size", '/run/pool/createsize/')
            self.createsvc = self.params.get("svcn", '/run/pool/createsvc/')

            # initialize a python pool object then create the underlying
            # daos storage
            self.pool = DaosPool(self.context)
            self.pool.create(createmode, createuid, creategid,
                             createsize, createsetid, None, None,
                             self.createsvc)
        except (DaosApiError) as excep:
            self.fail("<Test Failed at pool create> \n{1}".format(excep))

    def run_test(self, test_repo, test_name):
        """
        Executable function to be used by test functions below
        test_repo       --location of test repository
        test_name       --name of the test to be run
        """
        # initialize MpioUtils
        self.mpio = MpioUtils()
        if not self.mpio.mpich_installed(self.hostlist_clients):
            self.fail("Exiting Test: Mpich not installed")

        try:
            # initialise test specific variables
            client_processes = self.params.get("np", '/run/client_processes/')

            # obtaining pool uuid and svc list
            pool_uuid = self.pool.get_uuid_str()
            svc_list = ""
            for i in range(self.createsvc):
                svc_list += str(int(self.pool.svc.rl_ranks[i])) + ":"
            svc_list = svc_list[:-1]

            # running tests
            self.mpio.run_llnl_mpi4py_hdf5(self.basepath, self.hostfile_clients,
                                           pool_uuid, test_repo, test_name,
                                           client_processes)

            # Parsing output to look for failures
            # stderr directed to stdout
            stdout = self.logdir + "/stdout"
            searchfile = open(stdout, "r")
            error_message = ["non-zero exit code", "MPI_Abort", "MPI_ABORT",
                             "ERROR"]

            for line in searchfile:
                # pylint: disable=C0200
                for i in range(len(error_message)):
                    if error_message[i] in line:
                        self.fail("Test Failed with error_message: {}"
                                  .format(error_message[i]))

        except (MpioFailed, DaosApiError) as excep:
            self.fail("<{0} Test Failed> \n{1}".format(test_name, excep))

    @skipForTicket("CORCI-635")
    def test_llnl(self):
        """
        Jira ID: DAOS-2231
        Test Description: Run LLNL test suite.
        Testing various I/O functions provided in llnl test suite
        such as:-
        test_collective, test_datareps, test_errhandlers,
        test_filecontrol, test_localpointer, test_manycomms,
        test_manyopens, test_openclose, test_openmodes,
        test_nb_collective, test_nb_localpointer, test_nb_rdwr,
        test_nb_readwrite, test_rdwr, test_readwrite

        :avocado: tags=all,mpiio,smoke,pr,small,llnlmpi4py
        """
        test_repo = self.params.get("llnl", '/run/test_repo/')
        self.run_test(test_repo, "llnl")

    @skipForTicket("CORCI-635")
    def test_mpi4py(self):
        """
        Jira ID: DAOS-2231
        Test Description: Run mpi4py io test provided in mpi4py package
        Testing various I/O functions provided in mpi4py test suite
        such as:-
        testReadWriteAt, testIReadIWriteAt, testReadWrite
        testIReadIWrite, testReadWriteAtAll, testIReadIWriteAtAll
        testReadWriteAtAllBeginEnd, testReadWriteAll
        testIReadIWriteAll, testReadWriteAllBeginEnd

        :avocado: tags=all,mpiio,pr,small,llnlmpi4py,mpi4py
        """
        test_repo = self.params.get("mpi4py", '/run/test_repo/')
        self.run_test(test_repo, "mpi4py")

    @skipForTicket("CORCI-635")
    def test_hdf5(self):
        """
        Jira ID: DAOS-2252
        Test Description: Run HDF5 testphdf5 and t_shapesame provided in
        HDF5 package. Testing various I/O functions provided in HDF5 test
        suite such as:-
        test_fapl_mpio_dup, test_split_comm_access, test_page_buffer_access,
        test_file_properties, dataset_writeInd, dataset_readInd,
        dataset_writeAll, dataset_readAll, extend_writeInd, extend_readInd,
        extend_writeAll, extend_readAll,extend_writeInd2,none_selection_chunk,
        zero_dim_dset, multiple_dset_write, multiple_group_write,
        multiple_group_read, compact_dataset, collective_group_write,
        independent_group_read, big_dataset, coll_chunk1, coll_chunk2,
        coll_chunk3, coll_chunk4, coll_chunk5, coll_chunk6, coll_chunk7,
        coll_chunk8, coll_chunk9, coll_chunk10, coll_irregular_cont_write,
        coll_irregular_cont_read, coll_irregular_simple_chunk_write,
        coll_irregular_simple_chunk_read , coll_irregular_complex_chunk_write,
        coll_irregular_complex_chunk_read , null_dataset , io_mode_confusion,
        rr_obj_hdr_flush_confusion, chunk_align_bug_1,lower_dim_size_comp_test,
        link_chunk_collective_io_test, actual_io_mode_tests,
        no_collective_cause_tests, test_plist_ed, file_image_daisy_chain_test,
        test_dense_attr, test_partial_no_selection_coll_md_read

        :avocado: tags=mpio,llnlmpi4pyhdf5,hdf5
        """
        test_repo = self.params.get("hdf5", '/run/test_repo/')
        self.run_test(test_repo, "hdf5")
Example #39
File: InfoTests.py Project: chen0qi/daos
class InfoTests(Test):
    """
    Tests DAOS pool query.
    """
    def setUp(self):
        # get paths from the build_vars generated by build
        with open(
                os.path.join(os.path.dirname(os.path.realpath(__file__)),
                             "../../../../.build_vars.json")) as f:
            build_paths = json.load(f)
        self.basepath = os.path.normpath(build_paths['PREFIX'] + "/../")
        self.tmp = build_paths['PREFIX'] + '/tmp'
        self.server_group = self.params.get("server_group", '/server/',
                                            'daos_server')

        context = DaosContext(build_paths['PREFIX'] + '/lib/')
        print("initialized!!!\n")

        self.pool = DaosPool(context)
        self.hostlist = self.params.get("test_machines1", '/run/hosts/')
        self.hostfile = WriteHostFile.WriteHostFile(self.hostlist, self.tmp)
        ServerUtils.runServer(self.hostfile, self.server_group, self.basepath)

    def tearDown(self):
        # shut 'er down
        self.pool.destroy(1)
        ServerUtils.stopServer()
        os.remove(self.hostfile)

    def test_simple_query(self):
        """
        Test querying a pool created on a single server.

        :avocado: tags=pool,poolquery,infotest
        """
        # there is a presumption that this test lives in a specific spot
        # in the repo

        # create pool
        mode = self.params.get("mode", '/run/testparams/modes/*', 0731)
        uid = os.geteuid()
        gid = os.getegid()
        size = self.params.get("size", '/run/testparams/sizes/*', 0)
        tgt_list = None
        group = self.server_group

        self.pool.create(mode, uid, gid, size, group, tgt_list)
        PROGRESS_LOG.info("created pool")

        # connect to the pool
        flags = self.params.get("perms", '/run/testparams/connectperms/*', '')
        connect_flags = 1 << flags
        self.pool.connect(connect_flags)
        PROGRESS_LOG.info("connected to pool")

        # query the pool
        pool_info = self.pool.pool_query()
        PROGRESS_LOG.info("queried pool info")

        # check uuid
        uuid_str = c_uuid_to_str(pool_info.pi_uuid)
        PROGRESS_LOG.info("pool uuid pool_info.pi_uuid: {0}".format(uuid_str))
        PROGRESS_LOG.info("pool uuid saved in api at create time: "
                          "{0}".format(self.pool.get_uuid_str()))
        if uuid_str != self.pool.get_uuid_str():
            self.fail("UUID str does not match expected string")

        # validate size of pool is what we expect
        PROGRESS_LOG.info("pool should be {0} bytes".format(size))
        PROGRESS_LOG.info("pool actual space is {0} bytes".format(
            pool_info.pi_space))
        '''
        This check is currently disabled, as space is not implemented in
        DAOS C API yet.
        if size != pool_info.pi_space:
            self.fail("expected size {0} did not match actual size {1}"
                      .format(size, pool_info.pi_space))
        '''

        # number of targets
        PROGRESS_LOG.info("number of targets in pool: %s",
                          pool_info.pi_ntargets)
        if pool_info.pi_ntargets != len(self.hostlist):
            self.fail("found number of targets in pool did not match "
                      "expected number, 1. num targets: {0}".format(
                          pool_info.pi_ntargets))

        # number of disabled targets
        PROGRESS_LOG.info("number of disabled targets in pool: %s",
                          pool_info.pi_ndisabled)
        if pool_info.pi_ndisabled > 0:
            self.fail("found disabled targets, none expected to be disabled")

        # mode
        PROGRESS_LOG.info("pool mode: %s", pool_info.pi_mode)
        if pool_info.pi_mode != mode:
            self.fail("found different mode than expected. expected {0}, "
                      "found {1}.".format(mode, pool_info.pi_mode))

        # uid
        PROGRESS_LOG.info("expected uid is {0}".format(uid))
        if pool_info.pi_uid != uid:
            self.fail("found actual pool uid {0} does not match expected uid "
                      "{1}".format(pool_info.pi_uid, uid))

        # gid
        PROGRESS_LOG.info("expected gid is {0}".format(gid))
        if pool_info.pi_gid != gid:
            self.fail("found actual pool gid {0} does not match expected gid "
                      "{1}".format(pool_info.pi_gid, gid))
Example #40
class RebuildNoCap(Test):

    """
    Test Class Description:
    This class contains tests for pool rebuild.

    :avocado: tags=pool,rebuild,nocap
    """
    def setUp(self):
        """ setup for the test """
        self.agent_sessions = None
        # get paths from the build_vars generated by build
        with open('../../../.build_vars.json') as build_file:
            build_paths = json.load(build_file)
        self.context = DaosContext(build_paths['PREFIX'] + '/lib/')
        self.basepath = os.path.normpath(build_paths['PREFIX'] + "/../")

        # generate a hostfile
        self.hostlist = self.params.get("test_machines", '/run/hosts/')
        self.hostfile = write_host_file.write_host_file(self.hostlist,
                                                        self.workdir)

        # fire up the DAOS servers
        self.server_group = self.params.get("name", '/run/server_config/',
                                            'daos_server')
        self.agent_sessions = AgentUtils.run_agent(self.basepath, self.hostlist)
        server_utils.run_server(self.hostfile, self.server_group,
                                build_paths['PREFIX'] + '/../')

        # create a pool to test with
        createmode = self.params.get("mode", '/run/pool/createmode/')
        createuid = self.params.get("uid", '/run/pool/createuid/')
        creategid = self.params.get("gid", '/run/pool/creategid/')
        createsetid = self.params.get("setname", '/run/pool/createset/')
        createsize = self.params.get("size", '/run/pool/createsize/')
        self.pool = DaosPool(self.context)
        self.pool.create(createmode, createuid, creategid, createsize,
                         createsetid)
        uuid = self.pool.get_uuid_str()

        time.sleep(2)

        # stuff some bogus data into the pool
        how_many_bytes = long(self.params.get("datasize",
                                              '/run/testparams/datatowrite/'))
        exepath = os.path.join(build_paths['PREFIX'],
                               "/../src/tests/ftest/util/write_some_data.py")
        cmd = "export DAOS_POOL={0}; export DAOS_SVCL=1; mpirun"\
              " --np 1 --host {1} {2} {3} testfile".format(
                  uuid, self.hostlist[0], exepath, how_many_bytes)
        subprocess.call(cmd, shell=True)

    def tearDown(self):
        """ cleanup after the test """

        try:
            os.remove(self.hostfile)
            if self.pool:
                self.pool.destroy(1)
        finally:
            if self.agent_sessions:
                AgentUtils.stop_agent(self.hostlist, self.agent_sessions)
            server_utils.stop_server(hosts=self.hostlist)


    def test_rebuild_no_capacity(self):
        """
        :avocado: tags=pool,rebuild,nocap
        """
        try:
            print("\nsetup complete, starting test\n")

            # create a server object that references one of our pool target hosts
            # and then kill it
            svr_to_kill = int(self.params.get("rank_to_kill",
                                              '/run/testparams/ranks/'))
            d_server = DaosServer(self.context, bytes(self.server_group),
                                  svr_to_kill)

            time.sleep(1)
            d_server.kill(1)

            # exclude the target from the dead server
            self.pool.exclude([svr_to_kill])

            # exclude should trigger rebuild, check
            self.pool.connect(1 << 1)
            status = self.pool.pool_query()
            if status.pi_ntargets != len(self.hostlist):
                self.fail("target count wrong.\n")
            if status.pi_ndisabled != 1:
                self.fail("disabled target count wrong.\n")

            # the pool should be too full to start a rebuild so
            # expecting an error
            # not sure yet specifically what error
            if status.pi_rebuild_st.rs_errno == 0:
                self.fail("expecting rebuild to fail but it didn't.\n")

        except DaosApiError as excep:
            print(excep)
            print(traceback.format_exc())
            self.fail("Expecting to pass but test has failed.\n")
Example #41
class InfoTests(Test):
    """
    Tests DAOS pool query.
    """
    def setUp(self):
        # get paths from the build_vars generated by build
        with open(
                os.path.join(os.path.dirname(os.path.realpath(__file__)),
                             "../../../../.build_vars.json")) as f:
            build_paths = json.load(f)
        self.basepath = os.path.normpath(build_paths['PREFIX'] + "/../")
        self.tmp = build_paths['PREFIX'] + '/tmp'
        self.server_group = self.params.get("server_group", '/server/',
                                            'daos_server')

        context = DaosContext(build_paths['PREFIX'] + '/lib/')

        self.pool = DaosPool(context)
        self.d_log = DaosLog(context)
        self.hostlist = self.params.get("test_machines1", '/run/hosts/')
        self.hostfile = WriteHostFile.WriteHostFile(self.hostlist, self.tmp)
        ServerUtils.runServer(self.hostfile, self.server_group, self.basepath)

    def tearDown(self):
        # shut 'er down
        try:
            if self.pool:
                self.pool.destroy(1)
            os.remove(self.hostfile)
        finally:
            ServerUtils.stopServer(hosts=self.hostlist)

    def test_simple_query(self):
        """
        Test querying a pool created on a single server.

        :avocado: tags=pool,poolquery,infotest
        """
        # create pool
        mode = self.params.get("mode", '/run/testparams/modes/*', 0731)
        if mode == 73:
            self.cancel('Cancel the mode test 73 because of DAOS-1877')

        uid = os.geteuid()
        gid = os.getegid()
        size = self.params.get("size", '/run/testparams/sizes/*', 0)
        group = self.server_group

        self.pool.create(mode, uid, gid, size, group, None)

        # connect to the pool
        flags = self.params.get("perms", '/run/testparams/connectperms/*', '')
        connect_flags = 1 << flags
        self.pool.connect(connect_flags)

        # query the pool
        pool_info = self.pool.pool_query()

        # check uuid
        uuid_str = c_uuid_to_str(pool_info.pi_uuid)
        if uuid_str != self.pool.get_uuid_str():
            self.d_log.error("UUID str does not match expected string")
            self.fail("UUID str does not match expected string")
        '''
        # validate size of pool is what we expect
        This check is currently disabled, as space is not implemented in
        DAOS C API yet.
        if size != pool_info.pi_space:
            self.d_log.error("expected size {0} did not match actual size {1}"
                      .format(size, pool_info.pi_space))
            self.fail("expected size {0} did not match actual size {1}"
                      .format(size, pool_info.pi_space))
        '''

        # number of targets
        if pool_info.pi_ntargets != len(self.hostlist):
            self.d_log.error("found number of targets in pool did not match "
                             "expected number, 1. num targets: {0}".format(
                                 pool_info.pi_ntargets))
            self.fail("found number of targets in pool did not match "
                      "expected number, 1. num targets: {0}".format(
                          pool_info.pi_ntargets))

        # number of disabled targets
        if pool_info.pi_ndisabled > 0:
            self.d_log.error("found disabled targets, none expected to be")
            self.fail("found disabled targets, none expected to be disabled")

        # mode
        if pool_info.pi_mode != mode:
            self.d_log.error(
                "found different mode than expected. expected {0}, "
                "found {1}.".format(mode, pool_info.pi_mode))
            self.fail("found different mode than expected. expected {0}, "
                      "found {1}.".format(mode, pool_info.pi_mode))

        # uid
        if pool_info.pi_uid != uid:
            self.d_log.error(
                "found actual pool uid {0} does not match expected "
                "uid {1}".format(pool_info.pi_uid, uid))
            self.fail("found actual pool uid {0} does not match expected uid "
                      "{1}".format(pool_info.pi_uid, uid))

        # gid
        if pool_info.pi_gid != gid:
            self.d_log.error(
                "found actual pool gid {0} does not match expected "
                "gid {1}".format(pool_info.pi_gid, gid))
            self.fail("found actual pool gid {0} does not match expected gid "
                      "{1}".format(pool_info.pi_gid, gid))
Example #42
class DestroyRebuild(Test):

    """
    Test Class Description:
    This test verifies destruction of a pool that is rebuilding.

    :avocado: recursive
    """

    build_paths = []
    server_group = ""
    context = None
    pool = None
    hostfile_servers = ""

    def setUp(self):
        """ setup for the test """
        self.agent_sessions = None
        # get paths from the build_vars generated by build
        with open('../../../.build_vars.json') as build_file:
            build_paths = json.load(build_file)
        self.context = DaosContext(build_paths['PREFIX'] + '/lib/')
        self.basepath = os.path.normpath(build_paths['PREFIX'] + "/../")

        # generate a hostfile
        self.hostlist_servers = self.params.get("test_machines", '/run/hosts/')
        self.hostfile_servers = write_host_file.write_host_file(
            self.hostlist_servers, self.workdir)

        # fire up the DAOS servers
        self.server_group = self.params.get("name", '/run/server_config/',
                                            'daos_server')
        self.agent_sessions = agent_utils.run_agent(self.basepath,
                                                    self.hostlist_servers)
        server_utils.run_server(self.hostfile_servers, self.server_group,
                                build_paths['PREFIX'] + '/../')

        # create a pool to test with
        createmode = self.params.get("mode", '/run/pool/createmode/')
        createuid = self.params.get("uid", '/run/pool/createuid/')
        creategid = self.params.get("gid", '/run/pool/creategid/')
        createsetid = self.params.get("setname", '/run/pool/createset/')
        createsize = self.params.get("size", '/run/pool/createsize/')
        self.pool = DaosPool(self.context)
        self.pool.create(createmode, createuid, creategid, createsize,
                         createsetid)
        self.pool.get_uuid_str()

        time.sleep(2)

    def tearDown(self):
        """ cleanup after the test """

        try:
            os.remove(self.hostfile_servers)
            if self.pool:
                self.pool.destroy(1)
        finally:
            if self.agent_sessions:
                agent_utils.stop_agent(self.agent_sessions)
            server_utils.stop_server(hosts=self.hostlist_servers)


    def test_destroy_while_rebuilding(self):
        """
        :avocado: tags=pool,pooldestroy,rebuild,desreb
        """
        try:
            print("\nsetup complete, starting test\n")

            # create a server object that references one of our pool target hosts
            # and then kill it
            svr_to_kill = int(self.params.get("rank_to_kill",
                                              '/run/testparams/ranks/'))
            server = DaosServer(self.context, bytes(self.server_group),
                                svr_to_kill)

            print("created server ")

            # BUG if you don't connect the rebuild doesn't start correctly
            self.pool.connect(1 << 1)
            status = self.pool.pool_query()
            if status.pi_ntargets != len(self.hostlist_servers):
                self.fail("target count wrong.\n")
            if status.pi_ndisabled != 0:
                self.fail("disabled target count wrong.\n")

            print("connect ")

            time.sleep(1)
            server.kill(1)

            print("killed server ")

            # exclude the target from the dead server
            self.pool.exclude([svr_to_kill])

            print("exclude target ")

            #self.pool.disconnect()
            #print "disconnect "

            # the rebuild won't take long since there is no data so do
            # the destroy quickly
            self.pool.destroy(1)
            print("destroy ")

        except DaosApiError as excep:
            print(excep)
            print(traceback.format_exc())
            self.fail("Expecting to pass but test has failed.\n")
Example #43
    def test_array_obj(self):
        """
        Test ID: DAOS-961

        Test Description: Writes an array to an object and then reads it
        back and verifies it.

        :avocado: tags=object,arrayobj,regression,vm,small
        """
        try:
            # parameters used in pool create
            createmode = self.params.get("mode", '/run/conttests/createmode/')
            createsetid = self.params.get("setname",
                                          '/run/conttests/createset/')
            createsize = self.params.get("size", '/run/conttests/createsize/')
            createuid = os.geteuid()
            creategid = os.getegid()
            print("uid is {} gid is {}".format(createuid, creategid))

            # initialize a python pool object then create the underlying
            # daos storage
            pool = DaosPool(self.Context)
            pool.create(createmode, createuid, creategid, createsize,
                        createsetid, None)
            self.pl.info("Pool %s created.", pool.get_uuid_str())

            # need a connection to create container
            pool.connect(1 << 1)

            # create a container
            container = DaosContainer(self.Context)
            container.create(pool.handle)
            self.pl.info("Container %s created.", container.get_uuid_str())

            # now open it
            container.open()

            # do a query and compare the UUID returned from create with
            # that returned by query
            container.query()

            if container.get_uuid_str() != c_uuid_to_str(
                    container.info.ci_uuid):
                self.fail("Container UUID did not match the one in info\n")

            # create an object and write some data into it
            thedata = []
            thedata.append("data string one")
            thedata.append("data string two")
            thedata.append("data string tre")
            dkey = "this is the dkey"
            akey = "this is the akey"

            self.pl.info("writing array to dkey >%s< akey >%s<.", dkey, akey)
            oid, epoch = container.write_an_array_value(thedata, dkey, akey)

            # read the data back and make sure its correct
            length = len(thedata[0])
            thedata2 = container.read_an_array(len(thedata), length + 1, dkey,
                                               akey, oid, epoch)
            if thedata[0][0:length - 1] != thedata2[0][0:length - 1]:
                self.pl.error("Data mismatch")
                self.pl.error("Wrote: >%s<" (thedata[0]))
                self.pl.error("Read: >%s<" (thedata2[0]))
                self.fail("Write data, read it back, didn't match\n")

            if thedata[2][0:length - 1] != thedata2[2][0:length - 1]:
                self.pl.error("Data mismatch")
                self.pl.error("Wrote: >%s<" (thedata[2]))
                self.pl.error("Read: >%s<" (thedata2[2]))
                self.fail("Write data, read it back, didn't match\n")

            container.close()

            # wait a few seconds and then destroy
            time.sleep(5)
            container.destroy()

            # cleanup the pool
            pool.disconnect()
            pool.destroy(1)
            self.pl.info("Test Complete")

        except ValueError as e:
            self.pl.error("Test Failed, exception was thrown.")
            print(e)
            print(traceback.format_exc())
            self.fail("Test was expected to pass but it failed.\n")
Example #44
    def test_null_values(self):
        """
        Test ID: DAOS-1376

        Test Description: Pass a dkey and an akey that is null.

        :avocado: tags=object,objupdate,objupdatenull,regression,vm,small
        """
        try:
            # parameters used in pool create
            createmode = self.params.get("mode", '/run/conttests/createmode/')
            createsetid = self.params.get("setname",
                                          '/run/conttests/createset/')
            createsize = self.params.get("size", '/run/conttests/createsize/')
            createuid = os.geteuid()
            creategid = os.getegid()

            # initialize a python pool object then create the underlying
            # daos storage
            pool = DaosPool(self.context)
            pool.create(createmode, createuid, creategid,
                        createsize, createsetid, None)
            self.plog.info("Pool %s created.", pool.get_uuid_str())

            # need a connection to create container
            pool.connect(1 << 1)

            # create a container
            container = DaosContainer(self.context)
            container.create(pool.handle)
            self.plog.info("Container %s created.", container.get_uuid_str())

            # now open it
            container.open()

            # data used in the test
            thedata = "a string that I want to stuff into an object"
            thedatasize = len(thedata) + 1

        except DaosApiError as excep:
            print(excep)
            print(traceback.format_exc())
            self.fail("Test failed during setup .\n")

        try:
            # try using a null dkey
            dkey = None
            akey = "this is the akey"

            container.write_an_obj(thedata, thedatasize, dkey, akey, None,
                                   None, 2)

            container.close()
            container.destroy()
            pool.disconnect()
            pool.destroy(1)
            self.plog.error("Didn't get expected return code.")
            self.fail("Test was expected to return a -1003 but it has not.\n")

        except DaosApiError as excep:
            if '-1003' not in str(excep):
                container.close()
                container.destroy()
                pool.disconnect()
                pool.destroy(1)
                self.plog.error("Didn't get expected return code.")
                print(excep)
                print(traceback.format_exc())
                self.fail("Test was expected to get -1003 but it has not.\n")

        try:
            # try using a null akey/io descriptor
            dkey = "this is the dkey"
            akey = None
            container.write_an_obj(thedata, thedatasize, dkey, akey, None,
                                   None, 2)
            self.fail("Test was expected to return a -1003 but it has not.\n")

        except DaosApiError as excep:
            if '-1003' not in str(excep):
                self.plog.error("Didn't get expected return code.")
                print(excep)
                print(traceback.format_exc())
                self.fail("Test was expected to get -1003 but it has not.\n")

        try:
            # lastly try passing no data
            thedata = None
            thedatasize = 0
            dkey = "this is the dkey"
            akey = "this is the akey"

            container.write_an_obj(thedata, thedatasize, dkey, akey, None,
                                   None, 2)
            self.plog.info("Update with no data worked")

        except DaosApiError as excep:
            container.close()
            container.destroy()
            pool.disconnect()
            pool.destroy(1)
            print(excep)
            print(traceback.format_exc())
            self.plog.error("Update with no data failed")
            self.fail("Update with no data failed.\n")

        container.close()
        container.destroy()
        pool.disconnect()
        pool.destroy(1)
        self.plog.info("Test Complete")