Пример #1
0
    def write_object(self, container, record_qty, akey_size, dkey_size,
                     data_size, rank=None, obj_class=None, data_array_size=0):
        """Write an object to the container and verify it by reading it back.

        Args:
            container (TestContainer): container in which to write the object
            record_qty (int): the number of records to write
            akey_size (int): length of each randomly generated akey
            dkey_size (int): length of each randomly generated dkey
            data_size (int): length of each randomly generated data value
            rank (int, optional): rank. Defaults to None.
            obj_class (int, optional): daos object class. Defaults to None.
            data_array_size (int, optional): number of array entries to write
                per record; 0 writes a single value instead of an array.
                Defaults to 0.

        Raises:
            DaosTestError: if there was an error writing the object or the
                data read back does not match what was written

        """
        for _ in range(record_qty):
            # Generate keys that do not collide with any already in use
            akey = get_random_bytes(akey_size, self.get_akeys())
            dkey = get_random_bytes(dkey_size, self.get_dkeys())
            if data_array_size == 0:
                data = get_random_bytes(data_size)
            else:
                data = [
                    get_random_bytes(data_size)
                    for _ in range(data_array_size)]
            # Write single data to the container
            self.write_record(container, akey, dkey, data, rank, obj_class)
            # Verify the data was written correctly
            data_read = self.read_record(
                container, akey, dkey, data_size, data_array_size)
            if data != data_read:
                raise DaosTestError(
                    "Written data confirmation failed:"
                    "\n  wrote: {}\n  read:  {}".format(data, data_read))
Пример #2
0
def write_until_full(container):
    """Write until we get enospace back.

    Args:
        container (DaosContainer): container in which to write the data

    Returns:
        int: number of bytes written to the container

    """
    bytes_written = 0
    chunk_size = 2048
    _oid = None

    try:
        while True:
            # generate random keys and a random payload, then write them
            dkey = get_random_bytes(5)
            akey = get_random_bytes(5)
            payload = get_random_bytes(chunk_size)

            _oid = container.write_an_obj(payload, chunk_size, dkey, akey)
            bytes_written += chunk_size

            # collapse down the committed epochs
            container.slip_epoch()

    except ValueError as exp:
        # out of space surfaces as a ValueError; log it and stop writing
        getLogger().info(exp)

    return bytes_written
Пример #3
0
def write_quantity(container, size_in_bytes):
    """Write a specific number of bytes.

    Note:
        The minimum amount that will be written is 2048 bytes.

    Args:
        container (DaosContainer): which container to write to, it should be in
            an open state prior to the call
        size_in_bytes (int): total number of bytes to be written, although no
            less that 2048 will be written.

    Returns:
        int: number of bytes written to the container

    """
    chunk_size = 2048
    written = 0
    _oid = None

    try:
        # keep writing fixed-size records until the requested total is reached
        while written < size_in_bytes:
            dkey = get_random_bytes(5)
            akey = get_random_bytes(5)
            payload = get_random_bytes(chunk_size)

            _oid = container.write_an_obj(payload, chunk_size, dkey, akey)
            written += chunk_size

            # collapse down the committed epochs
            container.slip_epoch()

    except ValueError as exp:
        # out of space surfaces as a ValueError; log it and stop writing
        getLogger().info(exp)

    return written
Пример #4
0
def continuous_io(container, seconds):
    """Perform a combination of reads/writes for the specified time period.

    Args:
        container (DaosContainer): container in which to write the data
        seconds (int): how long to write data

    Returns:
        int: number of bytes written to the container

    Raises:
        ValueError: if a data mismatch is detected

    """
    deadline = time.time() + seconds
    oid = None
    written = 0
    record_size = 500

    while time.time() < deadline:
        # generate random keys and a random payload
        dkey = get_random_bytes(5)
        akey = get_random_bytes(5)
        payload = get_random_bytes(record_size)

        # write the record, then read it back through the same object
        oid = container.write_an_obj(payload, record_size, dkey, akey, oid, 5)
        readback = container.read_an_obj(record_size, dkey, akey, oid)

        # verify it came back correctly
        if readback.value != payload:
            raise ValueError("Data mismatch in ContinuousIo")

        # collapse down the committed epochs
        container.consolidate_epochs()

        written += record_size

    return written
Пример #5
0
    def create_data_set():
        """Create the large attribute dictionary.

        Returns:
            dict: a large attribute dictionary

        """
        # 1024 entries keyed by the stringified index, each holding a
        # randomly sized (1-100 byte) random value
        return {
            str(index).encode("utf-8"): get_random_bytes(
                random.randint(1, 100))
            for index in range(1024)
        }
Пример #6
0
def write_array_objects(container,
                        obj_qty,
                        rec_qty,
                        akey_size,
                        dkey_size,
                        data_size,
                        rank,
                        object_class,
                        log=None):
    """Write array objects to the container.

    Args:
        container (DaosContainer): the container in which to write objects
        obj_qty (int): the number of objects to create in the container
        rec_qty (int): the number of records to create in each object
        akey_size (int): the akey length
        dkey_size (int): the dkey length
        data_size (int): the length of data to write in each record
        rank (int): the server rank to which to write the records
        object_class: the daos object class passed to write_an_array_value
        log (DaosLog|None): object for logging messages

    Returns:
        list: a list of dictionaries containing the object, transaction
            number, and data written to the container

    Raises:
        DaosTestError: if an error is detected writing the objects or
            verifying the write of the objects

    """
    if log:
        log.info("Creating objects in the container")
    object_list = []
    for index in range(obj_qty):
        object_list.append({"obj": None, "record": []})
        for _ in range(rec_qty):
            # Generate keys that do not collide with those already used
            # by this object's records
            akey = get_random_bytes(
                akey_size,
                [record["akey"] for record in object_list[index]["record"]])
            dkey = get_random_bytes(
                dkey_size,
                [record["dkey"] for record in object_list[index]["record"]])
            # NOTE(review): data_size is reused as the array entry count, so
            # each record holds data_size values of data_size bytes each --
            # confirm this square shape is intentional
            data = [get_random_bytes(data_size) for _ in range(data_size)]
            object_list[index]["record"].append({
                "akey": akey,
                "dkey": dkey,
                "data": data
            })

            # Write the data to the container
            try:
                object_list[index]["obj"] = \
                    container.write_an_array_value(
                        data, dkey, akey, object_list[index]["obj"], rank,
                        object_class)
            except DaosApiError as error:
                raise DaosTestError(
                    "Error writing data (dkey={}, akey={}, data={}) to "
                    "the container: {}".format(dkey, akey, data,
                                               error)) from error

            # Verify the data was written to the container
            data_read = read_array_objects(container, data_size, data_size + 1,
                                           dkey, akey,
                                           object_list[index]["obj"])
            if data != data_read:
                raise DaosTestError("Written data confirmation failed:"
                                    "\n  wrote: {}\n  read:  {}".format(
                                        data, data_read))

    return object_list
Пример #7
0
    def test_no_space_cont_create(self):
        """JIRA ID: DAOS-1169 DAOS-7374

        Test Description:
            Purpose of the test is to verify pool and container behave as
            expected in the completely filled scenario.

        Use Case:
            Create Pool and Container.
            Fill the pool completely with different object sizes.
            Verify return code is as expected (-1007) when no more
            data can be written to the a container.
            Once Pool is completely filled, destroy the container
            and verify container can be destroyed in filled state.
            After deleting the container and when aggregation is
            complete, verify the returned space is close enough to
            the original free space.

        :avocado: tags=all,full_regression
        :avocado: tags=hw,small
        :avocado: tags=container
        :avocado: tags=fullpoolcontcreate
        """

        # full storage rc
        err = "-1007"

        # test params
        threshold_percent = self.params.get("threshold_percent", "/run/pool/*")

        # create pool and connect
        self.prepare_pool()

        # query the pool
        self.log.info("Pool Query before write")
        self.pool.set_query_data()
        self.log.info(
            "Pool %s query data: %s\n", self.pool.uuid, self.pool.query_data)

        # create a container
        self.add_container(self.pool)
        self.container.open()

        # get free space before write
        free_space_before = self.pool.get_pool_free_space()
        self.log.info("Pool free space before write: %s", free_space_before)

        # generate random dkey, akey each time
        # write 1M until no space, then 10K, etc. to fill pool quickly
        for obj_sz in [1048576, 10240, 10, 1]:
            write_count = 0
            while True:
                self.d_log.debug("writing obj {0} sz {1} to "
                                 "container".format(write_count, obj_sz))
                my_str = b"a" * obj_sz
                dkey = get_random_bytes(5)
                akey = get_random_bytes(5)
                try:
                    self.container.written_data.append(TestContainerData(False))
                    self.container.written_data[-1].write_record(
                        self.container, akey, dkey, my_str, obj_class='OC_SX')
                    self.d_log.debug("wrote obj {0}, sz {1}".format(write_count,
                                                                    obj_sz))
                    write_count += 1
                except DaosTestError as excep:
                    # fixed PEP 8 E713: "err not in" instead of "not err in"
                    if err not in repr(excep):
                        # unexpected error: close the container and fail
                        self.log.error("caught exception while writing "
                                       "object: %s", repr(excep))
                        self.container.close()
                        self.fail("caught exception while writing "
                                  "object: {}".format(repr(excep)))
                    # expected -1007: the pool is full for this object size,
                    # so move on to the next (smaller) size
                    self.log.info("pool is too full for %s byte "
                                  "objects", obj_sz)
                    break

        # query the pool
        self.log.info("Pool Query after filling")
        self.pool.set_query_data()
        self.log.info(
            "Pool %s query data: %s\n", self.pool.uuid, self.pool.query_data)

        # destroy container
        self.container.destroy()

        # check for free space to be returned back once aggregation is complete
        # checking for a closer returned space value instead of exact value
        # as the test is using scm only
        counter = 1
        threshold_value = free_space_before - (free_space_before *
                                               threshold_percent)
        free_space = self.pool.get_pool_free_space()
        while free_space < threshold_value:
            # try to wait for 4 x 30 secs for aggregation to be completed or
            # else exit the test with a failure.
            if counter > 4:
                self.log.info("Free space when test terminated: %s",
                              free_space)
                self.log.info("Threshold value when test terminated: %s",
                              threshold_value)
                self.fail("Aggregation did not complete as expected")
            time.sleep(30)
            free_space = self.pool.get_pool_free_space()
            counter += 1
Пример #8
0
    def test_rebuild_no_capacity(self):
        """Jira ID: DAOS-8846.

        Test Description:
            Create and connect to a pool and container. Full fill the pool
            container, verify the pool information after rebuild, make sure
            correct error of pool full status after rebuild.
            Test steps:
            (1)Check for pool and rebuild info
            (2)Display pool free space before write
            (3)Start write data to full fill the container
            (4)Display pool size after write before rebuild
            (5)Stop rank for rebuild
            (6)Wait for rebuild started
            (7)Poll and verify pool rebuild status with error after rebuild
            (8)Verify pool and rebuild info after rebuild

        Use Cases:
            Full fill pool and verify pool by query after rebuild.

        :avocado: tags=all,daily_regression
        :avocado: tags=hw,medium,ib2
        :avocado: tags=pool,rebuild
        :avocado: tags=no_cap

        """
        # Get the test params
        targets = self.params.get("targets", "/run/server_config/*")
        rank = self.params.get("rank_to_kill", "/run/rebuild/*")
        pool_query_timeout = self.params.get('pool_query_timeout',
                                             "/run/pool/*")
        interval = self.params.get('pool_query_interval', "/run/pool/*")
        test_data_list = self.params.get('test_data_list', "/run/pool/*")
        oclass = self.params.get('oclass', "/run/pool/*")
        # expected DAOS return code when the pool has no free space
        err_pool_full = -1007

        # Create a pool and container
        self.prepare_pool()
        self.add_container(self.pool)
        self.container.open()

        # make sure pool looks good before we start
        self.log.info("..(1)Check for pool and rebuild info ")
        pool_checks = {
            "pi_nnodes": len(self.hostlist_servers),
            "pi_ntargets": len(self.hostlist_servers) * targets,
            "pi_ndisabled": 0
        }
        rebuild_checks = {"rs_done": 1, "rs_obj_nr": 0, "rs_rec_nr": 0}
        self.assertTrue(self.pool.check_pool_info(**pool_checks),
                        "#Invalid pool information detected before rebuild")
        self.assertTrue(self.pool.check_rebuild_status(**rebuild_checks),
                        "#Invalid pool rebuild info detected before rebuild")

        # Display pool size before write
        free_space_before = self.pool.get_pool_free_space()
        self.log.info("..(2)Display pool free space before write: %s",
                      free_space_before)

        # Write data to full fill the pool that will not be able to be rebuilt
        self.log.info("..(3)Start write data to full fill the container")
        # running total of payload bytes attempted (counted even when the
        # final write of each size fails with the pool-full error)
        written_pload = 0
        for payload_size in test_data_list:
            write_count = 0
            while True:
                self.d_log.debug("writing obj {0} sz {1} to "
                                 "container".format(write_count, payload_size))
                my_str = b"A" * payload_size
                dkey = get_random_bytes(5)
                akey = get_random_bytes(5)
                try:
                    written_pload += payload_size
                    self.container.written_data.append(
                        TestContainerData(False))
                    self.container.written_data[-1].write_record(
                        self.container, akey, dkey, my_str, obj_class=oclass)
                    self.d_log.debug("wrote obj {0}, sz {1}".format(
                        write_count, payload_size))
                    write_count += 1
                except DaosTestError as excep:
                    if not str(err_pool_full) in repr(excep):
                        # unexpected error: close the container and fail
                        self.log.error(
                            "#caught exception while writing "
                            "object: %s", repr(excep))
                        self.container.close()
                        self.fail("#caught exception while writing "
                                  "object: {}".format(repr(excep)))
                    else:
                        # expected -1007: move on to the next payload size
                        self.log.info(
                            "..pool is too full for %s byte "
                            "objects", payload_size)
                        break

        # Display pool size after write
        free_space_after = self.pool.get_pool_free_space()
        self.log.info("..(4)Pool free space after write: %s", free_space_after)

        # query the pool before rebuild
        self.log.info("....Pool query after filling, written_pload=%s",
                      written_pload)
        self.pool.set_query_data()
        self.log.info("..Pool %s query data: %s\n", self.pool.uuid,
                      self.pool.query_data)

        # Start rebuild
        # NOTE(review): this overrides the "rank_to_kill" test parameter read
        # above, so the configured value is never used -- confirm intentional
        rank = 1
        self.log.info("..(5)Stop rank for rebuild")
        self.server_managers[0].stop_ranks([rank], self.d_log, force=True)

        # Wait for rebuild started
        self.log.info("..(6)Wait for rebuild started")
        self.pool.wait_for_rebuild(True, interval=1)

        # Verify for pool full error after rebuild
        self.log.info("..(7)Poll and verify pool rebuild status with error")
        status = 0
        retry = 1
        start = time()
        # poll the rebuild status until it reports -1007 or the timeout lapses
        while status != err_pool_full and (time() - start <
                                           pool_query_timeout):
            self.pool.set_query_data()
            status = self.pool.query_data["response"]["rebuild"]["status"]
            state = self.pool.query_data["response"]["rebuild"]["state"]
            self.log.info("===>%s, qdata=%s", retry, self.pool.query_data)
            self.log.info("===>%s status=%s, state=%s", retry, status, state)
            sleep(interval)
            retry += 1
        if status != err_pool_full:
            self.fail("#Pool full with rebuild, error -1007 did not show")

        # Check for pool and rebuild info after rebuild
        self.log.info("..(8)Verify pool and rebuild info after rebuild")
        pool_checks["pi_ndisabled"] = ">0"
        rebuild_checks["rs_obj_nr"] = ">0"
        rebuild_checks["rs_rec_nr"] = ">0"
        rebuild_checks["rs_done"] = 0
        self.assertTrue(self.pool.check_pool_info(**pool_checks),
                        "#Invalid pool information detected before rebuild")
        self.assertTrue(self.pool.check_rebuild_status(**rebuild_checks),
                        "#Invalid pool rebuild info detected before rebuild")
        self.log.info("=Test Passed, expected error -1007 detected after "
                      "rebuild with no pool capacity")
Пример #9
0
    def test_basic_snapshot(self):
        """Test ID: DAOS-1370.

        Test Description:
            Create a pool, container in the pool, object in the container, add
            one key:value to the object.
            Commit the transaction. Perform a snapshot create on the container.
            Create 500 additional transactions with a small change to the object
            in each and commit each after the object update is done.
            Verify the snapshot is still available and the contents remain in
            their original state.

        :avocado: tags=all,daily_regression,snap,basicsnap
        """
        # Set up the pool and container.
        try:
            # initialize a pool object then create the underlying
            # daos storage, and connect
            self.prepare_pool()

            # create a container
            self.container = DaosContainer(self.context)
            self.container.create(self.pool.pool.handle)

            # now open it
            self.container.open()

        except DaosApiError as error:
            self.log.error(str(error))
            self.fail("Test failed before snapshot taken")

        try:
            # create an object and write some data into it
            obj_cls = self.params.get("obj_class", '/run/object_class/*')
            thedata = b"Now is the winter of our discontent made glorious"
            datasize = len(thedata) + 1
            dkey = b"dkey"
            akey = b"akey"
            obj = self.container.write_an_obj(thedata,
                                              datasize,
                                              dkey,
                                              akey,
                                              obj_cls=obj_cls)
            # close the object; it is reopened later to read the snapshot
            obj.close()
            # Take a snapshot of the container
            self.snapshot = DaosSnapshot(self.context)
            self.snapshot.create(self.container.coh)
            self.log.info("Wrote an object and created a snapshot")

        except DaosApiError as error:
            self.fail("Test failed during the initial object write.\n{0}"
                      .format(error))

        # Make 500 changes to the data object. The write_an_obj function does a
        # commit when the update is complete
        try:
            self.log.info(
                "Committing 500 additional transactions to the same KV")
            more_transactions = 500
            while more_transactions:
                # each iteration writes a new randomly sized random value
                size = random.randint(1, 250) + 1
                new_data = get_random_bytes(size)
                new_obj = self.container.write_an_obj(
                    new_data, size, dkey, akey, obj_cls=obj_cls)
                new_obj.close()
                more_transactions -= 1
        except DaosApiError as error:
            self.fail(
                "Test failed during the write of 500 objects.\n{0}".format(
                    error))

        # List the snapshot
        try:
            reported_epoch = self.snapshot.list(self.container.coh)
        except DaosApiError as error:
            self.fail(
                "Test was unable to list the snapshot\n{0}".format(error))

        # Make sure the snapshot reflects the original epoch
        if self.snapshot.epoch != reported_epoch:
            self.fail(
                "The snapshot epoch returned from snapshot list is not the "
                "same as the original epoch snapshotted.")

        self.log.info(
            "After 500 additional commits the snapshot is still available")

        # Make sure the data in the snapshot is the original data.
        # Get a handle for the snapshot and read the object at dkey, akey.
        try:
            # reopen the object that was closed after the initial write
            obj.open()
            snap_handle = self.snapshot.open(self.container.coh)
            thedata2 = self.container.read_an_obj(
                datasize, dkey, akey, obj, txn=snap_handle.value)
        except DaosApiError as error:
            self.fail(
                "Error when retrieving the snapshot data.\n{0}".format(error))

        # Compare the snapshot to the originally written data.
        if thedata2.value != thedata:
            self.fail(
                "The data in the snapshot is not the same as the original data")

        self.log.info(
            "The snapshot data matches the data originally written.")

        # Now destroy the snapshot
        try:
            self.snapshot.destroy(self.container.coh)
            self.log.info("Snapshot successfully destroyed")

        except DaosApiError as error:
            self.fail(str(error))
Пример #10
0
def launch_snapshot(self, pool, name):
    """Create a basic snapshot of the reserved pool and verify its data.

    Writes an object, snapshots the container, writes more data, then reads
    the object back through the snapshot handle and compares it with the
    originally written data.  The pass/fail status is reported via
    self.harasser_job_done().

    Args:

        self (obj): soak obj
        pool (obj): TestPool obj
        name (str): harasser

    """
    self.log.info("<<<PASS %s: %s started at %s>>>", self.loop, name,
                  time.ctime())
    status = True
    # Create container
    container = TestContainer(pool)
    container.namespace = "/run/container_reserved/*"
    container.get_params(self)
    container.create()
    container.open()
    obj_cls = self.params.get("object_class", '/run/container_reserved/*')

    # write data to object
    data_pattern = get_random_bytes(500)
    datasize = len(data_pattern) + 1
    dkey = b"dkey"
    akey = b"akey"
    obj = container.container.write_an_obj(data_pattern,
                                           datasize,
                                           dkey,
                                           akey,
                                           obj_cls=obj_cls)
    obj.close()
    # Take a snapshot of the container
    snapshot = DaosSnapshot(self.context)
    try:
        snapshot.create(container.container.coh)
    except (RuntimeError, TestFail, DaosApiError) as error:
        self.log.error("Snapshot failed", exc_info=error)
        status &= False
    if status:
        # fixed typo in log message: "Sanpshot" -> "Snapshot"
        self.log.info("Snapshot Created")
        # write more data to object
        data_pattern2 = get_random_bytes(500)
        datasize2 = len(data_pattern2) + 1
        dkey = b"dkey"
        akey = b"akey"
        obj2 = container.container.write_an_obj(data_pattern2,
                                                datasize2,
                                                dkey,
                                                akey,
                                                obj_cls=obj_cls)
        obj2.close()
        self.log.info("Wrote additional data to container")
        # open the snapshot and read the data
        obj.open()
        snap_handle = snapshot.open(container.container.coh)
        try:
            # read at the original datasize: the snapshot should still hold
            # the first write, not data_pattern2
            data_pattern3 = container.container.read_an_obj(
                datasize, dkey, akey, obj, txn=snap_handle.value)
        except (RuntimeError, TestFail, DaosApiError) as error:
            self.log.error("Error when retrieving the snapshot data %s", error)
            status &= False
        if status:
            # Compare the snapshot to the original written data.
            if data_pattern3.value != data_pattern:
                self.log.error("Snapshot data miscompare")
                status &= False
    # Destroy the snapshot
    try:
        snapshot.destroy(container.container.coh)
    except (RuntimeError, TestFail, DaosApiError) as error:
        self.log.error("Failed to destroy snapshot %s", error)
        status &= False
    # cleanup
    container.close()
    container.destroy()
    params = {"name": name, "status": status, "vars": {}}
    with H_LOCK:
        self.harasser_job_done(params)
    self.log.info("<<<PASS %s: %s completed at %s>>>\n", self.loop, name,
                  time.ctime())
Пример #11
0
    def test_no_space_cont_create(self):
        """Verify container create/write behavior when the pool is full.

        Fills the pool with progressively smaller objects until every write
        fails with the expected out-of-space error, then creates a second
        container and confirms that writing to it also fails the same way.

        :avocado: tags=all,container,tiny,full_regression,fullpoolcontcreate
        """
        # full storage rc
        err = "-1007"
        # probably should be -1007, revisit later
        err2 = "-1009"

        # create pool and connect
        self.prepare_pool()

        # query the pool
        self.log.info("Pool Query before write")
        self.pool.set_query_data()
        self.log.info("Pool %s query data: %s\n", self.pool.uuid,
                      self.pool.query_data)

        # create a container
        try:
            self.log.info("creating container 1")
            cont = DaosContainer(self.context)
            cont.create(self.pool.pool.handle)
            self.log.info("created container 1")
        except DaosApiError as excep:
            self.log.error("caught exception creating container: " "%s", excep)
            self.fail("caught exception creating container: {}".format(excep))

        self.log.info("opening container 1")
        cont.open()

        # generate random dkey, akey each time
        # write 1mb until no space, then 1kb, etc. to fill pool quickly
        for obj_sz in [1048576, 10240, 10, 1]:
            write_count = 0
            while True:
                self.d_log.debug("writing obj {0} sz {1} to "
                                 "container".format(write_count, obj_sz))
                my_str = b"a" * obj_sz
                my_str_sz = obj_sz
                dkey = get_random_bytes(5)
                akey = get_random_bytes(5)
                try:
                    dummy_oid = cont.write_an_obj(my_str,
                                                  my_str_sz,
                                                  dkey,
                                                  akey,
                                                  obj_cls="OC_SX")
                    self.d_log.debug("wrote obj {0}, sz {1}".format(
                        write_count, obj_sz))
                    write_count += 1
                except DaosApiError as excep:
                    if not (err in repr(excep) or err2 in repr(excep)):
                        # unexpected error: close the container and fail
                        self.log.error(
                            "caught exception while writing "
                            "object: %s", repr(excep))
                        cont.close()
                        self.fail("caught exception while writing "
                                  "object: {}".format(repr(excep)))
                    else:
                        # expected out-of-space error: try the next size
                        self.log.info(
                            "pool is too full for %s byte "
                            "objects", obj_sz)
                        break

        self.log.info("closing container")
        cont.close()

        # query the pool
        self.log.info("Pool Query after filling")
        self.pool.set_query_data()
        self.log.info("Pool %s query data: %s\n", self.pool.uuid,
                      self.pool.query_data)

        # create a 2nd container now that pool is full
        try:
            self.log.info("creating 2nd container")
            cont2 = DaosContainer(self.context)
            cont2.create(self.pool.pool.handle)
            self.log.info("created 2nd container")

            self.log.info("opening container 2")
            cont2.open()

            self.log.info("writing one more object, write expected to fail")
            # my_str, my_str_sz, dkey and akey carry the values left over
            # from the last iteration of the fill loop above (1-byte object)
            cont2.write_an_obj(my_str, my_str_sz, dkey, akey, obj_cls="OC_SX")
            self.log.info("closing container")
            cont2.close()
            self.fail("wrote one more object after pool was completely filled,"
                      " this should never print")
        except DaosApiError as excep:
            if not (err in repr(excep) or err2 in repr(excep)):
                self.log.error(
                    "caught unexpected exception while "
                    "writing object: %s", repr(excep))
                self.log.info("closing container")
                cont2.close()
                self.fail("caught unexpected exception while writing "
                          "object: {}".format(repr(excep)))
            else:
                self.log.info("correctly caught -1007 while attempting "
                              "to write object in full pool")
                self.log.info("closing container")
                cont2.close()
Пример #12
0
    def test_snapshots(self):
        # pylint: disable=no-member,too-many-locals
        """
        Test ID: DAOS-1386 Test container SnapShot information
                 DAOS-1371 Test list snapshots
                 DAOS-1395 Test snapshot destroy
                 DAOS-1402 Test creating multiple snapshots
        Test Description:
                (1)Create an object, write random data into it, and take
                   a snapshot.
                (2)Make changes to the data object. The write_an_obj function
                   does a commit when the update is complete.
                (3)Verify the data in the snapshot is the original data.
                   Get a handle for the snapshot and read the object at dkey,
                   akey. Compare it to the originally written data.
                (4)List the snapshot and make sure it reflects the original
                   epoch.
                   ==>Repeat step(1) to step(4) for multiple snapshot tests.
                (5)Verify the snapshots data.
                (6)Destroy the snapshot individually.
                   ==>Loop step(5) and step(6) to perform multiple snapshots
                   data verification and snapshot destroy test.
                (7)Check if still able to Open the destroyed snapshot and
                   Verify the snapshot removed from the snapshot list.
        Use Cases: Require 1 client and 1 server to run snapshot test.
                   1 pool and 1 container is used, num_of_snapshot defined
                   in the snapshot.yaml will be performed and verified.
        :avocado: tags=all,small,smoke,snap,snapshots,full_regression
        """

        # One record per snapshot iteration: container handle, object,
        # snapshot object, and the exact bytes written before the snapshot.
        test_data = []
        ss_number = 0
        obj_cls = self.params.get("obj_class", '/run/object_class/*')
        akey = self.params.get("akey", '/run/snapshot/*', default="akey")
        dkey = self.params.get("dkey", '/run/snapshot/*', default="dkey")
        # DAOS keys are passed to the API as bytes, not str.
        akey = akey.encode("utf-8")
        dkey = dkey.encode("utf-8")
        data_size = self.params.get("test_datasize",
                                    '/run/snapshot/*',
                                    default=150)
        snapshot_loop = self.params.get("num_of_snapshot",
                                        '/run/snapshot/*',
                                        default=3)
        #
        # Test loop for creat, modify and snapshot object in the DAOS container.
        #
        while ss_number < snapshot_loop:
            # (1)Create an object, write some data into it, and take a snapshot
            ss_number += 1
            # Payload embeds the iteration number plus a random tail so each
            # snapshot's data is unique and self-identifying on mismatch.
            thedata = b"--->>>Happy Daos Snapshot Testing " + \
                str(ss_number).encode("utf-8") + \
                b"<<<---" + get_random_bytes(random.randint(1, data_size)) #nosec
            # +1 to include room for a trailing NUL when reading back.
            datasize = len(thedata) + 1
            try:
                obj = self.container.write_an_obj(thedata,
                                                  datasize,
                                                  dkey,
                                                  akey,
                                                  obj_cls=obj_cls)
                obj.close()
            except DaosApiError as error:
                self.fail("##(1)Test failed during the initial object "
                          "write: {}".format(str(error)))
            # Take a snapshot of the container
            snapshot = DaosSnapshot(self.context)
            snapshot.create(self.container.coh)
            self.log.info("==Wrote an object and created a snapshot")

            # Display snapshot
            self.log.info("=(1.%s)snapshot test loop: %s", ss_number,
                          ss_number)
            self.log.info("  ==snapshot.epoch= %s", snapshot.epoch)
            self.display_snapshot(snapshot)

            # Save snapshot test data
            test_data.append({
                "coh": self.container.coh,
                "tst_obj": obj,
                "snapshot": snapshot,
                "tst_data": thedata
            })

            # (2)Make changes to the data object. The write_an_obj function does
            #    a commit when the update is complete
            num_transactions = more_transactions = 200
            self.log.info(
                "=(2.%s)Committing %d additional transactions to "
                "the same KV.", ss_number, more_transactions)
            while more_transactions:
                size = random.randint(1, 250) + 1  #nosec
                new_data = get_random_bytes(size)
                try:
                    new_obj = self.container.write_an_obj(new_data,
                                                          size,
                                                          dkey,
                                                          akey,
                                                          obj_cls=obj_cls)
                    new_obj.close()
                except Exception as error:
                    self.fail("##(2)Test failed during the write of "
                              "multi-objects: {}".format(str(error)))
                more_transactions -= 1

            # (3)Verify the data in the snapshot is the original data.
            #    Get a handle for the snapshot and read the object at dkey, akey
            #    Compare it to the originally written data.
            self.log.info("=(3.%s)snapshot test loop: %s", ss_number,
                          ss_number)
            try:
                obj.open()
                # Read through the snapshot's transaction handle so the
                # overwrites from step (2) are not visible.
                snap_handle = snapshot.open(self.container.coh, snapshot.epoch)
                thedata3 = self.container.read_an_obj(datasize,
                                                      dkey,
                                                      akey,
                                                      obj,
                                                      txn=snap_handle.value)
                obj.close()
            except Exception as error:
                self.fail("##(3.1)Error when retrieving the snapshot data: {}".
                          format(str(error)))
            self.display_snapshot_test_data(test_data, ss_number)
            self.log.info("  ==thedata3.value= %s", thedata3.value)
            # NOTE(review): a plain `raise Exception` bypasses the avocado
            # failure path; `self.fail(...)` would be consistent with the
            # surrounding error handling.
            if thedata3.value != thedata:
                raise Exception("##(3.2)The data in the snapshot is not the "
                                "same as the original data")
            self.log.info("  ==The snapshot data matches the data originally"
                          " written.")

            # (4)List the snapshot and make sure it reflects the original epoch
            try:
                ss_list = snapshot.list(self.container.coh, snapshot.epoch)
                self.log.info("=(4.%s)snapshot.list(self.container.coh)= %s",
                              ss_number, ss_list)
                self.log.info("  ==snapshot.epoch=  %s", snapshot.epoch)

            except Exception as error:
                self.fail(
                    "##(4)Test was unable to list the snapshot: {}".format(
                        str(error)))
            self.log.info(
                "  ==After %s additional commits the snapshot is "
                "still available", num_transactions)

        # (5)Verify the snapshots data
        #    Step(5) and (6), test loop to perform multiple snapshots data
        #    verification and snapshot destroy.
        #    Use current_ss for the individual snapshot object.
        # NOTE(review): range(snapshot_loop - 1, 0, -1) with ind = ss_number - 1
        # visits indices snapshot_loop-2 .. 0, so the LAST snapshot taken is
        # never verified/destroyed here; step (7) then reuses that still-live
        # snapshot. Confirm this is intentional.
        for ss_number in range(snapshot_loop - 1, 0, -1):
            ind = ss_number - 1
            self.log.info("=(5.%s)Verify the snapshot number %s:", ss_number,
                          ss_number)
            self.display_snapshot_test_data(test_data, ss_number)
            coh = test_data[ind]["coh"]
            current_ss = test_data[ind]["snapshot"]
            obj = test_data[ind]["tst_obj"]
            tst_data = test_data[ind]["tst_data"]
            # Same +1 sizing convention as the write in step (1).
            datasize = len(tst_data) + 1
            try:
                obj.open()
                snap_handle5 = current_ss.open(coh, current_ss.epoch)
                thedata5 = self.container.read_an_obj(datasize,
                                                      dkey,
                                                      akey,
                                                      obj,
                                                      txn=snap_handle5.value)
                obj.close()
            except Exception as error:
                self.fail("##(5.1)Error when retrieving the snapshot data: {}".
                          format(str(error)))
            self.log.info("  ==snapshot tst_data =%s", thedata5.value)
            # NOTE(review): `raise Exception` — see note at step (3.2).
            if thedata5.value != tst_data:
                raise Exception("##(5.2)Snapshot #{}, test data Mis-matches"
                                "the original data written.".format(ss_number))
            self.log.info(
                "  snapshot test number %s, test data matches"
                " the original data written.", ss_number)

            # (6)Destroy the individual snapshot
            self.log.info("=(6.%s)Destroy the snapshot epoch: %s", ss_number,
                          current_ss.epoch)
            try:
                current_ss.destroy(coh, current_ss.epoch)
                self.log.info("  ==snapshot.epoch %s successfully destroyed",
                              current_ss.epoch)
            except Exception as error:
                self.fail("##(6)Error on current_ss.destroy: {}".format(
                    str(error)))

        # (7)Check if still able to Open the destroyed snapshot and
        #    Verify the snapshot removed from the snapshot list
        # NOTE(review): `snapshot`, `coh`, `obj` and `datasize` here are the
        # leftovers from the loops above; given the range note at step (5),
        # `snapshot` (the last one taken) was never destroyed, which is why
        # the open/read below is expected to succeed — confirm intent.
        try:
            obj.open()
            snap_handle7 = snapshot.open(coh, snapshot.epoch)
            thedata7 = self.container.read_an_obj(datasize,
                                                  dkey,
                                                  akey,
                                                  obj,
                                                  txn=snap_handle7.value)
            obj.close()
        except Exception as error:
            self.fail(
                "##(7)Error when retrieving the snapshot data: {}".format(
                    str(error)))
        self.log.info("=(7)=>thedata_after_snapshot.destroyed.value= %s",
                      thedata7.value)
        self.log.info("  ==>snapshot.epoch=     %s", snapshot.epoch)

        # Still able to open the snapshot and read data after destroyed.
        try:
            ss_list = snapshot.list(coh, snapshot.epoch)
            self.log.info("  -->snapshot.list(coh, snapshot.epoch)= %s",
                          ss_list)
        except Exception as error:
            self.fail("##(7)Error when calling the snapshot list: {}".format(
                str(error)))
Пример #13
0
    def test_snapshot_negativecases(self):
        # pylint: disable=no-member
        """
        Test ID: DAOS-1390 Verify snap_create bad parameter behavior.
                 DAOS-1322 Create a new container, verify snapshot state.
                           as expected for a brand new container.
                 DAOS-1392 Verify snap_destroy bad parameter behavior.
                 DAOS-1388 Verify snap_list bad parameter behavior.
        Test Description:
                (0)Take a snapshot of the newly created container.
                (1)Create an object, write random data into it, and take
                   a snapshot.
                (2)Verify the snapshot is working properly.
                (3)Test snapshot with an invalid container handle.
                (4)Test snapshot with a NULL container handle.
                (5)Verify snap_destroy with a bad parameter.
                (6)Verify snap_list bad parameter behavior.

        Use Cases: Combinations with minimum 1 client and 1 server.
        :avocado: tags=all,small,smoke,daily_regression,snap,snapshot_negative,
        :avocado: tags=snapshotcreate_negative
        """

        # DAOS-1322 Create a new container, verify snapshot state as expected
        #           for a brand new container.
        try:
            self.log.info(
                "==(0)Take a snapshot of the newly created container.")
            snapshot = DaosSnapshot(self.context)
            snapshot.create(self.container.coh)
            self.display_snapshot(snapshot)
        except Exception as error:
            self.fail("##(0)Error on a snapshot on a new container"
                      " {}".format(str(error)))

        # (1)Create an object, write some data into it, and take a snapshot
        obj_cls = self.params.get("obj_class", '/run/object_class/*')
        akey = self.params.get("akey", '/run/snapshot/*', default="akey")
        dkey = self.params.get("dkey", '/run/snapshot/*', default="dkey")
        # DAOS keys are passed to the API as bytes, not str.
        akey = akey.encode("utf-8")
        dkey = dkey.encode("utf-8")
        data_size = self.params.get("test_datasize",
                                    '/run/snapshot/*',
                                    default=150)
        # Random tail makes each run's payload unique; +1 below leaves room
        # for a trailing NUL when reading back.
        thedata = b"--->>>Happy Daos Snapshot-Create Negative Testing " + \
                  b"<<<---" + get_random_bytes(random.randint(1, data_size)) #nosec
        try:
            obj = self.container.write_an_obj(thedata,
                                              len(thedata) + 1,
                                              dkey,
                                              akey,
                                              obj_cls=obj_cls)
        except DaosApiError as error:
            self.fail("##(1)Test failed during the initial object write:"
                      " {}".format(str(error)))
        obj.close()
        # Take a snapshot of the container
        snapshot = self.take_snapshot(self.container)
        self.log.info("==(1)snapshot.epoch= %s", snapshot.epoch)

        # (2)Verify the snapshot is working properly.
        try:
            obj.open()
            # Read through the snapshot's transaction handle.
            snap_handle = snapshot.open(self.container.coh, snapshot.epoch)
            thedata2 = self.container.read_an_obj(len(thedata) + 1,
                                                  dkey,
                                                  akey,
                                                  obj,
                                                  txn=snap_handle.value)
            # Close the object again; every open() in this test is paired
            # with a close() (previously leaked here).
            obj.close()
        except Exception as error:
            self.fail("##(2)Error when retrieving the snapshot data:"
                      " {}".format(str(error)))
        self.log.info("==(2)snapshot_list[ind]=%s", snapshot)
        self.log.info("==snapshot.epoch=  %s", snapshot.epoch)
        self.log.info("==written thedata=%s", thedata)
        self.log.info("==thedata2.value= %s", thedata2.value)
        if thedata2.value != thedata:
            self.fail("##(2)The data in the snapshot is not the same as the "
                      "original data")
        self.log.info("==Snapshot data matches the data originally "
                      "written.")

        # (3)Test snapshot with an invalid container handle
        self.log.info("==(3)Snapshot with an invalid container handle.")
        if self.invalid_snapshot_test(self.container):
            self.log.info(
                "==>Negative test 1, expecting failed on taking "
                "snapshot with an invalid container.coh: %s", self.container)
        else:
            self.fail("##(3)Negative test 1 passing, expecting failed on"
                      " taking snapshot with an invalid container.coh: "
                      " {}".format(self.container))

        # (4)Test snapshot with a NULL container handle
        self.log.info("==(4)Snapshot with a NULL container handle.")
        if self.invalid_snapshot_test(None):
            self.log.info("==>Negative test 2, expecting failed on taking "
                          "snapshot on a NULL container.coh.")
        else:
            self.fail("##(4)Negative test 2 passing, expecting failed on "
                      "taking snapshot with a NULL container.coh.")

        # (5)DAOS-1392 destroy snapshot with an invalid handle
        # (Message numbering fixed: previously logged/failed as section 6.)
        self.log.info(
            "==(5)DAOS-1392 destroy snapshot with an invalid handle.")
        try:
            snapshot.destroy(None, snapshot.epoch)
            self.fail("##(5)Negative test destroy snapshot with an "
                      "invalid coh handle, expected fail, shown Passing##")
        except Exception as error:
            # Broad catch is deliberate: only the DAOS return code matters.
            self.log.info(
                "==>Negative test, destroy snapshot with an invalid handle.")
            self.log.info("   Expected Error: %s", str(error))
            expected_error = "RC: -1002"
            if expected_error not in str(error):
                self.fail("##(5.1)Expecting error RC: -1002  did not show.")

        # (6)DAOS-1388 Verify snap_list bad parameter behavior
        # (Message numbering fixed: previously logged/failed as section 7.)
        self.log.info(
            "==(6)DAOS-1388 Verify snap_list bad parameter behavior.")
        try:
            snapshot.list(None, 0)
            self.fail("##(6)Negative test snapshot list with an "
                      "invalid coh and epoch, expected fail, shown Passing##")
        except Exception as error:
            # Broad catch is deliberate: only the DAOS return code matters.
            self.log.info(
                "==>Negative test, snapshot list with an invalid coh.")
            self.log.info("   Expected Error: %s", str(error))
            expected_error = "RC: -1002"
            if expected_error not in str(error):
                self.fail("##(6.1)Expecting error RC: -1002  did not show.")