Esempio n. 1
0
    def test_valid_labels(self):
        """Test ID: DAOS-7942

        Test Description: Create and destroy pool with the following labels.
        * Random alpha numeric string of length 126.
        * Random alpha numeric string of length 127.
        * Random upper case string of length 50.
        * Random lower case string of length 50.
        * Random number string of length 50.

        :avocado: tags=all,full_regression
        :avocado: tags=vm
        :avocado: tags=pool,pool_label
        :avocado: tags=create_valid_labels
        """
        self.pool = []

        # Labels the server is expected to accept: near-maximum lengths and
        # single-character-class strings.
        valid_labels = [
            get_random_string(126),
            get_random_string(127),
            get_random_string(length=50, include=string.ascii_uppercase),
            get_random_string(length=50, include=string.ascii_lowercase),
            get_random_string(length=50, include=string.digits),
        ]

        errors = []
        for valid_label in valid_labels:
            # verify_create appends the new pool to self.pool, so the pool
            # just created is the last element.
            errors.extend(self.verify_create(valid_label, False))
            errors.extend(self.verify_destroy(self.pool[-1], False))

        report_errors(self, errors)
Esempio n. 2
0
    def test_duplicate_create(self):
        """Test ID: DAOS-7942

        Test Description:
        1. Create a pool with a label.
        2. Create another pool with the same label. Should fail.
        3. Destroy the pool.
        4. Create a pool with the same label again. It should work this time.

        :avocado: tags=all,full_regression
        :avocado: tags=vm
        :avocado: tags=pool,pool_label
        :avocado: tags=duplicate_label_create
        """
        self.pool = []
        label = "TestLabel"

        # Step 1. The first create with this label should succeed.
        create_errors = self.verify_create(label, False)
        report_errors(self, create_errors)

        # Step 2. A second create with the same label must be rejected by the
        # server with a duplicate-label error.
        duplicate_errors = self.verify_create(label, True, "already exists")
        report_errors(self, duplicate_errors)

        # Step 3. Destroy the first pool so the label becomes available again.
        destroy_errors = self.verify_destroy(self.pool[0], False)
        report_errors(self, destroy_errors)

        # Step 4. The label is free now, so creating with it should succeed.
        recreate_errors = self.verify_create(label, False)
        report_errors(self, recreate_errors)
Esempio n. 3
0
    def test_label_update(self):
        """Test ID: DAOS-7942

        Test Description:
        1. Create a pool.
        2. Update the label with dmg pool set-prop.
        3. Call dmg pool get-prop and verify that the new label is returned.
        4. Try to destroy the pool with the old label. It should fail.
        5. Destroy the pool with the new label. Should work.

        :avocado: tags=all,full_regression
        :avocado: tags=vm
        :avocado: tags=pool,pool_label
        :avocado: tags=label_update
        """
        self.pool = []
        errors = []

        # Step 1. Create a pool with the initial label.
        old_label = "OldLabel"
        report_errors(
            self, self.verify_create(label=old_label, failure_expected=False))

        # Step 2. Change the label on the server, then mirror the change in
        # the TestPool object so subsequent dmg calls use the new label.
        new_label = "NewLabel"
        pool = self.pool[-1]
        pool.set_property(prop_name="label", prop_value=new_label)
        pool.label.update(new_label)

        # Step 3. get-prop must now return the new label.
        actual_label = pool.get_property(prop_name="label")
        if actual_label != new_label:
            errors.append(
                "Unexpected label from get-prop! Expected = {}; Actual = {}".format(
                    new_label, actual_label))

        # Step 4. Point the TestPool at the stale label; destroy must fail.
        pool.label.update(old_label)
        errors.extend(
            self.verify_destroy(pool=pool, failure_expected=True))

        # Step 5. Restore the current label; destroy must succeed.
        pool.label.update(new_label)
        errors.extend(
            self.verify_destroy(pool=pool, failure_expected=False))

        report_errors(test=self, errors=errors)
Esempio n. 4
0
    def test_invalid_labels(self):
        """Test ID: DAOS-7942

        Test Description: Create pool with following invalid labels.
        * UUID format string: 23ab123e-5296-4f95-be14-641de40b4d5a
        * Long label - 128 random chars.

        :avocado: tags=all,full_regression
        :avocado: tags=vm
        :avocado: tags=pool,pool_label
        :avocado: tags=create_invalid_labels
        """
        self.pool = []
        errors = []

        # Pairs of (label to try, substring expected in the error output).
        # UUID-shaped labels are rejected to avoid ambiguity with pool UUIDs;
        # 128 characters exceeds the maximum label length.
        label_outs = [
            ("23ab123e-5296-4f95-be14-641de40b4d5a", "invalid label"),
            (get_random_string(128), "invalid label"),
        ]

        for label, expected_out in label_outs:
            errors.extend(self.verify_create(label, True, expected_out))

        report_errors(self, errors)
Esempio n. 5
0
    def test_duplicate_destroy(self):
        """Test ID: DAOS-7942

        Test Description:
        1. Create a pool with a label.
        2. Destroy it with the label.
        3. Destroy it with the label again. The second destroy should fail.

        :avocado: tags=all,full_regression
        :avocado: tags=small
        :avocado: tags=pool,duplicate_label_destroy
        """
        self.pool = []

        # Step 1. Create a labeled pool.
        create_errors = self.verify_create("TestLabel", False)
        report_errors(self, create_errors)

        pool = self.pool[-1]

        # Step 2. The first destroy with the label should succeed.
        report_errors(self, self.verify_destroy(pool, False))

        # Step 3. Destroying the same (already destroyed) pool again is
        # expected to fail.
        report_errors(self, self.verify_destroy(pool, True, True))
Esempio n. 6
0
    def test_agent_failure(self):
        """Jira ID: DAOS-9385.

        1. Create a pool and a container.
        2. Run IOR.
        3. Stop daos_agent process while IOR is running.
        4. Check the error on the client side. When daos_agent is killed in the middle of
        IOR, the IOR would fail.
        5. Verify journalctl shows the log that the agent is stopped. Call:
        journalctl --system -t daos_agent --since <before> --until <after>
        This step verifies that DAOS, or daos_agent process in this case, prints useful
        logs for the user to troubleshoot the issue, which in this case the application
        can’t be used.
        6. Restart daos_agent.
        7. Run IOR again. It should succeed this time without any error. This step
        verifies that DAOS can recover from the fault with minimal human intervention.

        :avocado: tags=all,full_regression
        :avocado: tags=hw,large
        :avocado: tags=deployment,fault_management,agent_failure
        :avocado: tags=agent_failure_basic
        """
        # 1. Create a pool and a container.
        self.add_pool()
        self.add_container(self.pool)

        # 2. Run IOR.
        # Run IOR in a background thread so the agent can be stopped while it
        # is still writing. run_ior_collect_error stores its outcome in
        # ior_results[job_num]; element 0 is used below as a success flag and
        # the last element as the error message.
        ior_results = {}
        job_num = 1
        self.log.info("Run IOR with thread")
        job = threading.Thread(
            target=self.run_ior_collect_error,
            args=[ior_results, job_num, "test_file_1", [self.hostlist_clients[0]]])

        self.log.info("Start IOR %d (thread)", job_num)
        job.start()

        # We need to stop daos_agent while IOR is running, so need to wait for a few
        # seconds for IOR to start.
        self.log.info("Waiting 5 sec for IOR to start writing data...")
        time.sleep(5)

        errors = []

        # 3. Stop daos_agent process while IOR is running.
        # Record the time window around the stop; step 5 restricts journalctl
        # output to this [since, until] window.
        since = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
        self.log.info("Stopping agent")
        stop_agent_errors = self.stop_agents()
        for error in stop_agent_errors:
            self.log.debug(error)
            errors.append(error)
        until = datetime.now().strftime('%Y-%m-%d %H:%M:%S')

        # Wait until the IOR thread ends.
        job.join()

        # 4. Verify the error from the IOR command.
        # IOR is expected to fail because its agent was killed mid-run, so a
        # truthy success flag here is a test failure.
        self.log.info("--- IOR results %d ---", job_num)
        self.log.info(ior_results[job_num])
        if ior_results[job_num][0]:
            errors.append("IOR worked when agent is killed!")

        # 5. Verify journalctl shows the log that the agent is stopped.
        results = get_journalctl(
            hosts=self.hostlist_clients, since=since, until=until,
            journalctl_type="daos_agent")
        self.log.info("journalctl results = %s", results)
        if "shutting down" not in results[0]["data"]:
            msg = "Agent shut down message not found in journalctl! Output = {}".format(
                results)
            errors.append(msg)

        # 6. Restart agent.
        self.log.info("Restart agent")
        self.start_agent_managers()

        # 7. Run IOR again.
        # With the agent back, run IOR synchronously; it should now succeed.
        job_num = 2
        self.log.info("Start IOR %d", job_num)
        self.run_ior_collect_error(
            job_num=job_num, results=ior_results, file_name="test_file_2",
            clients=[self.hostlist_clients[0]])

        # Verify that there's no error this time.
        self.log.info("--- IOR results %d ---", job_num)
        self.log.info(ior_results[job_num])
        if not ior_results[job_num][0]:
            ior_error = ior_results[job_num][-1]
            errors.append("IOR with restarted agent failed! Error: {}".format(ior_error))

        self.log.info("########## Errors ##########")
        report_errors(test=self, errors=errors)
        self.log.info("############################")
Esempio n. 7
0
    def test_agent_failure_isolation(self):
        """Jira ID: DAOS-9385.

        1. Create a pool and a container.
        2. Run IOR from the two client nodes.
        3. Stop daos_agent process while IOR is running on one of the clients.
        4. Wait until both of the IOR ends.
        5. Check that there's error on the kill client, but not on the keep client.
        6. On the killed client, verify journalctl shows the log that the agent is
        stopped.
        7. On the other client where agent is still running, verify that the journalctl
        doesn't show that the agent is stopped.
        8. Restart both daos_agent.
        9. Run IOR again from the keep client. It should succeed without any error.

        :avocado: tags=all,full_regression
        :avocado: tags=hw,large
        :avocado: tags=deployment,fault_management,agent_failure
        :avocado: tags=agent_failure_isolation
        """
        # 1. Create a pool and a container.
        self.add_pool()
        self.add_container(self.pool)

        # Pick one agent host to keep running and one to kill; this test
        # verifies the failure stays isolated to the killed host.
        agent_hosts = self.agent_managers[0].hosts
        self.log.info("agent_hosts = %s", agent_hosts)
        agent_host_keep = agent_hosts[0]
        agent_host_kill = agent_hosts[1]

        # 2. Run IOR from the two client nodes.
        # One background IOR per client; results are keyed by job number.
        ior_results = {}
        job_num_keep = 1
        job_num_kill = 2
        self.log.info("Run IOR with thread")
        thread_1 = threading.Thread(
            target=self.run_ior_collect_error,
            args=[ior_results, job_num_keep, "test_file_1", [agent_host_keep]])
        thread_2 = threading.Thread(
            target=self.run_ior_collect_error,
            args=[ior_results, job_num_kill, "test_file_2", [agent_host_kill]])

        self.log.info("Start IOR 1 (thread)")
        thread_1.start()
        thread_2.start()

        # We need to stop daos_agent while IOR is running, so need to wait for a few
        # seconds for IOR to start.
        self.log.info("Waiting 5 sec for IOR to start writing data...")
        time.sleep(5)

        errors = []

        # 3. Stop daos_agent process while IOR is running on one of the clients.
        # Record the time window around the kill; steps 6 and 7 restrict
        # journalctl output to this [since, until] window.
        since = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
        self.log.info("Stopping agent on %s", agent_host_kill)
        pattern = self.agent_managers[0].manager.job.command_regex
        result = stop_processes(hosts=[agent_host_kill], pattern=pattern)
        # A result of {0: ...} alone means no matching process was found to
        # kill on the host, which would invalidate the test.
        if 0 in result and len(result) == 1:
            msg = "No daos_agent process killed from {}!".format(agent_host_kill)
            errors.append(msg)
        else:
            self.log.info("daos_agent in %s killed", agent_host_kill)
        until = datetime.now().strftime('%Y-%m-%d %H:%M:%S')

        # 4. Wait until both of the IOR thread ends.
        thread_1.join()
        thread_2.join()

        # 5. Check that there's error on the kill client, but not on the keep client.
        self.log.info("--- IOR results Kill ---")
        self.log.info(ior_results[job_num_kill])
        if ior_results[job_num_kill][0]:
            errors.append("IOR on agent kill host worked!")

        self.log.info("--- IOR results Keep ---")
        self.log.info(ior_results[job_num_keep])
        if not ior_results[job_num_keep][0]:
            ior_error = ior_results[job_num_keep][-1]
            errors.append("Error found in IOR on keep client! {}".format(ior_error))

        # 6. On the killed client, verify journalctl shows the log that the agent is
        # stopped.
        results = get_journalctl(
            hosts=[agent_host_kill], since=since, until=until,
            journalctl_type="daos_agent")
        self.log.info("journalctl results (kill) = %s", results)
        if "shutting down" not in results[0]["data"]:
            msg = ("Agent shut down message not found in journalctl on killed client! "
                   "Output = {}".format(results))
            errors.append(msg)

        # 7. On the other client where agent is still running, verify that the journalctl
        # in the previous step doesn't show that the agent is stopped.
        results = get_journalctl(
            hosts=[agent_host_keep], since=since, until=until,
            journalctl_type="daos_agent")
        self.log.info("journalctl results (keep) = %s", results)
        if "shutting down" in results[0]["data"]:
            msg = ("Agent shut down message found in journalctl on keep client! "
                   "Output = {}".format(results))
            errors.append(msg)

        # 8. Restart both daos_agent. (Currently, there's no clean way to restart one.)
        self.start_agent_managers()

        # 9. Run IOR again from the keep client. It should succeed without any error.
        self.log.info("--- Start IOR 2 ---")
        self.run_ior_collect_error(
            job_num=job_num_keep, results=ior_results, file_name="test_file_3",
            clients=agent_hosts)

        # Verify that there's no error.
        self.log.info("--- IOR results 2 ---")
        self.log.info(ior_results[job_num_keep])
        if not ior_results[job_num_keep][0]:
            ior_error = ior_results[job_num_keep][-1]
            errors.append("Error found in second IOR run! {}".format(ior_error))

        self.log.info("########## Errors ##########")
        report_errors(test=self, errors=errors)
        self.log.info("############################")
Esempio n. 8
0
    def verify_failure_with_protection(self, ior_namespace):
        """Jira ID: DAOS-10001.

        Verify that failing (excluding) one target from two server ranks would cause an
        error to the ongoing IOR even if data protection is used. Also verify that
        reintegrating the excluded target will bring back the system to the usable state.

        1. Run two server ranks and create a pool and a container with --properties=rf:1.
        2. Run IOR with --dfs.oclass RP_2G1/EC_2P1G1 --dfs.dir_oclass RP_2G1/EC_2P1G1
        3. While the IOR is running, exclude one target from each server rank so that IO
        fails even with replication.
        4. Verify the IOR failed.
        5. Verify that the container's Health property is UNCLEAN.
        6. Reintegrate the excluded targets and wait for the rebuild to finish.
        7. Verify that the container's Health property is HEALTHY.
        8. Restart the IOR and verify that it works. (Recovery test)
        9. Destroy the container to retrieve the space.
        10. Verify that a new container can be created and IOR works. (Recovery test)

        Args:
            ior_namespace (str): Yaml namespace that defines the object class used for
                IOR.
        """
        # 1. Create a pool and a container.
        self.add_pool(namespace="/run/pool_size_ratio_80/*")
        self.add_container(pool=self.pool,
                           namespace="/run/container_with_rf/*")

        # 2. Run IOR with oclass RP_2G1 or EC_2P1G1.
        # Run IOR in a thread so targets can be excluded while it is writing.
        # run_ior_report_error stores the outcome in ior_results[job_num];
        # element 0 is used as the success flag and the last element as the
        # error message.
        ior_results = {}
        job_num = 1
        job = threading.Thread(target=self.run_ior_report_error,
                               args=[
                                   ior_results, job_num, "test_file_1",
                                   self.pool, self.container, ior_namespace
                               ])

        job.start()

        # We need to exclude targets while IOR is running, so need to wait for a few
        # seconds for IOR to start.
        self.log.info("Waiting 5 sec for IOR to start writing data...")
        time.sleep(5)

        errors = []

        # 3. Exclude one target from two server ranks while IOR is running.
        self.pool.exclude(ranks=[1], tgt_idx="1")
        # If we exclude back to back, it would cause an error. Wait for the rebuild to
        # start before excluding the next target.
        self.pool.wait_for_rebuild(to_start=True)
        self.pool.exclude(ranks=[0], tgt_idx="1")
        self.pool.measure_rebuild_time(operation="Exclude 2 targets",
                                       interval=5)

        # Wait until the IOR thread ends.
        job.join()

        # 4. Verify that the IOR failed.
        # With rf:1 and a target lost on both ranks, IO must fail.
        self.log.info("----- IOR results 1 -----")
        self.log.info(ior_results)
        ior_error = ior_results[job_num][-1]
        self.log.info("IOR 1 error = %s", ior_error)
        if ior_results[job_num][0]:
            errors.append("First IOR was supposed to fail, but worked!")

        # 5. Verify that the container's Health property is UNCLEAN.
        if not self.check_container_health(container=self.container,
                                           expected_health="UNCLEAN"):
            errors.append("Container health isn't UNCLEAN after first IOR!")

        # 6. Reintegrate the excluded target.
        self.log.info("Reintegrate rank 1 target 1")
        # Reintegrate one target and wait for rebuild to finish before reintegrating the
        # next one.
        self.pool.reintegrate(rank="1", tgt_idx="1")
        self.pool.measure_rebuild_time(
            operation="Reintegrate rank 1 -> target 1", interval=5)
        self.log.info("Reintegrate rank 0 target 1")
        self.pool.reintegrate(rank="0", tgt_idx="1")
        self.pool.measure_rebuild_time(
            operation="Reintegrate rank 0 -> target 1", interval=5)

        # 7. Verify that the container's Health property is HEALTHY.
        if not self.check_container_health(container=self.container,
                                           expected_health="HEALTHY"):
            errors.append("Container health isn't HEALTHY after reintegrate!")

        # 8. Restart IOR. Should work.
        ior_results = {}
        self.run_ior_report_error(job_num=job_num,
                                  results=ior_results,
                                  file_name="test_file_2",
                                  pool=self.pool,
                                  container=self.container,
                                  namespace=ior_namespace)

        # Verify that there's no error this time.
        self.log.info("----- IOR results 2 -----")
        self.log.info(ior_results)
        if not ior_results[job_num][0]:
            # Use the last element for the error message, consistent with the
            # other result checks in this method (was index 1).
            errors.append("Error found in second IOR run! {}".format(
                ior_results[job_num][-1]))

        # 9. Destroy the container to retrieve the space.
        self.container.destroy()

        # 10. Create a new container and run IOR.
        self.add_container(pool=self.pool,
                           namespace="/run/container_with_rf/*")
        ior_results = {}
        self.run_ior_report_error(job_num=job_num,
                                  results=ior_results,
                                  file_name="test_file_3",
                                  pool=self.pool,
                                  container=self.container,
                                  namespace=ior_namespace)

        # Verify that there's no error.
        # This is the third IOR run; the header previously said "2" (copy-paste).
        self.log.info("----- IOR results 3 -----")
        self.log.info(ior_results)
        if not ior_results[job_num][0]:
            errors.append("Error found in third IOR run! {}".format(
                ior_results[job_num][-1]))

        self.log.info("########## Errors ##########")
        report_errors(test=self, errors=errors)
        self.log.info("############################")
Esempio n. 9
0
    def test_target_failure_parallel(self):
        """Jira ID: DAOS-10001.

        Verifying that failing a target in one pool doesn't affect other pool.

        1. Create 2 pools and a container in each pool.
        2. Run IOR with oclass SX on all containers at the same time.
        3. Exclude one target from self.pool[1] while IOR is running.
        4. Verify the IOR failed for self.pool[1].
        5. Verify the IOR worked for self.pool[0].
        6. Verify that the self.pool[1].container's Health property is UNCLEAN.
        7. Reintegrate the excluded target.
        8. Verify that the self.pool[1].container's Health property is HEALTHY.
        9. Run IOR again.
        10. Verify that there's no error this time.

        :avocado: tags=all,full_regression
        :avocado: tags=hw,medium,ib2
        :avocado: tags=deployment,target_failure
        :avocado: tags=target_failure_parallel
        """
        self.pool = []
        self.container = []

        # 1. Create 2 pools and a container in each pool. Each pool uses about 40% of the
        # available storage.
        self.pool.append(self.get_pool(namespace="/run/pool_size_ratio_40/*"))
        self.pool.append(self.get_pool(namespace="/run/pool_size_ratio_66/*"))
        for i in range(2):
            self.container.append(
                self.get_container(pool=self.pool[i],
                                   namespace="/run/container_wo_rf/*"))

        # 2. Run IOR with oclass SX on all containers at the same time.
        # One background IOR per pool; results are keyed by pool number.
        # run_ior_report_error stores the outcome; element 0 is used as the
        # success flag below.
        ior_results = {}
        ior_namespace = "/run/ior_wo_rf/*"
        threads = []

        for pool_num in range(2):
            threads.append(
                threading.Thread(target=self.run_ior_report_error,
                                 args=[
                                     ior_results, pool_num, "test_file_1",
                                     self.pool[pool_num],
                                     self.container[pool_num], ior_namespace
                                 ]))

            threads[-1].start()

        # Wait for a few seconds for IOR to start.
        self.log.info("Waiting 5 sec for IOR to start writing data...")
        time.sleep(5)

        errors = []

        # 3. Exclude one target from self.pool[1] while IOR is running.
        # Only pool 1 is disturbed; pool 0 must stay unaffected.
        excluded_pool_num = 1
        non_excluded_pool_num = 0
        self.log.info("Exclude rank 1 target 0")
        self.pool[excluded_pool_num].exclude(ranks=[1], tgt_idx="0")
        self.pool[excluded_pool_num].measure_rebuild_time(
            operation="Exclude 1 target", interval=5)

        # Wait until all the IOR threads end.
        for thread in threads:
            thread.join()

        # 4. Verify that the IOR failed for self.pool[1].
        failed_ior_result = ior_results[excluded_pool_num]
        self.log.info("----- IOR results 1 -----")
        self.log.info(failed_ior_result)
        if failed_ior_result[0]:
            msg = "First IOR {} was supposed to fail, but worked! {}".format(
                excluded_pool_num, failed_ior_result)
            errors.append(msg)

        # 5. Verify that the IOR worked for self.pool[0].
        # (Message grammar fixed: "supposed to worked" -> "supposed to work".)
        succeeded_ior_result = ior_results[non_excluded_pool_num]
        if not succeeded_ior_result[0]:
            msg = "First IOR {} was supposed to work, but failed! {}".format(
                non_excluded_pool_num, succeeded_ior_result)
            errors.append(msg)

        # 6. Verify that self.container[1]'s Health property is UNCLEAN.
        if not self.check_container_health(container=self.container[1],
                                           expected_health="UNCLEAN"):
            errors.append("Container health isn't UNCLEAN after first IOR!")

        # 7. Reintegrate the excluded target.
        self.log.info("Reintegrate target")
        self.pool[excluded_pool_num].reintegrate(rank="1", tgt_idx="0")
        self.pool[excluded_pool_num].measure_rebuild_time(
            operation="Reintegrate 1 target", interval=5)

        # 8. Verify that self.container[1]'s Health property is HEALTHY.
        # (Message fixed: this check runs after reintegrate, not after the
        # first IOR; matches the wording used by the sibling tests.)
        if not self.check_container_health(container=self.container[1],
                                           expected_health="HEALTHY"):
            errors.append("Container health isn't HEALTHY after reintegrate!")

        # 9. Run IOR again.
        self.run_ior_report_error(job_num=excluded_pool_num,
                                  results=ior_results,
                                  file_name="test_file_2",
                                  pool=self.pool[excluded_pool_num],
                                  container=self.container[excluded_pool_num],
                                  namespace=ior_namespace)

        # 10. Verify that there's no error this time.
        self.log.info("----- IOR results 2 -----")
        ior_result = ior_results[excluded_pool_num]
        self.log.debug(ior_result)
        if not ior_result[0]:
            errors.append("Error found in second IOR run! {}".format(
                ior_result[1]))

        self.log.info("########## Errors ##########")
        report_errors(test=self, errors=errors)
        self.log.info("############################")
Esempio n. 10
0
    def test_target_failure_wo_rf(self):
        """Jira ID: DAOS-10001.

        Verify that failing (excluding) one target would cause an error to the ongoing
        IOR. Also verify that reintegrating the excluded target will bring back the system
        to the usable state.

        1. Create a pool and a container.
        2. Run IOR with oclass SX.
        3. Exclude one target while IOR is running.
        4. Verify the IOR failed.
        5. Verify that the container's Health property is UNCLEAN.
        6. Reintegrate the excluded target.
        7. Verify that the container's Health property is HEALTHY.
        8. Run IOR again.
        9. Verify that there's no error this time.

        :avocado: tags=all,full_regression
        :avocado: tags=hw,medium,ib2
        :avocado: tags=deployment,target_failure
        :avocado: tags=target_failure_wo_rf
        """
        # Step 1. Prepare a pool and a container without redundancy.
        self.add_pool(namespace="/run/pool_size_ratio_80/*")
        self.add_container(pool=self.pool, namespace="/run/container_wo_rf/*")

        # Step 2. Kick off IOR in a background thread. With oclass SX there is
        # no data protection, so losing a target mid-write should fail it.
        ior_results = {}
        job_num = 1
        ior_namespace = "/run/ior_wo_rf/*"
        ior_thread = threading.Thread(
            target=self.run_ior_report_error,
            args=[ior_results, job_num, "test_file_1", self.pool,
                  self.container, ior_namespace])
        ior_thread.start()

        # Give IOR a head start so the exclude happens mid-write.
        self.log.info("Waiting 5 sec for IOR to start writing data...")
        time.sleep(5)

        errors = []

        # Step 3. Exclude one target while IOR is writing.
        self.pool.exclude(ranks=[1], tgt_idx="0")
        self.pool.measure_rebuild_time(operation="Exclude 1 target", interval=5)

        # Block until the background IOR run finishes.
        ior_thread.join()

        # Step 4. The first IOR run must have failed.
        self.log.info("----- IOR results 1 -----")
        self.log.info(ior_results)
        if ior_results[job_num][0]:
            first_run_error = ior_results[job_num][-1]
            errors.append(
                "First IOR was supposed to fail, but worked! {}".format(
                    first_run_error))

        # Step 5. The container must report UNCLEAN after the failure.
        if not self.check_container_health(
                container=self.container, expected_health="UNCLEAN"):
            errors.append("Container health isn't UNCLEAN after first IOR!")

        # Step 6. Bring the excluded target back and wait for the rebuild.
        self.pool.reintegrate(rank="1", tgt_idx="0")
        self.pool.measure_rebuild_time(
            operation="Reintegrate 1 target", interval=5)

        # Step 7. The container must be back to HEALTHY.
        if not self.check_container_health(
                container=self.container, expected_health="HEALTHY"):
            errors.append("Container health isn't HEALTHY after reintegrate!")

        # Step 8. A fresh IOR run should now succeed.
        ior_results = {}
        self.run_ior_report_error(
            job_num=job_num, results=ior_results, file_name="test_file_2",
            pool=self.pool, container=self.container, namespace=ior_namespace)

        # Step 9. Confirm the second run reported no error.
        self.log.info("----- IOR results 2 -----")
        self.log.info(ior_results)
        if not ior_results[job_num][0]:
            second_run_error = ior_results[job_num][1]
            errors.append(
                "Error found in second IOR run! {}".format(second_run_error))

        self.log.info("########## Errors ##########")
        report_errors(test=self, errors=errors)
        self.log.info("############################")