Example #1
    def test_03_license_check_vms(self):
        reporting.add_test_script(str(__name__) + "_check_vms")
        try:
            #Create license using CLI command
            self.cmd = command_argument_string.license_create + tvaultconf.vm_license_filename
            rc = cli_parser.cli_returncode(self.cmd)
            if rc != 0:
                reporting.add_test_step("Apply 10VM license", tvaultconf.FAIL)
                reporting.set_test_script_status(tvaultconf.FAIL)
                raise Exception("Command did not execute correctly")
            else:
                reporting.add_test_step("Apply 10VM license", tvaultconf.PASS)

            # Create simple workload
            self.workload_instances = []
            for i in range(0, 2):
                self.vm_id = self.create_vm()
                self.volume_id = self.create_volume()
                self.attach_volume(self.volume_id, self.vm_id)
                self.workload_instances.append(self.vm_id)
            self.wid = self.workload_create(self.workload_instances,
                                            tvaultconf.parallel)
            LOG.debug("Workload ID: " + str(self.wid))

            #Verify license-check CLI command
            self.cmd = command_argument_string.license_check
            rc = cli_parser.cli_returncode(self.cmd)
            if rc != 0:
                reporting.add_test_step("Execute license-check command",
                                        tvaultconf.FAIL)
                raise Exception("Command did not execute correctly")
            else:
                reporting.add_test_step("Execute license-check command",
                                        tvaultconf.PASS)

            # Verification: the license-check output should report the
            # 2 VMs protected above
            out = cli_parser.cli_output(self.cmd)
            LOG.debug("CLI Response: " + str(out))
            if (str(out).find('2') != -1):
                reporting.add_test_step("License-check verification",
                                        tvaultconf.PASS)
            else:
                reporting.add_test_step("License-check verification",
                                        tvaultconf.FAIL)
                raise Exception("License-check verification failed")
            reporting.test_case_to_write()
        except Exception as e:
            LOG.error("Exception: " + str(e))
            reporting.set_test_script_status(tvaultconf.FAIL)
            reporting.test_case_to_write()
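
Note: every example in this set drives the workloadmgr CLI through the same cli_parser helpers, whose source is not shown here. A minimal sketch of what cli_returncode and cli_output plausibly look like (the subprocess-based bodies below are assumptions, not the project's actual implementation):

import subprocess

def cli_returncode(cmd):
    # Hypothetical sketch: run the CLI command in a shell and hand back
    # its exit status (0 means success, non-zero means failure).
    return subprocess.call(cmd, shell=True)

def cli_output(cmd):
    # Hypothetical sketch: run the command and return its captured
    # stdout as text so tests can search it for expected substrings.
    return subprocess.check_output(cmd, shell=True).decode()
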
Example #2
    def license_check_capacity(self):
        reporting.add_test_script(str(__name__) + "_check_capacity")
        try:
            # Create license using CLI command
            self.cmd = command_argument_string.license_create + \
                tvaultconf.capacity_license_filename
            rc = cli_parser.cli_returncode(self.cmd)
            if rc != 0:
                reporting.add_test_step("Apply 100GB license", tvaultconf.FAIL)
                reporting.set_test_script_status(tvaultconf.FAIL)
                raise Exception("Command did not execute correctly")
            else:
                reporting.add_test_step("Apply 100GB license", tvaultconf.PASS)

            # Verify license-check CLI command
            self.cmd = command_argument_string.license_check
            rc = cli_parser.cli_returncode(self.cmd)
            if rc != 0:
                reporting.add_test_step("Execute license-check command",
                                        tvaultconf.FAIL)
                raise Exception("Command did not execute correctly")
            else:
                reporting.add_test_step("Execute license-check command",
                                        tvaultconf.PASS)

            # Verification
            out = cli_parser.cli_output(self.cmd)
            LOG.debug("CLI Response: " + str(out))
            get_usage_tvault = "df -h | grep triliovault-mounts"
            ssh = self.SshRemoteMachineConnection(tvaultconf.tvault_ip[0],
                                                  tvaultconf.tvault_dbusername,
                                                  tvaultconf.tvault_password)
            stdin, stdout, stderr = ssh.exec_command(get_usage_tvault)
            # paramiko returns bytes on Python 3, so decode before splitting
            tmp = ' '.join(stdout.read().decode().split())
            usage = tmp.split(' ')
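            # df -h columns: Filesystem Size Used Avail Use% Mounted on,
            # so usage[2] is the "Used" figure for the triliovault mount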
            LOG.debug("Data from Tvault: " + str(usage) + " Usage: " +
                      str(usage[2]))
            ssh.close()
            if (str(out).find(usage[2]) != -1):
                reporting.add_test_step("License-check verification",
                                        tvaultconf.PASS)
            else:
                reporting.add_test_step("License-check verification",
                                        tvaultconf.FAIL)
                raise Exception("License-check verification failed")
            reporting.test_case_to_write()
        except Exception as e:
            LOG.error("Exception: " + str(e))
            reporting.set_test_script_status(tvaultconf.FAIL)
            reporting.test_case_to_write()
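Example #3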
    def test_tvault1030_list_workloadtype(self):
        try:
            # List available workload types using CLI command
            rc = cli_parser.cli_returncode(
                command_argument_string.workload_type_list)
            if rc != 0:
                reporting.add_test_step("Execute workload-type-list command",
                                        tvaultconf.FAIL)
                raise Exception("Command did not execute correctly")
            else:
                reporting.add_test_step("Execute workload-type-list command",
                                        tvaultconf.PASS)
                LOG.debug("Command executed correctly")

            wc = query_data.get_available_workload_types()
            out = cli_parser.cli_output(
                command_argument_string.workload_type_list)
            if (int(wc) == int(out)):
                reporting.add_test_step("Verification with DB",
                                        tvaultconf.PASS)
                LOG.debug(
                    "Workload type list command listed available workload types correctly"
                )
            else:
                reporting.add_test_step("Verification with DB",
                                        tvaultconf.FAIL)
                raise Exception(
                    "Workload type list command did not list available workload types correctly"
                )
            reporting.test_case_to_write()

        except Exception as e:
            LOG.error("Exception: " + str(e))
            reporting.set_test_script_status(tvaultconf.FAIL)
            reporting.test_case_to_write()
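
The "Verification with DB" steps compare a count from query_data against the CLI output. A hypothetical sketch of the DB side, assuming a MySQL workloads database reachable via pymysql (connection details, table, and column names are guesses, not the project's schema):

import pymysql

def get_available_workload_types():
    # Hypothetical: count non-deleted workload-type rows so the test can
    # compare the figure against the CLI listing.
    conn = pymysql.connect(host='localhost', user='root',
                           password='password', database='workloads')
    try:
        with conn.cursor() as cur:
            cur.execute("SELECT COUNT(*) FROM workload_types "
                        "WHERE deleted=0")
            (count,) = cur.fetchone()
        return count
    finally:
        conn.close()
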
Example #4
    def test_02_invalid_license(self):
        reporting.add_test_script(str(__name__) + "_invalid_license")
        try:
            # Create license using CLI command
            self.cmd = command_argument_string.license_create + tvaultconf.invalid_license_filename
            LOG.debug("License create command: " + str(self.cmd))
            rc = cli_parser.cli_returncode(self.cmd)
            # Negative test: the CLI is expected to reject an invalid
            # license, so a non-zero return code is the PASS outcome
            if rc != 0:
                reporting.add_test_step(
                    "Execute license_create command with invalid license",
                    tvaultconf.PASS)
                LOG.debug("Command executed correctly")
            else:
                reporting.add_test_step(
                    "Execute license_create command with invalid license",
                    tvaultconf.FAIL)
                raise Exception("Command not executed correctly")

            # Get license key content
            self.license_txt = ""
            with open(tvaultconf.invalid_license_filename) as f:
                for line in f:
                    self.license_txt += line
            LOG.debug("License text: " + str(self.license_txt))
            out = self.create_license(tvaultconf.invalid_license_filename,
                                      self.license_txt)
            LOG.debug("license-create API output: " + str(out))
            if (str(out).find('Cannot verify the license signature') != -1):
                reporting.add_test_step("Verify error message",
                                        tvaultconf.PASS)
            else:
                reporting.add_test_step("Verify error message",
                                        tvaultconf.FAIL)
                raise Exception("Incorrect error message displayed")
            reporting.test_case_to_write()
        except Exception as e:
            LOG.error("Exception: " + str(e))
            reporting.set_test_script_status(tvaultconf.FAIL)
            reporting.test_case_to_write()
Example #5
    def test_01_expired_license(self):
        reporting.add_test_script(str(__name__) + "_expired_license")
        try:
            #Create license using CLI command
            self.cmd = command_argument_string.license_create + tvaultconf.expired_license_filename
            LOG.debug("License create command: " + str(self.cmd))
            rc = cli_parser.cli_returncode(self.cmd)
            LOG.debug("rc value: " + str(rc))
            if rc != 0:
                reporting.add_test_step(
                    "Execute license_create command with expired license",
                    tvaultconf.FAIL)
                raise Exception("Command not executed correctly")
            else:
                reporting.add_test_step(
                    "Execute license_create command with expired license",
                    tvaultconf.PASS)
                LOG.debug("Command executed correctly")

            out = self.get_license_check()
            LOG.debug("license-check API output: " + str(out))
            if (str(out).find('License expired') != -1):
                reporting.add_test_step("Verify license expiration message",
                                        tvaultconf.PASS)
            else:
                reporting.add_test_step("Verify license expiration message",
                                        tvaultconf.FAIL)
                raise Exception(
                    "Incorrect license expiration message displayed")
            reporting.test_case_to_write()
        except Exception as e:
            LOG.error("Exception: " + str(e))
            reporting.set_test_script_status(tvaultconf.FAIL)
            reporting.test_case_to_write()
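Example #6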
    def test_create_license(self):
        # Create license using CLI command
        self.cmd = command_argument_string.license_create + tvaultconf.compute_license_filename
        LOG.debug("License create command: " + str(self.cmd))
        rc = cli_parser.cli_returncode(self.cmd)
        if rc != 0:
            reporting.add_test_step(
                "Execute license_create command", tvaultconf.FAIL)
            reporting.set_test_script_status(tvaultconf.FAIL)
            raise Exception("Command did not execute correctly")
        else:
            reporting.add_test_step(
                "Execute license_create command", tvaultconf.PASS)
            LOG.debug("Command executed correctly")

        # Verification
        self.license_data = self.get_license_list()
        LOG.debug("License data returned: " + str(self.license_data))
        if(len(self.license_data.keys()) != 0):
            reporting.add_test_step("License verification", tvaultconf.PASS)
        else:
            reporting.add_test_step("License verification", tvaultconf.FAIL)
            reporting.set_test_script_status(tvaultconf.FAIL)
            raise Exception("License not added")
        reporting.test_case_to_write()
Example #7
    def test_3_config_backup_show(self):
        global config_backup_id
        reporting.add_test_script(str(__name__) + "_config_backup_show: cli")
        try:
            # test config_backup_show
            config_backup_show_command = command_argument_string.config_backup_show + " " + str(
                config_backup_id)

            LOG.debug("config backup show cli command: " +
                      str(config_backup_show_command))

            rc = cli_parser.cli_returncode(config_backup_show_command)
            if rc != 0:
                reporting.add_test_step(
                    "Triggering config_backup_show command via CLI",
                    tvaultconf.FAIL)
                raise Exception("Command did not execute correctly")
            else:
                reporting.add_test_step(
                    "Triggering config_backup_show command via CLI",
                    tvaultconf.PASS)
                LOG.debug("Command executed correctly")

            reporting.test_case_to_write()

        except Exception as e:
            LOG.error("Exception: " + str(e))
            reporting.set_test_script_status(tvaultconf.FAIL)
            reporting.test_case_to_write()
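Example #8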
    def test_4_config_workload_configure_invalid_user(self):
        reporting.add_test_script(str(__name__) + "_invalid_user")
        try:

            # remove sudo access from config_user
            self.sudo_access_config_user(access=False)

            # for config backup configuration, yaml_file creation
            self.create_config_backup_yaml()

            # config backup configuration with CLI command
            config_workload_command = command_argument_string.config_workload_configure + " --config-file yaml_file.yaml --authorized-key config_backup_pvk "

            LOG.debug("config workload configure cli command: " +
                      str(config_workload_command))

            rc = cli_parser.cli_returncode(config_workload_command)
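            # Negative test: without sudo access the configure command is
            # expected to fail, so a non-zero return code is the PASS outcome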
            if rc != 0:
                reporting.add_test_step(
                    "Triggering config_workload_configure command via CLI",
                    tvaultconf.PASS)
                LOG.debug("Command executed correctly")
            else:
                reporting.add_test_step(
                    "Triggering config_workload_configure command via CLI",
                    tvaultconf.FAIL)
                LOG.debug("Command executed incorrectly")

            reporting.test_case_to_write()

        except Exception as e:
            LOG.error("Exception: " + str(e))
            reporting.set_test_script_status(tvaultconf.FAIL)
            reporting.test_case_to_write()
Example #9
    def test_tvault1035_list_workload(self):
        try:
            # Prerequisites
            self.created = False
            self.workload_instances = []
            # Launch instance
            self.vm_id = self.create_vm()
            LOG.debug("VM ID: " + str(self.vm_id))

            # Create volume
            self.volume_id = self.create_volume()
            LOG.debug("Volume ID: " + str(self.volume_id))

            # Attach volume to the instance
            self.attach_volume(self.volume_id, self.vm_id)
            LOG.debug("Volume attached")

            # Create workload
            self.workload_instances.append(self.vm_id)
            self.wid = self.workload_create(
                self.workload_instances,
                tvaultconf.parallel,
                workload_name=tvaultconf.workload_name)
            LOG.debug("Workload ID: " + str(self.wid))

            # List available workloads using CLI command
            rc = cli_parser.cli_returncode(
                command_argument_string.workload_list)
            if rc != 0:
                reporting.add_test_step(
                    "Execute workload-list command", tvaultconf.FAIL)
                raise Exception("Command did not execute correctly")
            else:
                reporting.add_test_step(
                    "Execute workload-list command", tvaultconf.PASS)
                LOG.debug("Command executed correctly")

            wc = query_data.get_available_workloads()
            out = cli_parser.cli_output(command_argument_string.workload_list)
            if (int(wc) == int(out)):
                reporting.add_test_step(
                    "Verification with DB", tvaultconf.PASS)
                LOG.debug(
                    "Workload list command listed available workloads correctly")
            else:
                reporting.add_test_step(
                    "Verification with DB", tvaultconf.FAIL)
                raise Exception(
                    "Workload list command did not list available workloads correctly, from db: " +
                    str(wc) +
                    " , from cmd: " +
                    str(out))
            reporting.test_case_to_write()

        except Exception as e:
            LOG.error("Exception: " + str(e))
            reporting.set_test_script_status(tvaultconf.FAIL)
            reporting.test_case_to_write()
Example #10
    def test_05_license_check_compute(self):
        reporting.add_test_script(str(__name__) + "_check_compute")
        try:
            # Create license using CLI command
            self.cmd = command_argument_string.license_create + \
                tvaultconf.compute_license_filename
            rc = cli_parser.cli_returncode(self.cmd)
            if rc != 0:
                reporting.add_test_step("Apply 10 compute node license",
                                        tvaultconf.FAIL)
                reporting.set_test_script_status(tvaultconf.FAIL)
                raise Exception("Command did not execute correctly")
            else:
                reporting.add_test_step("Apply 10 compute node license",
                                        tvaultconf.PASS)

            # Verify license-check CLI command
            self.cmd = command_argument_string.license_check
            rc = cli_parser.cli_returncode(self.cmd)
            if rc != 0:
                reporting.add_test_step("Execute license-check command",
                                        tvaultconf.FAIL)
                raise Exception("Command did not execute correctly")
            else:
                reporting.add_test_step("Execute license-check command",
                                        tvaultconf.PASS)

            # Verification
            out = cli_parser.cli_output(self.cmd)
            LOG.debug("CLI Response: " + str(out))
            if (str(out).find('Number of compute nodes deployed \'' +
                              str(tvaultconf.no_of_compute_nodes) + '\'') !=
                    -1):
                reporting.add_test_step("License-check verification",
                                        tvaultconf.PASS)
            else:
                reporting.add_test_step("License-check verification",
                                        tvaultconf.FAIL)
                raise Exception("License-check verification failed")
            reporting.test_case_to_write()
        except Exception as e:
            LOG.error("Exception: " + str(e))
            reporting.set_test_script_status(tvaultconf.FAIL)
            reporting.test_case_to_write()
Example #11
    def test_1_create_full_snapshot(self):
        try:
            reporting.add_test_script(str(__name__) + "_create_full_snapshot")

            global vm_id
            global volume_id
            global workload_id
            global snapshot_id

            workload_id = self.wid
            vm_id = self.vm_id
            volume_id = self.volume_id

            LOG.debug("workload is:" + str(workload_id))
            LOG.debug("vm id: " + str(vm_id))
            LOG.debug("volume id: " + str(volume_id))

            self.created = False

            # Create snapshot with CLI command
            create_snapshot = command_argument_string.snapshot_create + workload_id
            LOG.debug("Create snapshot command: " + str(create_snapshot))
            rc = cli_parser.cli_returncode(create_snapshot)
            if rc != 0:
                reporting.add_test_step(
                    "Execute workload-snapshot command with --full",
                    tvaultconf.FAIL)
                raise Exception("Command did not execute correctly")
            else:
                reporting.add_test_step(
                    "Execute workload-snapshot command with --full",
                    tvaultconf.PASS)
                LOG.debug("Command executed correctly")

            snapshot_id = query_data.get_inprogress_snapshot_id(workload_id)
            LOG.debug("Snapshot ID: " + str(snapshot_id))

            wc = self.wait_for_snapshot_tobe_available(workload_id,
                                                       snapshot_id)
            if (str(wc) == "available"):
                reporting.add_test_step("Full snapshot", tvaultconf.PASS)
                LOG.debug("Workload snapshot successfully completed")
                self.created = True
            else:
                if (str(wc) == "error"):
                    pass
            if (self.created == False):
                reporting.add_test_step("Full snapshot", tvaultconf.FAIL)
                raise Exception("Workload snapshot did not get created")

            reporting.test_case_to_write()

        except Exception as e:
            LOG.error("Exception: " + str(e))
            reporting.set_test_script_status(tvaultconf.FAIL)
            reporting.test_case_to_write()
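Example #12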
    def test_tvault1033_create_workload(self):
        try:
            # Prerequisites
            self.created = False
            # Launch instance
            self.vm_id = self.create_vm()
            LOG.debug("VM ID: " + str(self.vm_id))

            # Create volume
            self.volume_id = self.create_volume()
            LOG.debug("Volume ID: " + str(self.volume_id))

            # Attach volume to the instance
            self.attach_volume(self.volume_id, self.vm_id)
            LOG.debug("Volume attached")

            # Create workload with CLI command
            workload_create = command_argument_string.workload_create + \
                " --instance instance-id=" + str(self.vm_id)
            rc = cli_parser.cli_returncode(workload_create)
            if rc != 0:
                reporting.add_test_step(
                    "Execute workload-create command", tvaultconf.FAIL)
                raise Exception("Command did not execute correctly")
            else:
                reporting.add_test_step(
                    "Execute workload-create command", tvaultconf.PASS)
                LOG.debug("Command executed correctly")

            time.sleep(10)
            self.wid = query_data.get_workload_id(tvaultconf.workload_name)
            LOG.debug("Workload ID: " + str(self.wid))
            if(self.wid is not None):
                self.wait_for_workload_tobe_available(self.wid)
                if(self.getWorkloadStatus(self.wid) == "available"):
                    reporting.add_test_step("Create workload", tvaultconf.PASS)
                else:
                    reporting.add_test_step("Create workload", tvaultconf.FAIL)
                    reporting.set_test_script_status(tvaultconf.FAIL)
            else:
                reporting.add_test_step("Create workload", tvaultconf.FAIL)
                reporting.set_test_script_status(tvaultconf.FAIL)

            # Cleanup
            # Delete workload
            self.workload_delete(self.wid)
            LOG.debug("Workload deleted successfully")
            reporting.test_case_to_write()

        except Exception as e:
            LOG.error("Exception: " + str(e))
            reporting.set_test_script_status(tvaultconf.FAIL)
            reporting.test_case_to_write()
Example #13
    def test_2_create_incremental_snapshot(self):
        try:
            reporting.add_test_script(
                str(__name__) + "_create_incremental_snapshot")

            global workload_id
            self.created = False
            LOG.debug("workload is:" + str(workload_id))

            # Create incremental snapshot using CLI command
            create_snapshot = command_argument_string.incr_snapshot_create + workload_id
            LOG.debug("Create snapshot command: " + str(create_snapshot))
            rc = cli_parser.cli_returncode(create_snapshot)
            if rc != 0:
                reporting.add_test_step("Execute workload-snapshot command",
                                        tvaultconf.FAIL)
                raise Exception("Command did not execute correctly")
            else:
                reporting.add_test_step("Execute workload-snapshot command",
                                        tvaultconf.PASS)
                LOG.debug("Command executed correctly")

            self.incr_snapshot_id = query_data.get_inprogress_snapshot_id(
                workload_id)
            LOG.debug("Incremental Snapshot ID: " + str(self.incr_snapshot_id))

            # Wait for incremental snapshot to complete
            wc = self.wait_for_snapshot_tobe_available(workload_id,
                                                       self.incr_snapshot_id)
            if (str(wc) == "available"):
                reporting.add_test_step("Incremental snapshot",
                                        tvaultconf.PASS)
                LOG.debug(
                    "Workload incremental snapshot successfully completed")
                self.created = True
            if (self.created == False):
                reporting.add_test_step("Incremental snapshot",
                                        tvaultconf.FAIL)
                raise Exception(
                    "Workload incremental snapshot did not get created")

            # Cleanup
            # Delete snapshot
            self.snapshot_delete(workload_id, self.incr_snapshot_id)
            LOG.debug("Incremental Snapshot deleted successfully")

            reporting.test_case_to_write()

        except Exception as e:
            LOG.error("Exception: " + str(e))
            reporting.set_test_script_status(tvaultconf.FAIL)
            reporting.test_case_to_write()
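
Examples #11, #13, and #19 all block on wait_for_snapshot_tobe_available, which is not shown. A minimal polling sketch, assuming a getSnapshotStatus accessor like the one Example #19 uses (the interval and timeout values are illustrative):

    def wait_for_snapshot_tobe_available(self, workload_id, snapshot_id,
                                         interval=10, timeout=1800):
        # Poll the snapshot status until it reaches a terminal state
        # ("available" or "error") or the timeout expires.
        import time
        deadline = time.time() + timeout
        while time.time() < deadline:
            status = self.getSnapshotStatus(workload_id, snapshot_id)
            if status in ("available", "error"):
                return status
            time.sleep(interval)
        return "error"
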
Example #14
    def test_4_delete_snapshot(self):
        try:
            global workload_id
            global snapshot_id
            global volume_id
            global vm_id

            reporting.add_test_script(str(__name__) + "_delete_snapshot")

            # Delete snapshot using CLI command
            rc = cli_parser.cli_returncode(
                command_argument_string.snapshot_delete + snapshot_id)
            if rc != 0:
                reporting.add_test_step("Execute snapshot-delete command",
                                        tvaultconf.FAIL)
                raise Exception("Command did not execute correctly")
            else:
                reporting.add_test_step("Execute snapshot-delete command",
                                        tvaultconf.PASS)
                LOG.debug("Command executed correctly")
            time.sleep(5)
            wc = query_data.get_workload_snapshot_delete_status(
                tvaultconf.snapshot_name, tvaultconf.snapshot_type_full,
                snapshot_id)
            LOG.debug("Snapshot Delete status: " + str(wc))
            if (str(wc) == "1"):
                reporting.add_test_step("Verification", tvaultconf.PASS)
                LOG.debug("Workload snapshot successfully deleted")
            else:
                reporting.add_test_step("Verification", tvaultconf.FAIL)
                raise Exception("Snapshot did not get deleted")

            # Cleanup
            # Delete volume
            self.volume_snapshots = self.get_available_volume_snapshots()
            self.delete_volume_snapshots(self.volume_snapshots)

            # Delete workload
            self.workload_delete(workload_id)

            # Delete vm
            self.delete_vm(vm_id)

            # Delete volume
            self.delete_volume(volume_id)

            reporting.test_case_to_write()

        except Exception as e:
            LOG.error("Exception: " + str(e))
            reporting.set_test_script_status(tvaultconf.FAIL)
            reporting.test_case_to_write()
Example #15
    def test_4_config_backup_delete(self):
        global config_backup_id
        reporting.add_test_script(str(__name__) + "_config_backup_delete: cli")
        try:
            # test_config_backup_delete

            # config backup configuration with CLI command
            config_backup_delete_command = command_argument_string.config_backup_delete + " " + str(
                config_backup_id)

            LOG.debug("config backup delete cli command: " +
                      str(config_backup_delete_command))

            rc = cli_parser.cli_returncode(config_backup_delete_command)
            if rc != 0:
                reporting.add_test_step(
                    "Triggering config_backup_delete command via CLI",
                    tvaultconf.FAIL)
                raise Exception("Command did not execute correctly")
            else:
                reporting.add_test_step(
                    "Triggering config_backup_delete command via CLI",
                    tvaultconf.PASS)
                LOG.debug("Command executed correctly")

            config_backup_id_after_deletion = query_data.get_config_backup_id()
            LOG.debug("Config backup id after: " +
                      str(config_backup_id_after_deletion))

            if config_backup_id_after_deletion == config_backup_id:
                reporting.add_test_step("Config Backup Deletion",
                                        tvaultconf.FAIL)
                reporting.set_test_script_status(tvaultconf.FAIL)
            else:
                reporting.add_test_step("Config Backup Deletion",
                                        tvaultconf.PASS)

            # Delete config_user
            self.delete_config_user()

            reporting.test_case_to_write()

        except Exception as e:
            LOG.error("Exception: " + str(e))
            reporting.set_test_script_status(tvaultconf.FAIL)
            reporting.test_case_to_write()
Example #16
    def config_backup(self):
        self.config_user_create()
        # For config backup configuration, yaml_file creation;
        # list(...) keeps the dict-view access working on Python 3
        added_dir = {list(tvaultconf.additional_dir.keys())[0]: {
            'config_dir': list(tvaultconf.additional_dir.values())[0]}}
        self.create_config_backup_yaml(added_dir=added_dir)

        # Config backup configuration with CLI command
        config_workload_command = command_argument_string.config_workload_configure + \
            " --config-file yaml_file.yaml --authorized-key config_backup_pvk "

        LOG.debug("config workload configure cli command: " +
                  str(config_workload_command))

        rc = cli_parser.cli_returncode(config_workload_command)
        if rc != 0:
            LOG.debug("Triggering config_workload_configure command via CLI FAIL")
            raise Exception("Command did not execute correctly")
        else:
            LOG.debug("Triggering config_workload_configure command via CLI PASS")
            LOG.debug("Command executed correctly")
Example #17
    def test_3_list_snapshot(self):
        try:
            reporting.add_test_script(str(__name__) + "_list_snapshot")

            # List snapshots using CLI command
            rc = cli_parser.cli_returncode(
                command_argument_string.snapshot_list)
            if rc != 0:
                reporting.add_test_step("Execute snapshot-list command",
                                        tvaultconf.FAIL)
                raise Exception("Command did not execute correctly")
            else:
                reporting.add_test_step("Execute snapshot-list command",
                                        tvaultconf.PASS)
                LOG.debug("Command executed correctly")

            wc = query_data.get_available_snapshots()
            out = cli_parser.cli_output(command_argument_string.snapshot_list)
            if (int(wc) == int(out)):
                reporting.add_test_step("Verification with DB",
                                        tvaultconf.PASS)
                LOG.debug(
                    "Snapshot list command listed available snapshots correctly"
                )
            else:
                reporting.add_test_step("Verification with DB",
                                        tvaultconf.FAIL)
                raise Exception(
                    "Snapshot list command did not list available snapshots correctly"
                )

            reporting.test_case_to_write()

        except Exception as e:
            LOG.error("Exception: " + str(e))
            reporting.set_test_script_status(tvaultconf.FAIL)
            reporting.test_case_to_write()
Example #18
    def test_tvault1031_show_workloadtype(self):
        try:
            #Get workload type details using CLI command
            rc = cli_parser.cli_returncode(
                command_argument_string.workload_type_show)
            if rc != 0:
                reporting.add_test_step("Execute workload-type-show command",
                                        tvaultconf.FAIL)
                raise Exception("Command did not execute correctly")
            else:
                reporting.add_test_step("Execute workload-type-show command",
                                        tvaultconf.PASS)
                LOG.debug("Command executed correctly")

            db_resp = query_data.get_workload_type_data(
                tvaultconf.workload_type_id)
            LOG.debug("Response from DB: " + str(db_resp))
            cmd_resp = cli_parser.cli_output(
                command_argument_string.workload_type_show)
            LOG.debug("Response from CLI: " + str(cmd_resp))

            if (db_resp[5] == tvaultconf.workload_type_id):
                reporting.add_test_step("Verification with DB",
                                        tvaultconf.PASS)
                LOG.debug("Workload type response from CLI and DB match")
            else:
                reporting.add_test_step("Verification with DB",
                                        tvaultconf.FAIL)
                raise Exception(
                    "Workload type response from CLI and DB do not match")
            reporting.test_case_to_write()

        except Exception as e:
            LOG.error("Exception: " + str(e))
            reporting.set_test_script_status(tvaultconf.FAIL)
            reporting.test_case_to_write()
Example #19
    def test_1_volume_booted(self):
        try:
            ### Create vm ###
            deleted = 0
            reporting.add_test_script(str(__name__))

            volume_id = self.create_volume(size=tvaultconf.bootfromvol_vol_size, image_id=CONF.compute.image_ref, volume_cleanup=False)
            self.set_volume_as_bootable(volume_id)
            self.block_mapping_details = [{ "source_type": "volume",
                            "delete_on_termination": "false",
                            "boot_index": 0,
                            "uuid": volume_id,
                            "destination_type": "volume"}]
            vm_id = self.create_vm(image_id="", block_mapping_data=self.block_mapping_details, vm_cleanup=False)

            ### Create workload ###

            workload_id=self.workload_create([vm_id],tvaultconf.parallel, workload_cleanup=True)
            LOG.debug("Workload ID: " + str(workload_id))
            if (workload_id is not None):
                self.wait_for_workload_tobe_available(workload_id)
                if(self.getWorkloadStatus(workload_id) == "available"):
                    reporting.add_test_step("Create workload", tvaultconf.PASS)
                else:
                    reporting.add_test_step("Create workload", tvaultconf.FAIL)
                    reporting.set_test_script_status(tvaultconf.FAIL)
            else:
                reporting.add_test_step("Create workload", tvaultconf.FAIL)
                reporting.set_test_script_status(tvaultconf.FAIL)
                raise Exception("Workload creation failed") 

            ### Full snapshot ###

            snapshot_id=self.workload_snapshot(workload_id, True, snapshot_cleanup=True)
            LOG.debug("\nworkload id : {}\n".format(str(workload_id)))
            LOG.debug("\nsnapshot id : {}\n".format(str(snapshot_id)))    
            time.sleep(5)
            self.wait_for_workload_tobe_available(workload_id)
            if(self.getSnapshotStatus(workload_id, snapshot_id) == "available"):
                reporting.add_test_step("Create full snapshot of boot from volume instance", tvaultconf.PASS)
                LOG.debug("Full snapshot available!!")
            else:
                reporting.add_test_step("Create full snapshot of boot from volume instance", tvaultconf.FAIL)
                raise Exception("Snapshot creation failed")

            volume_snapshots = self.get_available_volume_snapshots()

            LOG.debug("\nvolume is : {}\n".format(str(volume_id)))
            LOG.debug("\nvolume snapshots : {}\n".format(str(volume_snapshots)))

            ### Incremental snapshot ###

            self.created = False
            LOG.debug("workload is:" + str(workload_id))

            create_snapshot = command_argument_string.incr_snapshot_create + workload_id
            LOG.debug("Create snapshot command: " + str(create_snapshot))
            rc = cli_parser.cli_returncode(create_snapshot)
            if rc != 0:
                reporting.add_test_step("Execute workload-snapshot command", tvaultconf.FAIL)
                raise Exception("Command did not execute correctly")
            else:
                reporting.add_test_step("Execute workload-snapshot command", tvaultconf.PASS)
                LOG.debug("Command executed correctly")

            incr_snapshot_id = query_data.get_inprogress_snapshot_id(workload_id)
            LOG.debug("Incremental Snapshot ID: " + str(incr_snapshot_id))
            #Wait for incremental snapshot to complete
            wc = self.wait_for_snapshot_tobe_available(workload_id, incr_snapshot_id)
            if (str(wc) == "available"):
                reporting.add_test_step("Incremental snapshot", tvaultconf.PASS)
                LOG.debug("Workload incremental snapshot successfully completed")
                self.created = True
            if (self.created == False):
                reporting.add_test_step("Incremental snapshot", tvaultconf.FAIL)
                raise Exception ("Workload incremental snapshot did not get created")

            if (tvaultconf.cleanup == True):
                self.addCleanup(self.snapshot_delete,workload_id, incr_snapshot_id)

            ### Selective restore ###

            instance_details = []
            network_details  = []
            restored_vm_details_list = []
            vms_details_after_restore = []
            int_net_1_name = self.get_net_name(CONF.network.internal_network_id)
            LOG.debug("int_net_1_name" + str(int_net_1_name))
            int_net_1_subnets = self.get_subnet_id(CONF.network.internal_network_id)
            LOG.debug("int_net_1_subnet" + str(int_net_1_subnets))

            #Create instance details for restore.json


            vm_name = "tempest_test_vm_"+vm_id+"_restored"
            temp_instance_data = { 'id': vm_id,
                   'availability_zone':CONF.compute.vm_availability_zone,
                                   'include': True,
                                   'restore_boot_disk': True,
                                   'name': vm_name
                                    }
            instance_details.append(temp_instance_data)
            LOG.debug("Instance details for restore: " + str(instance_details))

            #Create network details for restore.json
            snapshot_network = {
                                 'id': CONF.network.internal_network_id,
                                 'subnet': { 'id': int_net_1_subnets }
                               }
            target_network = { 'name': int_net_1_name,
                               'id': CONF.network.internal_network_id,
                               'subnet': { 'id': int_net_1_subnets }
                             }
            network_details = [ { 'snapshot_network': snapshot_network,
                                       'target_network': target_network } ]
            LOG.debug("Network details for restore: " + str(network_details))


            LOG.debug("Snapshot id : " + str(snapshot_id))
            #Trigger selective restore
            restore_id_1=self.snapshot_selective_restore(workload_id, snapshot_id, restore_cleanup=True, restore_name=tvaultconf.restore_name,
                                                            instance_details=instance_details, network_details=network_details)
            LOG.debug("\nRestore ID(selective) : {}\n".format(restore_id_1))
            self.wait_for_snapshot_tobe_available(workload_id, snapshot_id)
            if(self.getRestoreStatus(workload_id, snapshot_id, restore_id_1) == "available"):
                reporting.add_test_step("Selective restore", tvaultconf.PASS)
            else:
                reporting.add_test_step("Selective restore", tvaultconf.FAIL)
                raise Exception("Selective restore failed")

            #Fetch instance details after restore
            vm_list  =  self.get_restored_vm_list(restore_id_1)
            LOG.debug("Restored vm(selective) ID : " + str(vm_list))

            for id in range(len(vm_list)):
                restored_vm_details_list.append(self.get_vm_details(vm_list[id]))
            LOG.debug("Restored vm details list: " + str(restored_vm_details_list))

            vms_details_after_restore = self.get_vms_details_list(restored_vm_details_list)
            LOG.debug("VM details after restore: " + str(vms_details_after_restore))

            #Compare the data before and after restore
            for i in range(len(vms_details_after_restore)):
                if(vms_details_after_restore[i]['network_name'] == int_net_1_name):
                    reporting.add_test_step("Network verification for instance-" + str(i+1), tvaultconf.PASS)
                else:
                    LOG.error("Expected network: " + str(int_net_1_name))
                    LOG.error("Restored network: " + str(vms_details_after_restore[i]['network_name']))
                    reporting.add_test_step("Network verification for instance-" + str(i+1), tvaultconf.FAIL)
                    reporting.set_test_script_status(tvaultconf.FAIL)

            restored_volumes = self.get_restored_volume_list(restore_id_1)
            LOG.debug("Restored volumes list: "+str(restored_volumes))


            ### Inplace restore ###

            #Create in-place restore with CLI command
            restore_command  = command_argument_string.inplace_restore + str(tvaultconf.restore_filename) + " "  + str(incr_snapshot_id)

            LOG.debug("inplace restore cli command: " + str(restore_command))
            #Restore.json with only volume 2 excluded
            restore_json = json.dumps({
                'openstack': {
                    'instances': [{
                        'restore_boot_disk': True,
                        'include': True,
                        'id': vm_id
                    }],
                    'networks_mapping': {
                        'networks': []
                    }
                },
                'restore_type': 'inplace',
                'type': 'openstack'
            })
            LOG.debug("restore.json for inplace restore: " + str(restore_json))
            #Create restore.json; write the payload as-is so the file stays
            #valid JSON (str(json.loads(...)) would write a Python-dict repr)
            with open(tvaultconf.restore_filename, 'w') as f:
                f.write(restore_json)
            rc = cli_parser.cli_returncode(restore_command)
            if rc != 0:
                reporting.add_test_step("Triggering In-Place restore via CLI", tvaultconf.FAIL)
                raise Exception("Command did not execute correctly")
            else:
                reporting.add_test_step("Triggering In-Place restore via CLI", tvaultconf.PASS)
                LOG.debug("Command executed correctly")

            #get restore id from database
            restore_id_2 = query_data.get_snapshot_restore_id(incr_snapshot_id)
            LOG.debug("\nRestore ID(inplace) : {}\n".format(restore_id_2))
            self.wait_for_snapshot_tobe_available(workload_id, incr_snapshot_id)

            #get in-place restore status
            if(self.getRestoreStatus(workload_id, incr_snapshot_id, restore_id_2) == "available"):
                reporting.add_test_step("In-place restore", tvaultconf.PASS)
            else:
                reporting.add_test_step("In-place restore", tvaultconf.FAIL)
                raise Exception("In-place restore failed")

            #Fetch instance details after restore
            vm_list  =  self.get_restored_vm_list(restore_id_2)
            LOG.debug("Restored vm(In-place) ID : " + str(vm_list))

            #Delete restore for snapshot
            if (tvaultconf.cleanup == True):
                self.addCleanup(self.restore_delete, workload_id, incr_snapshot_id, restore_id_2)

            ### Oneclick restore ###

            #Delete the original instance
            self.delete_vm(vm_id)
            LOG.debug("Instance deleted successfully for one click restore : "+str(vm_id))
            deleted = 1
            time.sleep(10)
            self.delete_volume(volume_id)
            LOG.debug("Volume deleted successfully for one click restore : "+str(volume_id))

            #Create one-click restore using CLI command
            restore_command = command_argument_string.oneclick_restore + " " + snapshot_id
            rc = cli_parser.cli_returncode(restore_command)
            if rc != 0:
                reporting.add_test_step("Execute snapshot-oneclick-restore command", tvaultconf.FAIL)
                raise Exception("Command did not execute correctly")
            else:
                reporting.add_test_step("Execute snapshot-oneclick-restore command", tvaultconf.PASS)
                LOG.debug("Command executed correctly")


            restore_id_3 = query_data.get_snapshot_restore_id(snapshot_id)
            LOG.debug("\nRestore ID(oneclick): {}\n".format(str(restore_id_3)))

            self.wait_for_snapshot_tobe_available(workload_id, snapshot_id)
            if(self.getRestoreStatus(workload_id, snapshot_id, restore_id_3) == "available"):
                reporting.add_test_step("One-click restore", tvaultconf.PASS)
                LOG.debug("One-click restore passed")
            else:
                reporting.add_test_step("One-click restore", tvaultconf.FAIL)
                LOG.debug("One-click restore failed")
                raise Exception("One-click restore failed")
            LOG.debug("One-click restore complete")

            restored_volumes = self.get_restored_volume_list(restore_id_3)
            vm_list  =  self.get_restored_vm_list(restore_id_3)

            LOG.debug("Restored vms : " + str(vm_list))
            if (tvaultconf.cleanup == True):
                self.addCleanup(self.restore_delete, workload_id, snapshot_id, restore_id_3)
                time.sleep(20)
                self.addCleanup(self.delete_restored_vms, vm_list, restored_volumes)
            reporting.test_case_to_write()

        except Exception as e:
            LOG.error("Exception: " + str(e))
            if (deleted == 0):
                self.delete_vm(vm_id)
                time.sleep(10)
                self.delete_volume(volume_id)
            reporting.set_test_script_status(tvaultconf.FAIL)
            reporting.test_case_to_write()
Example #20
    def test_1_config_backup(self):
        global config_backup_id
        reporting.add_test_script(str(__name__) + "_default + added_dir: cli")
        try:
            # prerequisite handles config_user creation and config_backup_pvk(private key) creation and config_workload configuration

            config_workload_md5sum_before_backup = self.calculate_md5_config_backup(
                added_dir=tvaultconf.additional_dir)
            LOG.debug("config_workload_md5sum_before_backup: " +
                      str(config_workload_md5sum_before_backup))

            #config backup configuration with CLI command
            config_backup_command = command_argument_string.config_backup

            LOG.debug("config backup cli command: " +
                      str(config_backup_command))

            rc = cli_parser.cli_returncode(config_backup_command)
            if rc != 0:
                reporting.add_test_step(
                    "Triggering config_backup command via CLI",
                    tvaultconf.FAIL)
                raise Exception("Command did not execute correctly")
            else:
                reporting.add_test_step(
                    "Triggering config_backup command via CLI",
                    tvaultconf.PASS)
                LOG.debug("Command executed correctly")

            time.sleep(10)
            config_backup_id = query_data.get_config_backup_id()
            LOG.debug("Config backup id: " + str(config_backup_id))

            if (config_backup_id is not None):
                status = self.wait_for_config_backup_tobe_available(
                    config_backup_id)
                if status == "available":
                    LOG.debug("config backup detials: " +
                              str(self.show_config_backup(config_backup_id)))
                    reporting.add_test_step("Config Backup", tvaultconf.PASS)
                else:
                    reporting.add_test_step("Config Backup", tvaultconf.FAIL)
                    reporting.set_test_script_status(tvaultconf.FAIL)
            else:
                reporting.add_test_step("Config Backup", tvaultconf.FAIL)
                reporting.set_test_script_status(tvaultconf.FAIL)
                raise Exception("Config Backup Failed.")

            config_workload_md5sum_after_backup = self.calculate_md5_config_backup(
                added_dir=tvaultconf.additional_dir)
            LOG.debug("config_workload_md5sum_after_backup: " +
                      str(config_workload_md5sum_after_backup))

            if config_workload_md5sum_before_backup == config_workload_md5sum_after_backup:
                reporting.add_test_step("Config backup md5 verification",
                                        tvaultconf.PASS)
            else:
                reporting.add_test_step("Config backup md5 verification",
                                        tvaultconf.FAIL)
                reporting.set_test_script_status(tvaultconf.FAIL)

            vault_storage_path = self.show_config_backup(config_backup_id)[
                'config_backup']['vault_storage_path'].rstrip()

            compute_hostname = self.get_compute_hostname().rstrip()

            config_workload_md5sum_after_backup_vault_storage = self.calculate_md5_config_backup(
                vault_storage_path=vault_storage_path,
                compute_hostname=compute_hostname,
                added_dir=tvaultconf.additional_dir)
            LOG.debug("config_workload_vault_md5_sum_vault_storage: " +
                      str(config_workload_md5sum_after_backup_vault_storage))

            if config_workload_md5sum_before_backup == config_workload_md5sum_after_backup_vault_storage:
                reporting.add_test_step(
                    "Config backup md5 verification : vault_storage",
                    tvaultconf.PASS)
            else:
                reporting.add_test_step(
                    "Config backup md5 verification: vault_storage",
                    tvaultconf.FAIL)
                reporting.set_test_script_status(tvaultconf.FAIL)

            reporting.test_case_to_write()

        except Exception as e:
            LOG.error("Exception: " + str(e))
            reporting.set_test_script_status(tvaultconf.FAIL)
            reporting.test_case_to_write()
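
The md5 verification above relies on calculate_md5_config_backup, whose source is not shown. A standard-library sketch of hashing a directory tree the way such a helper presumably works (the helper name, walk order, and chunk size here are illustrative choices, not the project's code):

import hashlib
import os

def md5_of_tree(root):
    # Hash every file under root and return a dict of relative path ->
    # md5 hex digest, suitable for before/after comparison.
    digests = {}
    for dirpath, _, filenames in os.walk(root):
        for name in sorted(filenames):
            path = os.path.join(dirpath, name)
            h = hashlib.md5()
            with open(path, 'rb') as f:
                for chunk in iter(lambda: f.read(8192), b''):
                    h.update(chunk)
            digests[os.path.relpath(path, root)] = h.hexdigest()
    return digests
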
Example #21
    def test_tvault1290_delete_restore(self):
        try:
            # Prerequisites
            self.created = False
            self.workload_instances = []

            # Launch instance
            self.vm_id = self.create_vm(vm_cleanup=False)
            LOG.debug("VM ID: " + str(self.vm_id))

            # Create volume
            self.volume_id = self.create_volume(volume_cleanup=False)
            LOG.debug("Volume ID: " + str(self.volume_id))

            # Attach volume to the instance
            self.attach_volume(self.volume_id, self.vm_id,
                               attach_cleanup=False)
            LOG.debug("Volume attached")

            # Create workload
            self.workload_instances.append(self.vm_id)
            self.wid = self.workload_create(
                self.workload_instances,
                tvaultconf.parallel,
                workload_name=tvaultconf.workload_name)
            LOG.debug("Workload ID: " + str(self.wid))
            time.sleep(5)

            # Create snapshot
            self.snapshot_id = self.workload_snapshot(
                self.wid, True, tvaultconf.snapshot_name)
            LOG.debug("Snapshot ID: " + str(self.snapshot_id))

            # Wait till snapshot is complete
            wc = query_data.get_workload_snapshot_status(
                tvaultconf.snapshot_name, tvaultconf.snapshot_type_full,
                self.snapshot_id)
            LOG.debug("Workload snapshot status: " + str(wc))
            # Poll until the snapshot reaches a terminal state
            while True:
                if (str(wc) == "available"):
                    LOG.debug("Workload snapshot successfully completed")
                    self.created = True
                    break
                elif (str(wc) == "error"):
                    break
                time.sleep(5)
                wc = query_data.get_workload_snapshot_status(
                    tvaultconf.snapshot_name, tvaultconf.snapshot_type_full,
                    self.snapshot_id)
                LOG.debug("Workload snapshot status: " + str(wc))
            if (self.created == False):
                raise Exception("Workload snapshot did not get created")

            # Delete instance
            self.delete_vm(self.vm_id)
            LOG.debug("Instance deleted successfully")

            # Delete corresponding volume
            self.delete_volume(self.volume_id)
            LOG.debug("Volume deleted successfully")

            # Create one-click restore
            self.restore_id = self.snapshot_restore(
                self.wid, self.snapshot_id, tvaultconf.restore_name, restore_cleanup=False)
            LOG.debug("Restore ID: " + str(self.restore_id))
            self.wait_for_snapshot_tobe_available(self.wid, self.snapshot_id)

            self.restore_vm_id = self.get_restored_vm_list(self.restore_id)
            LOG.debug("Restore VM ID: " + str(self.restore_vm_id))

            self.restore_volume_id = self.get_restored_volume_list(
                self.restore_id)
            LOG.debug("Restore Volume ID: " + str(self.restore_volume_id))

            # Delete restore for snapshot using CLI command
            rc = cli_parser.cli_returncode(
                command_argument_string.restore_delete + self.restore_id)
            if rc != 0:
                reporting.add_test_step(
                    "Execute restore-delete command", tvaultconf.FAIL)
                raise Exception("Command did not execute correctly")
            else:
                reporting.add_test_step(
                    "Execute restore-delete command", tvaultconf.PASS)
                LOG.debug("Command executed correctly")
            time.sleep(5)

            wc = query_data.get_snapshot_restore_delete_status(
                tvaultconf.restore_name, tvaultconf.restore_type)
            if (str(wc) == "1"):
                reporting.add_test_step("Verification", tvaultconf.PASS)
                LOG.debug("Snapshot restore successfully deleted")
            else:
                reporting.add_test_step("Verification", tvaultconf.FAIL)
                raise Exception("Restore did not get deleted")

            # Cleanup
            # Delete restored VM instance and volume
            self.delete_restored_vms(
                self.restore_vm_id, self.restore_volume_id)
            LOG.debug("Restored VMs deleted successfully")
            reporting.test_case_to_write()

        except Exception as e:
            LOG.error("Exception: " + str(e))
            reporting.set_test_script_status(tvaultconf.FAIL)
            reporting.test_case_to_write()
Example #22
    def test_6_regression(self):
        reporting.add_test_script(
            str(__name__) + "_one_click_restore_bootfrom_image")
        try:
            if self.exception != "":
                LOG.debug("pre req failed")
                reporting.add_test_step(str(self.exception), tvaultconf.FAIL)
                raise Exception(str(self.exception))
            LOG.debug("pre req completed")

            self.created = False

            #Delete the original instance
            self.delete_vms(self.workload_instances)
            self.delete_key_pair(tvaultconf.key_pair_name)
            self.delete_security_group(self.security_group_id)
            self.delete_flavor(self.flavor_id)
            LOG.debug("Instances deleted successfully")

            #Create one-click restore using CLI command
            restore_command = command_argument_string.oneclick_restore + " " + self.snapshot_ids[
                1]
            rc = cli_parser.cli_returncode(restore_command)
            if rc != 0:
                reporting.add_test_step(
                    "Execute snapshot-oneclick-restore command",
                    tvaultconf.FAIL)
                raise Exception("Command did not execute correctly")
            else:
                reporting.add_test_step(
                    "Execute snapshot-oneclick-restore command",
                    tvaultconf.PASS)
                LOG.debug("Command executed correctly")

            wc = query_data.get_snapshot_restore_status(
                tvaultconf.restore_name, self.snapshot_ids[1])
            LOG.debug("Snapshot restore status: " + str(wc))
            # Poll until the restore reaches a terminal state (the original
            # "or" condition made this loop a tautology)
            while (str(wc) != "available" and str(wc) != "error"):
                time.sleep(5)
                wc = query_data.get_snapshot_restore_status(
                    tvaultconf.restore_name, self.snapshot_ids[1])
                LOG.debug("Snapshot restore status: " + str(wc))
            if (str(wc) == "available"):
                LOG.debug("Snapshot Restore successfully completed")
                reporting.add_test_step(
                    "Snapshot one-click restore verification with DB",
                    tvaultconf.PASS)
                self.created = True

            if (self.created == False):
                reporting.add_test_step(
                    "Snapshot one-click restore verification with DB",
                    tvaultconf.FAIL)
                raise Exception("Snapshot Restore did not get created")

            self.restore_id = query_data.get_snapshot_restore_id(
                self.snapshot_ids[1])
            LOG.debug("Restore ID: " + str(self.restore_id))

            #Fetch instance details after restore
            self.restored_vm_details_list = []

            #restored vms list
            self.vm_list = self.get_restored_vm_list(self.restore_id)
            LOG.debug("Restored vms : " + str(self.vm_list))

            #restored vms all details list
            for id in range(len(self.workload_instances)):
                self.restored_vm_details_list.append(
                    self.get_vm_details(self.vm_list[id]))
            LOG.debug("Restored vm details list: " +
                      str(self.restored_vm_details_list))

            #required details of restored vms
            self.vms_details_after_restore = self.get_vms_details_list(
                self.restored_vm_details_list)
            LOG.debug("VM details after restore: " +
                      str(self.vms_details_after_restore))

            #Verify floating ips
            self.floating_ips_after_restore = []
            for i in range(len(self.vms_details_after_restore)):
                self.floating_ips_after_restore.append(
                    self.vms_details_after_restore[i]['floating_ip'])
            if (sorted(self.floating_ips_after_restore) ==
                    sorted(self.floating_ips_list)):
                reporting.add_test_step("Floating ip verification",
                                        tvaultconf.PASS)
            else:
                LOG.error("Floating ips before restore: " +
                          str(sorted(self.floating_ips_list)))
                LOG.error("Floating ips after restore: " +
                          str(sorted(self.floating_ips_after_restore)))
                reporting.add_test_step("Floating ip verification",
                                        tvaultconf.FAIL)
                reporting.set_test_script_status(tvaultconf.FAIL)

            #calculate md5sum after restore
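            # Recursive defaultdict ("autovivification"): indexing a missing
            # key creates another nested dict on the fly, so the two-level
            # md5_sum_after_oneclick_restore[ip][mount] below needs no setup.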
            tree = lambda: collections.defaultdict(tree)
            md5_sum_after_oneclick_restore = tree()
            for floating_ip in self.floating_ips_list:
                for mount_point in mount_points:
                    ssh = self.SshRemoteMachineConnectionWithRSAKey(
                        str(floating_ip))
                    md5_sum_after_oneclick_restore[str(floating_ip)][str(
                        mount_point)] = self.calculatemmd5checksum(
                            ssh, mount_point)
                    ssh.close()
            LOG.debug("md5_sum_after_oneclick_restore" +
                      str(md5_sum_after_oneclick_restore))

            #md5sum verification
            if (self.md5sums_dir_before == md5_sum_after_oneclick_restore):
                reporting.add_test_step("Md5 Verification", tvaultconf.PASS)
            else:
                reporting.set_test_script_status(tvaultconf.FAIL)
                reporting.add_test_step("Md5 Verification", tvaultconf.FAIL)

            reporting.test_case_to_write()

        except Exception as e:
            LOG.error("Exception: " + str(e))
            reporting.set_test_script_status(tvaultconf.FAIL)
            reporting.test_case_to_write()
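# A small illustration (not from the original suite) of the pitfall fixed in
# the floating-ip check above: list.sort() sorts in place and returns None,
# so comparing the return values of two .sort() calls is always True.
ips_a = ["10.0.0.5", "10.0.0.3"]
ips_b = ["10.0.0.9"]
print(ips_a.sort() == ips_b.sort())    # True  -- compares None with None
print(sorted(ips_a) == sorted(ips_b))  # False -- the intended comparison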
Example #23
    def test_1_image_booted(self):
        try:
            deleted = 0
            ## VM and Workload ###
            tests = [['tempest.api.workloadmgr.restore.test_image_booted_Selective-restore', 0],
                     ['tempest.api.workloadmgr.restore.test_image_booted_Inplace-restore', 0],
                     ['tempest.api.workloadmgr.restore.test_image_booted_Oneclick-restore', 0]]
            reporting.add_test_script(tests[0][0])
            data_dir_path = "/root"
            md5sums_before_full = {}
            LOG.debug("******************")            
            kp = self.create_key_pair(tvaultconf.key_pair_name, keypair_cleanup=True)
            LOG.debug("Key_pair : "+str(kp))            

            vm_id = self.create_vm(key_pair=kp, vm_cleanup=False)
            LOG.debug("VM ID : "+str(vm_id))
            time.sleep(30)

            floating_ip_1 = self.assign_floating_ips(vm_id, False)
            LOG.debug("Assigned floating IP : "+str(floating_ip_1))

            LOG.debug("Sleeping for 20 sec")
            time.sleep(20)
            
            self.data_ops(floating_ip_1, data_dir_path, 3)
            LOG.debug("Created data")            

            md5sums_before_full = self.calcmd5sum(floating_ip_1, data_dir_path)
            LOG.debug("MD5sums for directory on original vm : "+str(md5sums_before_full))

            
            workload_create = command_argument_string.workload_create + " --instance instance-id=" +str(vm_id)
            rc = cli_parser.cli_returncode(workload_create)
            if rc != 0:
                reporting.add_test_step("Execute workload-create command", tvaultconf.FAIL)
                raise Exception("Workload-create command did not execute correctly")
            else:
                reporting.add_test_step("Execute workload-create command", tvaultconf.PASS)
                LOG.debug("Workload-create command executed correctly")

            time.sleep(10)
            workload_id = query_data.get_workload_id(tvaultconf.workload_name)
            LOG.debug("Workload ID: " + str(workload_id))
            if(workload_id != None):
                self.wait_for_workload_tobe_available(workload_id)
                if(self.getWorkloadStatus(workload_id) == "available"):
                    reporting.add_test_step("Create workload", tvaultconf.PASS)
                else:
                    reporting.add_test_step("Create workload", tvaultconf.FAIL)
                    raise Exception("Workload creation failed")
            else:
                reporting.add_test_step("Create workload", tvaultconf.FAIL)
                raise Exception("Workload creation failed")

            if (tvaultconf.cleanup == True):
                self.addCleanup(self.workload_delete, workload_id)

            ### Full snapshot ###

            snapshot_id = self.create_snapshot(workload_id, is_full=True)
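            # is_full=True requests a full (base) snapshot; the later call
            # with is_full=False captures only changes made since this one.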

            #Add some more data to files on VM
            ssh = self.SshRemoteMachineConnectionWithRSAKey(str(floating_ip_1))
            self.addCustomfilesOnLinuxVM(ssh, data_dir_path, 2)
            ssh.close()
            md5sums_before_incremental = {}
            md5sums_before_incremental = self.calcmd5sum(floating_ip_1, data_dir_path)
            LOG.debug("\nMD5SUM after adding additional data before incremental snapshot : {}\n".format(md5sums_before_incremental))

            ### Incremental snapshot ###

            incr_snapshot_id = self.create_snapshot(workload_id, is_full=False)

            ### Selective restore ###

            rest_details = {}
            rest_details['rest_type'] = 'selective'
            rest_details['network_id'] = CONF.network.internal_network_id
            rest_details['subnet_id'] = self.get_subnet_id(CONF.network.internal_network_id)
            volumeslist = []
            rest_details['instances'] = {vm_id:volumeslist}

            payload = self.create_restore_json(rest_details)
            #Trigger selective restore
            restore_id_1 = self.snapshot_selective_restore(
                workload_id, snapshot_id,
                restore_name=tvaultconf.restore_name, restore_cleanup=True,
                instance_details=payload['instance_details'],
                network_details=payload['network_details'])
            self.wait_for_snapshot_tobe_available(workload_id, snapshot_id)
            if(self.getRestoreStatus(workload_id, snapshot_id, restore_id_1) == "available"):
                reporting.add_test_step("Selective restore", tvaultconf.PASS)
            else:
                reporting.add_test_step("Selective restore", tvaultconf.FAIL)
                raise Exception("Selective restore failed")

            #Fetch instance details after restore
            vm_list = self.get_restored_vm_list(restore_id_1)
            LOG.debug("Restored vm(selective) ID : " + str(vm_list))
            time.sleep(60)
            floating_ip_2 = self.assign_floating_ips(vm_list[0], True)
            LOG.debug("Floating ip assigned to selective restore vm -> "+str(floating_ip_2))
            md5sums_after_selective = {}
            ssh = self.SshRemoteMachineConnectionWithRSAKey(str(floating_ip_2))
            md5sums_after_selective = self.calcmd5sum(floating_ip_2, data_dir_path)
            ssh.close()

            LOG.debug("MD5SUMS before restore")
            LOG.debug(md5sums_before_full[str(floating_ip_1)])
            LOG.debug("MD5SUMS after selective restore")
            LOG.debug(md5sums_after_selective[str(floating_ip_2)])

            if md5sums_before_full[str(floating_ip_1)] == md5sums_after_selective[str(floating_ip_2)]:
                LOG.debug("***MDSUMS MATCH***")
                reporting.add_test_step("Md5 Verification for volume", tvaultconf.PASS)
                reporting.set_test_script_status(tvaultconf.PASS)
            else:
                LOG.debug("***MDSUMS DON'T MATCH***")
                reporting.add_test_step("Md5 Verification for volume", tvaultconf.FAIL)
                reporting.set_test_script_status(tvaultconf.FAIL)

            restored_vm_details = []
            for id in range(len(vm_list)):
                restored_vm_details.append(self.get_vm_details(vm_list[id]))
            LOG.debug("Restored vm details list: " + str(restored_vm_details))

            vms_details_after_restore = self.get_vms_details_list(restored_vm_details)
            LOG.debug("VM details after restore: " + str(vms_details_after_restore))
            #Compare the data before and after restore
            int_net_1_name = self.get_net_name(CONF.network.internal_network_id) 
            for i in range(len(vms_details_after_restore)):
                if(vms_details_after_restore[i]['network_name'] == int_net_1_name):
                    reporting.add_test_step("Network verification for instance-" + str(i+1), tvaultconf.PASS)
                    tests[0][1] = 1
                    reporting.test_case_to_write()
                else:
                    LOG.error("Expected network: " + str(int_net_1_name))
                    LOG.error("Restored network: " + str(vms_details_after_restore[i]['network_name']))
                    reporting.add_test_step("Network verification for instance-" + str(i+1), tvaultconf.FAIL)
                    reporting.set_test_script_status(tvaultconf.FAIL)
                    reporting.test_case_to_write()


            ### In-place Restore ###

            rest_details = {}
            rest_details['rest_type'] = 'inplace'
            rest_details['instances'] = {vm_id:volumeslist}

            reporting.add_test_script(tests[1][0]) 
            #Create in-place restore with CLI command
            restore_command = command_argument_string.inplace_restore + str(tvaultconf.restore_filename) + " " + str(snapshot_id)
            payload = self.create_restore_json(rest_details)
            restore_json = json.dumps(payload)
            LOG.debug("restore.json for inplace restore: " + str(restore_json))
            #Create Restore.json
            with open(tvaultconf.restore_filename, 'w') as f:
                f.write(str(yaml.safe_load(restore_json)))
            rc = cli_parser.cli_returncode(restore_command)
            if rc != 0:
                reporting.add_test_step("Triggering In-Place restore via CLI", tvaultconf.FAIL)
                raise Exception("Command did not execute correctly")
            else:
                reporting.add_test_step("Triggering In-Place restore via CLI", tvaultconf.PASS)
                LOG.debug("Command executed correctly")

            #get restore id from database
            restore_id_2 = query_data.get_snapshot_restore_id(snapshot_id)
            self.wait_for_snapshot_tobe_available(workload_id, snapshot_id)

            #get in-place restore status
            if(self.getRestoreStatus(workload_id, snapshot_id, restore_id_2) == "available"):
                reporting.add_test_step("In-place restore", tvaultconf.PASS)
            else:
                reporting.add_test_step("In-place restore", tvaultconf.FAIL)
                raise Exception("In-place restore failed")

            #Fetch instance details after restore
            vm_list = []
            vm_list = self.get_restored_vm_list(restore_id_2)
            LOG.debug("Restored vm(In-place) ID : " + str(vm_list))

            time.sleep(60)
            md5sums_after_inplace = {}
            ssh = self.SshRemoteMachineConnectionWithRSAKey(str(floating_ip_1))
            md5sums_after_inplace = self.calcmd5sum(floating_ip_1, data_dir_path)
            ssh.close()

            LOG.debug("<----md5sums_before_full---->")
            LOG.debug(md5sums_before_full[str(floating_ip_1)])
            LOG.debug("<----md5sums_after_inplace---->")
            LOG.debug(md5sums_after_inplace[str(floating_ip_1)])

            if md5sums_before_full[str(floating_ip_1)] == md5sums_after_inplace[str(floating_ip_1)]:
                LOG.debug("***MDSUMS MATCH***")
                reporting.add_test_step("Md5 Verification for volume", tvaultconf.PASS)
                tests[1][1] = 1
                reporting.set_test_script_status(tvaultconf.PASS)
                reporting.test_case_to_write()
            else:
                LOG.debug("***MDSUMS DON'T MATCH***")
                reporting.add_test_step("Md5 Verification for volume", tvaultconf.FAIL)
                reporting.set_test_script_status(tvaultconf.FAIL)
                reporting.test_case_to_write()

            #Delete restore for snapshot
            if (tvaultconf.cleanup == True):
                self.addCleanup(self.restore_delete, workload_id, snapshot_id, restore_id_2)


            ### One-click restore ###

            reporting.add_test_script(tests[2][0])

            #Delete the original instance
            self.delete_vm(vm_id)
            LOG.debug("Instance deleted successfully for one click restore : "+str(vm_id))
            time.sleep(10)

            deleted = 1

            #Create one-click restore using CLI command
            restore_command = command_argument_string.oneclick_restore + " " + incr_snapshot_id
            rc = cli_parser.cli_returncode(restore_command)
            if rc != 0:
                reporting.add_test_step("Execute snapshot-oneclick-restore command", tvaultconf.FAIL)
                raise Exception("Command did not execute correctly")
            else:
                reporting.add_test_step("Execute snapshot-oneclick-restore command", tvaultconf.PASS)
                LOG.debug("Command executed correctly")

            restore_id_3 = query_data.get_snapshot_restore_id(incr_snapshot_id)
            LOG.debug("Restore ID: " + str(restore_id_3))

            self.wait_for_snapshot_tobe_available(workload_id, incr_snapshot_id)
            if(self.getRestoreStatus(workload_id, incr_snapshot_id, restore_id_3) == "available"):
                reporting.add_test_step("One-click restore", tvaultconf.PASS)
                LOG.debug("One-click restore passed")
            else:
                reporting.add_test_step("One-click restore", tvaultconf.FAIL)
                LOG.debug("One-click restore failed")
                raise Exception("One-click restore failed")
            LOG.debug("One-click restore complete")

            #Fetch instance details after restore
            vm_list = []
            vm_list = self.get_restored_vm_list(restore_id_3)
            LOG.debug("Restored vms : " + str(vm_list))

            md5sums_after_1clickrestore = {}
            ssh = self.SshRemoteMachineConnectionWithRSAKey(str(floating_ip_1))
            md5sums_after_1clickrestore = self.calcmd5sum(floating_ip_1, data_dir_path)
            LOG.debug("MD5SUMS after one click restore : {}".format(md5sums_after_1clickrestore))
            ssh.close()

            if md5sums_before_incremental[str(floating_ip_1)] == md5sums_after_1clickrestore[str(floating_ip_1)]:
                LOG.debug("***MDSUMS MATCH***")
                reporting.add_test_step("Md5 Verification for volume", tvaultconf.PASS)
                tests[2][1] = 1
                reporting.set_test_script_status(tvaultconf.PASS)
                reporting.test_case_to_write()
            else:
                LOG.debug("***MDSUMS DON'T MATCH***")
                reporting.add_test_step("Md5 Verification for volume", tvaultconf.FAIL)
                reporting.set_test_script_status(tvaultconf.FAIL)
                reporting.test_case_to_write()

            restored_volumes = []
            restored_volumes = self.get_restored_volume_list(restore_id_3)
            LOG.debug("Restored volumes : ")
            LOG.debug(restored_volumes)

            if (tvaultconf.cleanup == True):
                self.addCleanup(self.restore_delete, workload_id, incr_snapshot_id, restore_id_3)
                time.sleep(30)
                self.addCleanup(self.delete_restored_vms, vm_list, restored_volumes)

        except Exception as e:
            LOG.error("Exception: " + str(e))
            if (deleted == 0):
                try:
                    self.delete_vm(vm_id)
                except:
                    pass
            for test in tests:
                if test[1] != 1:
                    reporting.add_test_script(test[0])
                    reporting.set_test_script_status(tvaultconf.FAIL)
                    reporting.test_case_to_write()
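# A minimal sketch, assuming paramiko, of what an md5-over-SSH helper such
# as calcmd5sum() in these tests could look like: checksum every file under
# data_dir on the remote VM and return the combined, sorted output so that
# before/after-restore results compare directly. host, user and key_path
# are illustrative parameters, not names from the original suite.
import paramiko


def remote_md5(host, user, key_path, data_dir):
    client = paramiko.SSHClient()
    client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    client.connect(host, username=user, key_filename=key_path)
    try:
        # Sort so the output is stable across runs
        cmd = "find {} -type f -exec md5sum {{}} + | sort".format(data_dir)
        _, stdout, _ = client.exec_command(cmd)
        return stdout.read().decode()
    finally:
        client.close()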
Example #24
    def test_1_volume_volume(self):
        try:
            ### VM and Workload ###
            tests = [['tempest.api.workloadmgr.restore.test_volume_vol_Selective-restore',
                      0],
                     ['tempest.api.workloadmgr.restore.test_volume_vol_Inplace-restore',
                      0],
                     ['tempest.api.workloadmgr.restore.test_volume_vol_Oneclick-restore',
                      0]]
            reporting.add_test_script(tests[0][0])
            deleted = 0
            global volumes
            mount_points = ["mount_data_a", "mount_data_b"]
            md5sums_before_full = {}

            # Create Keypair
            kp = self.create_key_pair(
                tvaultconf.key_pair_name, keypair_cleanup=True)
            LOG.debug("Key_pair : " + str(kp))

            # Create bootable volume
            boot_volume_id = self.create_volume(
                size=tvaultconf.bootfromvol_vol_size,
                image_id=CONF.compute.image_ref,
                volume_cleanup=False)
            self.set_volume_as_bootable(boot_volume_id)
            LOG.debug("Bootable Volume ID : " + str(boot_volume_id))

            self.block_mapping_details = [{"source_type": "volume",
                                           "delete_on_termination": "false",
                                           "boot_index": 0,
                                           "uuid": boot_volume_id,
                                           "destination_type": "volume"}]

            # Create instance
            vm_id = self.create_vm(
                key_pair=kp,
                image_id="",
                block_mapping_data=self.block_mapping_details,
                vm_cleanup=False)
            LOG.debug("VM ID : " + str(vm_id))
            time.sleep(30)

            # Create and attach volume
            volume_id = self.create_volume(
                volume_type_id=CONF.volume.volume_type_id,
                volume_cleanup=False)
            LOG.debug("Volume ID: " + str(volume_id))
            volumes = tvaultconf.volumes_parts

            self.attach_volume(volume_id, vm_id, attach_cleanup=False)
            LOG.debug("Volume attached")

            # Assign floating IP
            floating_ip_1 = self.assign_floating_ips(vm_id, False)
            LOG.debug("Assigned floating IP : " + str(floating_ip_1))
            LOG.debug("Sleeping for 40 sec")
            time.sleep(40)

            if CONF.validation.ssh_user == 'ubuntu':
                self.install_qemu_ga(floating_ip_1)

            # Adding data and calculating md5sums
            self.data_ops(floating_ip_1, mount_points[0], 3)
            LOG.debug("Created disk and mounted the attached volume")

            md5sums_before_full = self.calcmd5sum(
                floating_ip_1, mount_points[0])
            LOG.debug("\nMD5SUM of the data before full snapshot : {}\n".format(
                md5sums_before_full))

            workload_create = command_argument_string.workload_create + \
                " --instance instance-id=" + str(vm_id)
            rc = cli_parser.cli_returncode(workload_create)
            if rc != 0:
                reporting.add_test_step(
                    "Execute workload-create command", tvaultconf.FAIL)
                raise Exception(
                    "Workload-create command did not execute correctly")
            else:
                reporting.add_test_step(
                    "Execute workload-create command", tvaultconf.PASS)
                LOG.debug("Workload-create command executed correctly")

            time.sleep(10)
            workload_id = query_data.get_workload_id(tvaultconf.workload_name)
            LOG.debug("Workload ID: " + str(workload_id))
            if(workload_id is not None):
                self.wait_for_workload_tobe_available(workload_id)
                if(self.getWorkloadStatus(workload_id) == "available"):
                    reporting.add_test_step("Create workload", tvaultconf.PASS)
                else:
                    reporting.add_test_step("Create workload", tvaultconf.FAIL)
            else:
                reporting.add_test_step("Create workload", tvaultconf.FAIL)

            if (tvaultconf.cleanup):
                self.addCleanup(self.workload_delete, workload_id)

            ### Full Snapshot ###

            snapshot_id = self.create_snapshot(workload_id, is_full=True)

            # Add some more data to files on VM
            ssh = self.SshRemoteMachineConnectionWithRSAKey(str(floating_ip_1))
            self.addCustomfilesOnLinuxVM(ssh, mount_points[0], 2)
            ssh.close()
            md5sums_before_incremental = {}
            md5sums_before_incremental = self.calcmd5sum(
                floating_ip_1, mount_points[0])
            LOG.debug("\nMD5SUM after adding additional data before incremental snapshot : {}\n".format(
                md5sums_before_incremental))

            ### Incremental snapshot ###

            incr_snapshot_id = self.create_snapshot(workload_id, is_full=False)

            ### Selective restore ###

            rest_details = {}
            rest_details['rest_type'] = 'selective'
            rest_details['network_id'] = CONF.network.internal_network_id
            rest_details['subnet_id'] = self.get_subnet_id(
                CONF.network.internal_network_id)
            volumeslist = [boot_volume_id, volume_id]
            rest_details['instances'] = {vm_id: volumeslist}

            payload = self.create_restore_json(rest_details)
            # Trigger selective restore
            restore_id_1 = self.snapshot_selective_restore(
                workload_id,
                snapshot_id,
                restore_name=tvaultconf.restore_name,
                restore_cleanup=True,
                instance_details=payload['instance_details'],
                network_details=payload['network_details'])
            self.wait_for_snapshot_tobe_available(workload_id, snapshot_id)
            if(self.getRestoreStatus(workload_id, snapshot_id, restore_id_1) == "available"):
                reporting.add_test_step("Selective restore", tvaultconf.PASS)
            else:
                reporting.add_test_step("Selective restore", tvaultconf.FAIL)
                raise Exception("Selective restore failed")

            # Fetch instance details after restore
            vm_list = self.get_restored_vm_list(restore_id_1)
            LOG.debug("Restored vm(selective) ID : " + str(vm_list))
            time.sleep(60)
            floating_ip_2 = self.assign_floating_ips(vm_list[0], True)
            LOG.debug(
                "Floating ip assigned to selective restore vm -> " +
                str(floating_ip_2))
            md5sums_after_selective = {}
            ssh = self.SshRemoteMachineConnectionWithRSAKey(str(floating_ip_2))
            self.execute_command_disk_mount(ssh, str(floating_ip_2), [
                                            volumes[0]], [mount_points[0]])
            time.sleep(5)
            md5sums_after_selective = self.calcmd5sum(
                floating_ip_2, mount_points[0])
            ssh.close()

            LOG.debug("MD5SUMS before restore")
            LOG.debug(md5sums_before_full[str(floating_ip_1)])
            LOG.debug("MD5SUMS after selective restore")
            LOG.debug(md5sums_after_selective[str(floating_ip_2)])

            if md5sums_before_full[str(
                floating_ip_1)] == md5sums_after_selective[str(floating_ip_2)]:
                LOG.debug("***MDSUMS MATCH***")
                reporting.add_test_step(
                    "Md5 Verification for volume", tvaultconf.PASS)
                reporting.set_test_script_status(tvaultconf.PASS)
            else:
                LOG.debug("***MDSUMS DON'T MATCH***")
                reporting.add_test_step(
                    "Md5 Verification for volume", tvaultconf.FAIL)
                reporting.set_test_script_status(tvaultconf.FAIL)

            restored_vm_details = []
            for id in range(len(vm_list)):
                restored_vm_details.append(self.get_vm_details(vm_list[id]))
            LOG.debug("Restored vm details list: " + str(restored_vm_details))

            vms_details_after_restore = self.get_vms_details_list(
                restored_vm_details)
            LOG.debug("VM details after restore: " +
                      str(vms_details_after_restore))
            # Compare the data before and after restore
            int_net_1_name = self.get_net_name(
                CONF.network.internal_network_id)
            for i in range(len(vms_details_after_restore)):
                if(vms_details_after_restore[i]['network_name'] == int_net_1_name):
                    reporting.add_test_step(
                        "Network verification for instance-" + str(i + 1), tvaultconf.PASS)
                    tests[0][1] = 1
                    reporting.test_case_to_write()
                else:
                    LOG.error("Expected network: " + str(int_net_1_name))
                    LOG.error("Restored network: " +
                              str(vms_details_after_restore[i]['network_name']))
                    reporting.add_test_step(
                        "Network verification for instance-" + str(i + 1), tvaultconf.FAIL)
                    reporting.set_test_script_status(tvaultconf.FAIL)
                    reporting.test_case_to_write()

            ### In-place restore ###

            rest_details = {}
            rest_details['rest_type'] = 'inplace'
            rest_details['instances'] = {vm_id: volumeslist}

            reporting.add_test_script(tests[1][0])
            # Create in-place restore with CLI command
            restore_command = command_argument_string.inplace_restore + \
                str(tvaultconf.restore_filename) + " " + str(snapshot_id)
            payload = self.create_restore_json(rest_details)
            restore_json = json.dumps(payload)
            LOG.debug("restore.json for inplace restore: " + str(restore_json))
            # Create Restore.json
            with open(tvaultconf.restore_filename, 'w') as f:
                f.write(str(yaml.safe_load(restore_json)))
            rc = cli_parser.cli_returncode(restore_command)
            if rc != 0:
                reporting.add_test_step(
                    "Triggering In-Place restore via CLI", tvaultconf.FAIL)
                raise Exception("Command did not execute correctly")
            else:
                reporting.add_test_step(
                    "Triggering In-Place restore via CLI", tvaultconf.PASS)
                LOG.debug("Command executed correctly")

            # get restore id from database
            restore_id_2 = query_data.get_snapshot_restore_id(snapshot_id)
            self.wait_for_snapshot_tobe_available(workload_id, snapshot_id)

            # get in-place restore status
            if(self.getRestoreStatus(workload_id, snapshot_id, restore_id_2) == "available"):
                reporting.add_test_step("In-place restore", tvaultconf.PASS)
            else:
                reporting.add_test_step("In-place restore", tvaultconf.FAIL)
                raise Exception("In-place restore failed")

            # Fetch instance details after restore
            vm_list = []
            vm_list = self.get_restored_vm_list(restore_id_2)
            LOG.debug("Restored vm(In-place) ID : " + str(vm_list))

            time.sleep(60)
            md5sums_after_inplace = {}
            ssh = self.SshRemoteMachineConnectionWithRSAKey(str(floating_ip_1))
            self.execute_command_disk_mount(ssh, str(floating_ip_1), [
                                            volumes[0]], [mount_points[0]])
            time.sleep(5)
            md5sums_after_inplace = self.calcmd5sum(
                floating_ip_1, mount_points[0])
            ssh.close()

            LOG.debug("<----md5sums_before_full---->")
            LOG.debug(md5sums_before_full[str(floating_ip_1)])
            LOG.debug("<----md5sums_after_inplace---->")
            LOG.debug(md5sums_after_inplace[str(floating_ip_1)])

            if md5sums_before_full[str(
                floating_ip_1)] == md5sums_after_inplace[str(floating_ip_1)]:
                LOG.debug("***MDSUMS MATCH***")
                reporting.add_test_step(
                    "Md5 Verification for volume", tvaultconf.PASS)
                tests[1][1] = 1
                reporting.set_test_script_status(tvaultconf.PASS)
                reporting.test_case_to_write()
            else:
                LOG.debug("***MDSUMS DON'T MATCH***")
                reporting.add_test_step(
                    "Md5 Verification for volume", tvaultconf.FAIL)
                reporting.set_test_script_status(tvaultconf.FAIL)
                reporting.test_case_to_write()

            # Delete restore for snapshot
            if (tvaultconf.cleanup):
                self.addCleanup(self.restore_delete, workload_id,
                                snapshot_id, restore_id_2)

            ### One-click restore ###

            reporting.add_test_script(tests[2][0])

            self.detach_volume(vm_id, volume_id)

            # Delete the original instance
            self.delete_vm(vm_id)
            LOG.debug(
                "Instance deleted successfully for one click restore : " +
                str(vm_id))
            time.sleep(10)

            # Delete bootable volume of original instance
            self.delete_volume(boot_volume_id)
            LOG.debug("Bootable volume of original instance deleted")

            # Delete volume attached to original instance
            self.delete_volume(volume_id)
            LOG.debug(
                "Volumes deleted successfully for one click restore : " +
                str(volume_id))

            deleted = 1

            # Create one-click restore using CLI command
            restore_command = command_argument_string.oneclick_restore + " " + incr_snapshot_id
            rc = cli_parser.cli_returncode(restore_command)
            if rc != 0:
                reporting.add_test_step(
                    "Execute snapshot-oneclick-restore command",
                    tvaultconf.FAIL)
                raise Exception("Command did not execute correctly")
            else:
                reporting.add_test_step(
                    "Execute snapshot-oneclick-restore command",
                    tvaultconf.PASS)
                LOG.debug("Command executed correctly")

            restore_id_3 = query_data.get_snapshot_restore_id(incr_snapshot_id)
            LOG.debug("Restore ID: " + str(restore_id_3))

            self.wait_for_snapshot_tobe_available(
                workload_id, incr_snapshot_id)
            if(self.getRestoreStatus(workload_id, incr_snapshot_id, restore_id_3) == "available"):
                reporting.add_test_step("One-click restore", tvaultconf.PASS)
                LOG.debug("One-click restore passed")
            else:
                reporting.add_test_step("One-click restore", tvaultconf.FAIL)
                LOG.debug("One-click restore failed")
                raise Exception("One-click restore failed")
            LOG.debug("One-click restore complete")

            # Fetch instance details after restore
            vm_list = []
            vm_list = self.get_restored_vm_list(restore_id_3)
            LOG.debug("Restored vms : " + str(vm_list))

            md5sums_after_1clickrestore = {}
            ssh = self.SshRemoteMachineConnectionWithRSAKey(str(floating_ip_1))
            self.execute_command_disk_mount(ssh, str(floating_ip_1), [
                                            volumes[0]], [mount_points[0]])
            time.sleep(5)
            md5sums_after_1clickrestore = self.calcmd5sum(
                floating_ip_1, mount_points[0])
            LOG.debug("MD5SUMS after one click restore : {}".format(
                md5sums_after_1clickrestore))
            ssh.close()

            if md5sums_before_incremental[str(
                floating_ip_1)] == md5sums_after_1clickrestore[str(floating_ip_1)]:
                LOG.debug("***MDSUMS MATCH***")
                reporting.add_test_step(
                    "Md5 Verification for volume", tvaultconf.PASS)
                tests[2][1] = 1
                reporting.set_test_script_status(tvaultconf.PASS)
                reporting.test_case_to_write()
            else:
                LOG.debug("***MDSUMS DON'T MATCH***")
                reporting.add_test_step(
                    "Md5 Verification for volume", tvaultconf.FAIL)
                reporting.set_test_script_status(tvaultconf.FAIL)
                reporting.test_case_to_write()

            restored_volumes = []
            restored_volumes = self.get_restored_volume_list(restore_id_3)
            LOG.debug("Restored volumes : ")
            LOG.debug(restored_volumes)

            if (tvaultconf.cleanup):
                self.addCleanup(self.restore_delete, workload_id,
                                incr_snapshot_id, restore_id_3)
                time.sleep(30)
                self.addCleanup(self.delete_restored_vms,
                                vm_list, restored_volumes)

        except Exception as e:
            LOG.error("Exception: " + str(e))
            if (deleted == 0):
                try:
                    self.delete_vm(vm_id)
                except BaseException:
                    pass
                time.sleep(10)
                try:
                    self.delete_volume(volume_id)
                    self.delete_volume(boot_volume_id)
                except BaseException:
                    pass
            for test in tests:
                if test[1] != 1:
                    reporting.add_test_script(test[0])
                    reporting.set_test_script_status(tvaultconf.FAIL)
                    reporting.test_case_to_write()
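# A minimal sketch (assumptions: the wlm CLI accepts a plain-JSON restore
# file; "restore.json" is a stand-in filename) of writing the in-place
# restore payload with json.dump(). The tests above instead write
# str(yaml.safe_load(...)) / str(json.loads(...)), i.e. a Python dict repr.
import json

payload = {
    "restore_type": "inplace",
    "type": "openstack",
    "openstack": {"instances": [], "networks_mapping": {"networks": []}},
}
with open("restore.json", "w") as f:
    json.dump(payload, f, indent=2)  # conventional JSON serialization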
Example #25
    def test_1_volume_volume(self):
        try:
            ### VM and Workload ###

            reporting.add_test_script(str(__name__))

            deleted = 0
            global volumes
            mount_points = ["mount_data_a", "mount_data_b"]
            md5sums_dir_before = {}

            #Create Keypair
            kp = self.create_key_pair(tvaultconf.key_pair_name,
                                      keypair_cleanup=True)
            LOG.debug("Key_pair : " + str(kp))

            #Create bootable volume
            boot_volume_id = self.create_volume(
                image_id=CONF.compute.image_ref, volume_cleanup=False)
            self.set_volume_as_bootable(boot_volume_id)
            LOG.debug("Bootable Volume ID : " + str(boot_volume_id))

            self.block_mapping_details = [{
                "source_type": "volume",
                "delete_on_termination": "false",
                "boot_index": 0,
                "uuid": boot_volume_id,
                "destination_type": "volume"
            }]

            #Create instance
            vm_id = self.create_vm(
                key_pair=kp,
                image_id="",
                block_mapping_data=self.block_mapping_details,
                vm_cleanup=False)
            LOG.debug("VM ID : " + str(vm_id))
            time.sleep(30)

            #Create and attach volume
            volume_id = self.create_volume(
                volume_type_id=CONF.volume.volume_type_id,
                volume_cleanup=False)
            LOG.debug("Volume ID: " + str(volume_id))
            volumes = tvaultconf.volumes_parts

            self.attach_volume(volume_id, vm_id, attach_cleanup=False)
            LOG.debug("Volume attached")

            #Assign floating IP
            floating_ip_1 = self.assign_floating_ips(vm_id, False)
            LOG.debug("Assigned floating IP : " + str(floating_ip_1))
            LOG.debug("Sleeping for 40 sec")
            time.sleep(40)

            #Adding data and calculating md5sums
            self.data_ops(floating_ip_1, mount_points[0], 3)
            LOG.debug("Created disk and mounted the attached volume")

            md5sums_dir_before = self.calcmd5sum(floating_ip_1,
                                                 mount_points[0])
            LOG.debug("MD5sums for directory on original vm : " +
                      str(md5sums_dir_before))

            workload_create = command_argument_string.workload_create + " --instance instance-id=" + str(
                vm_id)
            rc = cli_parser.cli_returncode(workload_create)
            if rc != 0:
                reporting.add_test_step("Execute workload-create command",
                                        tvaultconf.FAIL)
                raise Exception(
                    "Workload-create command did not execute correctly")
            else:
                reporting.add_test_step("Execute workload-create command",
                                        tvaultconf.PASS)
                LOG.debug("Workload-create command executed correctly")

            time.sleep(10)
            workload_id = query_data.get_workload_id(tvaultconf.workload_name)
            LOG.debug("Workload ID: " + str(workload_id))
            if (workload_id != None):
                self.wait_for_workload_tobe_available(workload_id)
                if (self.getWorkloadStatus(workload_id) == "available"):
                    reporting.add_test_step("Create workload", tvaultconf.PASS)
                else:
                    reporting.add_test_step("Create workload", tvaultconf.FAIL)
                    reporting.set_test_script_status(tvaultconf.FAIL)
            else:
                reporting.add_test_step("Create workload", tvaultconf.FAIL)
                reporting.set_test_script_status(tvaultconf.FAIL)

            if (tvaultconf.cleanup == True):
                self.addCleanup(self.workload_delete, workload_id)

            ### Full Snapshot ###

            self.created = False

            #Create snapshot with CLI command
            create_snapshot = command_argument_string.snapshot_create + workload_id
            LOG.debug("Create snapshot command: " + str(create_snapshot))
            rc = cli_parser.cli_returncode(create_snapshot)
            if rc != 0:
                reporting.add_test_step(
                    "Execute workload-snapshot command with --full",
                    tvaultconf.FAIL)
                raise Exception(
                    "Command did not execute correctly for full snapshot")
            else:
                reporting.add_test_step(
                    "Execute workload-snapshot command with --full",
                    tvaultconf.PASS)
                LOG.debug("Command executed correctly for full snapshot")

            snapshot_id = query_data.get_inprogress_snapshot_id(workload_id)
            LOG.debug("Snapshot ID: " + str(snapshot_id))
            wc = self.wait_for_snapshot_tobe_available(workload_id,
                                                       snapshot_id)
            if (str(wc) == "available"):
                reporting.add_test_step("Full snapshot", tvaultconf.PASS)
                self.created = True
            elif (str(wc) == "error"):
                pass
            if (self.created == False):
                reporting.add_test_step("Full snapshot", tvaultconf.FAIL)
                raise Exception("Workload snapshot did not get created")

            if (tvaultconf.cleanup == True):
                self.addCleanup(self.snapshot_delete, workload_id, snapshot_id)

            LOG.debug("Sleeping for 40s")
            time.sleep(40)

            #Add some more data to files on VM
            ssh = self.SshRemoteMachineConnectionWithRSAKey(str(floating_ip_1))
            self.addCustomfilesOnLinuxVM(ssh, mount_points[0], 2)
            ssh.close()

            ### Incremental snapshot ###

            self.created = False

            #Create incremental snapshot using CLI command
            create_snapshot = command_argument_string.incr_snapshot_create + workload_id
            LOG.debug("Create snapshot command: " + str(create_snapshot))
            rc = cli_parser.cli_returncode(create_snapshot)
            if rc != 0:
                reporting.add_test_step("Execute workload-snapshot command",
                                        tvaultconf.FAIL)
                raise Exception("Command did not execute correctly")
            else:
                reporting.add_test_step("Execute workload-snapshot command",
                                        tvaultconf.PASS)
                LOG.debug("Command executed correctly")

            incr_snapshot_id = query_data.get_inprogress_snapshot_id(
                workload_id)
            LOG.debug("Incremental Snapshot ID: " + str(incr_snapshot_id))
            #Wait for incremental snapshot to complete
            wc = self.wait_for_snapshot_tobe_available(workload_id,
                                                       incr_snapshot_id)
            if (str(wc) == "available"):
                reporting.add_test_step("Incremental snapshot",
                                        tvaultconf.PASS)
                LOG.debug(
                    "Workload incremental snapshot successfully completed")
                self.created = True
            if (self.created == False):
                reporting.add_test_step("Incremental snapshot",
                                        tvaultconf.FAIL)
                raise Exception(
                    "Workload incremental snapshot did not get created")

            if (tvaultconf.cleanup == True):
                self.addCleanup(self.snapshot_delete, workload_id,
                                incr_snapshot_id)

            ### Selective restore ###

            instance_details = []
            network_details = []
            restored_vm_details = []
            vms_details_after_restore = []
            temp_vdisks_data = []

            int_net_1_name = self.get_net_name(
                CONF.network.internal_network_id)
            LOG.debug("int_net_1_name" + str(int_net_1_name))
            int_net_1_subnets = self.get_subnet_id(
                CONF.network.internal_network_id)
            LOG.debug("int_net_1_subnet" + str(int_net_1_subnets))

            temp_vdisks_data.append([{
                'id': volume_id,
                'availability_zone': CONF.volume.volume_availability_zone,
                'new_volume_type': CONF.volume.volume_type
            }])

            LOG.debug("Vdisks details for restore" + str(temp_vdisks_data))

            #Create instance details for restore.json
            vm_name = "tempest_test_vm_" + vm_id + "_selectively_restored"
            temp_instance_data = {
                'id': vm_id,
                'availability_zone': CONF.compute.vm_availability_zone,
                'include': True,
                'restore_boot_disk': True,
                'name': vm_name,
                'vdisks': temp_vdisks_data[0]
            }
            instance_details.append(temp_instance_data)
            LOG.debug("Instance details for restore: " + str(instance_details))

            #Create network details for restore.json
            snapshot_network = {
                'id': CONF.network.internal_network_id,
                'subnet': {
                    'id': int_net_1_subnets
                }
            }
            target_network = {
                'name': int_net_1_name,
                'id': CONF.network.internal_network_id,
                'subnet': {
                    'id': int_net_1_subnets
                }
            }
            network_details = [{
                'snapshot_network': snapshot_network,
                'target_network': target_network
            }]
            LOG.debug("Network details for restore: " + str(network_details))
            LOG.debug("Snapshot id : " + str(snapshot_id))

            #Trigger selective restore
            restore_id_1 = self.snapshot_selective_restore(
                workload_id,
                snapshot_id,
                restore_name=tvaultconf.restore_name,
                restore_cleanup=True,
                instance_details=instance_details,
                network_details=network_details)
            self.wait_for_snapshot_tobe_available(workload_id, snapshot_id)
            if (self.getRestoreStatus(workload_id, snapshot_id,
                                      restore_id_1) == "available"):
                reporting.add_test_step("Selective restore", tvaultconf.PASS)
            else:
                reporting.add_test_step("Selective restore", tvaultconf.FAIL)
                raise Exception("Selective restore failed")

            #Fetch instance details after restore
            vm_list = self.get_restored_vm_list(restore_id_1)
            LOG.debug("Restored vm(selective) ID : " + str(vm_list))
            time.sleep(60)
            floating_ip_2 = self.assign_floating_ips(vm_list[0], True)
            LOG.debug("Floating ip assigned to selective restore vm -> " +
                      str(floating_ip_2))
            md5sums_dir_after = {}
            ssh = self.SshRemoteMachineConnectionWithRSAKey(str(floating_ip_2))
            self.execute_command_disk_mount(ssh, str(floating_ip_2),
                                            [volumes[0]], [mount_points[0]])
            time.sleep(5)
            md5sums_dir_after = self.calcmd5sum(floating_ip_2, mount_points[0])
            ssh.close()

            LOG.debug("MD5SUMS before restore")
            LOG.debug(md5sums_dir_before[str(floating_ip_1)])
            LOG.debug("MD5SUMS after restore")
            LOG.debug(md5sums_dir_after[str(floating_ip_2)])

            if md5sums_dir_before[str(floating_ip_1)] == md5sums_dir_after[str(
                    floating_ip_2)]:
                LOG.debug("***MDSUMS MATCH***")
                reporting.add_test_step("Md5 Verification for volume",
                                        tvaultconf.PASS)
            else:
                LOG.debug("***MDSUMS DON'T MATCH***")
                reporting.add_test_step("Md5 Verification for volume",
                                        tvaultconf.FAIL)
                reporting.set_test_script_status(tvaultconf.FAIL)

            for id in range(len(vm_list)):
                restored_vm_details.append(self.get_vm_details(vm_list[id]))
            LOG.debug("Restored vm details list: " + str(restored_vm_details))

            vms_details_after_restore = self.get_vms_details_list(
                restored_vm_details)
            LOG.debug("VM details after restore: " +
                      str(vms_details_after_restore))
            #Compare the data before and after restore
            for i in range(len(vms_details_after_restore)):
                if (vms_details_after_restore[i]['network_name'] ==
                        int_net_1_name):
                    reporting.add_test_step(
                        "Network verification for instance-" + str(i + 1),
                        tvaultconf.PASS)
                else:
                    LOG.error("Expected network: " + str(int_net_1_name))
                    LOG.error(
                        "Restored network: " +
                        str(vms_details_after_restore[i]['network_name']))
                    reporting.add_test_step(
                        "Network verification for instance-" + str(i + 1),
                        tvaultconf.FAIL)
                    reporting.set_test_script_status(tvaultconf.FAIL)

            ### In-place restore ###

            #Create in-place restore with CLI command
            restore_command = command_argument_string.inplace_restore + str(
                tvaultconf.restore_filename) + " " + str(incr_snapshot_id)

            LOG.debug("inplace restore cli command: " + str(restore_command))
            #Restore.json restoring the boot disk and the attached volume in place
            restore_json = json.dumps({
                'openstack': {
                    'instances': [{
                        'restore_boot_disk':
                        True,
                        'include':
                        True,
                        'id':
                        vm_id,
                        'vdisks': [{
                            'restore_cinder_volume': True,
                            'id': volume_id,
                            'new_volume_type': CONF.volume.volume_type
                        }],
                    }],
                    'networks_mapping': {
                        'networks': []
                    }
                },
                'restore_type': 'inplace',
                'type': 'openstack'
            })
            LOG.debug("restore.json for inplace restore: " + str(restore_json))
            #Create Restore.json
            with open(tvaultconf.restore_filename, 'w') as f:
                f.write(str(json.loads(restore_json)))
            rc = cli_parser.cli_returncode(restore_command)
            if rc != 0:
                reporting.add_test_step("Triggering In-Place restore via CLI",
                                        tvaultconf.FAIL)
                raise Exception("Command did not execute correctly")
            else:
                reporting.add_test_step("Triggering In-Place restore via CLI",
                                        tvaultconf.PASS)
                LOG.debug("Command executed correctly")

            #get restore id from database
            restore_id_2 = query_data.get_snapshot_restore_id(incr_snapshot_id)
            self.wait_for_snapshot_tobe_available(workload_id,
                                                  incr_snapshot_id)

            #get in-place restore status
            if (self.getRestoreStatus(workload_id, incr_snapshot_id,
                                      restore_id_2) == "available"):
                reporting.add_test_step("In-place restore", tvaultconf.PASS)
            else:
                reporting.add_test_step("In-place restore", tvaultconf.FAIL)
                raise Exception("In-place restore failed")

            #Fetch instance details after restore
            vm_list = []
            vm_list = self.get_restored_vm_list(restore_id_2)
            LOG.debug("Restored vm(In-place) ID : " + str(vm_list))

            time.sleep(40)
            md5sums_dir_after = {}
            ssh = self.SshRemoteMachineConnectionWithRSAKey(str(floating_ip_1))
            self.execute_command_disk_mount(ssh, str(floating_ip_1),
                                            [volumes[0]], [mount_points[0]])
            time.sleep(5)
            md5sums_dir_after = self.calcmd5sum(floating_ip_1, mount_points[0])
            ssh.close()

            LOG.debug("<----md5sums_dir_before---->")
            LOG.debug(md5sums_dir_before[str(floating_ip_1)])
            LOG.debug("<----md5sums_dir_after---->")
            LOG.debug(md5sums_dir_after[str(floating_ip_1)])

            if md5sums_dir_before[str(floating_ip_1)] == md5sums_dir_after[str(
                    floating_ip_1)]:
                LOG.debug("***MDSUMS MATCH***")
                reporting.add_test_step("Md5 Verification for volume",
                                        tvaultconf.PASS)
            else:
                LOG.debug("***MDSUMS DON'T MATCH***")
                reporting.add_test_step("Md5 Verification for volume",
                                        tvaultconf.FAIL)
                reporting.set_test_script_status(tvaultconf.FAIL)

            #Delete restore for snapshot
            if (tvaultconf.cleanup == True):
                self.addCleanup(self.restore_delete, workload_id,
                                incr_snapshot_id, restore_id_2)

            ### One-click restore ###

            mdb = self.calcmd5sum(floating_ip_1, mount_points[0])
            LOG.debug(
                "MD5SUMS before deleting the instance for one click restore : "
                + str(mdb))

            self.disassociate_floating_ip_from_server(floating_ip_1, vm_id)
            self.detach_volume(vm_id, volume_id)

            #Delete the original instance
            self.delete_vm(vm_id)
            LOG.debug(
                "Instance deleted successfully for one click restore : " +
                str(vm_id))
            time.sleep(10)

            #Delete bootable volume of original instance
            self.delete_volume(boot_volume_id)
            LOG.debug("Bootable volume of original instance deleted")

            #Delete volume attached to original instance
            self.delete_volume(volume_id)
            LOG.debug("Volumes deleted successfully for one click restore : " +
                      str(volume_id))

            deleted = 1

            #Create one-click restore using CLI command
            restore_command = command_argument_string.oneclick_restore + " " + snapshot_id
            rc = cli_parser.cli_returncode(restore_command)
            if rc != 0:
                reporting.add_test_step(
                    "Execute snapshot-oneclick-restore command",
                    tvaultconf.FAIL)
                raise Exception("Command did not execute correctly")
            else:
                reporting.add_test_step(
                    "Execute snapshot-oneclick-restore command",
                    tvaultconf.PASS)
                LOG.debug("Command executed correctly")

            restore_id_3 = query_data.get_snapshot_restore_id(snapshot_id)
            LOG.debug("Restore ID: " + str(restore_id_3))

            self.wait_for_snapshot_tobe_available(workload_id, snapshot_id)
            if (self.getRestoreStatus(workload_id, snapshot_id,
                                      restore_id_3) == "available"):
                reporting.add_test_step("One-click restore", tvaultconf.PASS)
                LOG.debug("One-click restore passed")
            else:
                reporting.add_test_step("One-click restore", tvaultconf.FAIL)
                LOG.debug("One-click restore failed")
                raise Exception("One-click restore failed")
            LOG.debug("One-click restore complete")

            #Fetch instance details after restore
            vm_list = []
            vm_list = self.get_restored_vm_list(restore_id_3)
            LOG.debug("Restored vms : " + str(vm_list))

            mda = {}
            ssh = self.SshRemoteMachineConnectionWithRSAKey(str(floating_ip_1))
            self.execute_command_disk_mount(ssh, str(floating_ip_1),
                                            [volumes[0]], [mount_points[0]])
            time.sleep(5)
            mda = self.calcmd5sum(floating_ip_1, mount_points[0])
            LOG.debug(
                "MD5SUMS after deleting the instance for one click restore : "
                + str(mda))
            ssh.close()

            if mdb[str(floating_ip_1)] == mda[str(floating_ip_1)]:
                LOG.debug("***MDSUMS MATCH***")
                reporting.add_test_step("Md5 Verification for volume",
                                        tvaultconf.PASS)
            else:
                LOG.debug("***MDSUMS DON'T MATCH***")
                reporting.add_test_step("Md5 Verification for volume",
                                        tvaultconf.FAIL)
                reporting.set_test_script_status(tvaultconf.FAIL)

            restored_volumes = []
            restored_volumes = self.get_restored_volume_list(restore_id_3)
            LOG.debug("Restored volumes : ")
            LOG.debug(restored_volumes)

            if tvaultconf.cleanup:
                self.addCleanup(self.restore_delete, workload_id, snapshot_id,
                                restore_id_3)
                time.sleep(30)
                self.addCleanup(self.delete_restored_vms, vm_list,
                                restored_volumes)

            reporting.test_case_to_write()

        except Exception as e:
            LOG.error("Exception: " + str(e))
            if (deleted == 0):
                self.disassociate_floating_ip_from_server(floating_ip_1, vm_id)
                self.detach_volume(vm_id, volume_id)
                self.delete_vm(vm_id)
                time.sleep(10)
                self.delete_volume(volume_id)
                self.delete_volume(boot_volume_id)
            reporting.set_test_script_status(tvaultconf.FAIL)
            reporting.test_case_to_write()

    def test_tvault1034_create_scheduled_workload(self):
        try:
            #Prerequisites
            self.created = False
            #Launch instance
            self.vm_id = self.create_vm()
            LOG.debug("VM ID: " + str(self.vm_id))

            #Create volume
            self.volume_id = self.create_volume()
            LOG.debug("Volume ID: " + str(self.volume_id))

            #Attach volume to the instance
            self.attach_volume(self.volume_id, self.vm_id)
            LOG.debug("Volume attached")

            #Create workload with CLI command
            self.start_date = time.strftime("%x")
            self.start_time = time.strftime("%I:%M %p")
            interval = tvaultconf.interval
            retention_policy_type = tvaultconf.retention_policy_type
            retention_policy_value = tvaultconf.retention_policy_value
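            # Assemble the workload-create CLI with job scheduler options:
            # start date/time, interval and retention policy from tvaultconf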
            workload_create = command_argument_string.workload_create + " --instance instance-id=" +str(self.vm_id)\
                + " --jobschedule start_date=" + str(self.start_date) + " --jobschedule start_time='" + str(self.start_time)\
                + "' --jobschedule interval='" + str(interval) + "' --jobschedule retention_policy_type='"\
                + str(retention_policy_type) + "' --jobschedule retention_policy_value=" + str(retention_policy_value)\
         + " --jobschedule enabled=True"
            rc = cli_parser.cli_returncode(workload_create)
            if rc != 0:
                reporting.add_test_step(
                    "Execute workload-create command with scheduler enabled",
                    tvaultconf.FAIL)
                raise Exception("Command did not execute correctly")
            else:
                reporting.add_test_step(
                    "Execute workload-create command with scheduler enabled",
                    tvaultconf.PASS)
                LOG.debug("Command executed correctly")
            time.sleep(10)
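            # Look up the new workload's ID by its configured name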
            self.wid = query_data.get_workload_id(tvaultconf.workload_name)
            LOG.debug("Workload ID: " + str(self.wid))
            self.wait_for_workload_tobe_available(self.wid)
            if (self.getWorkloadStatus(self.wid) == "available"):
                reporting.add_test_step("Create scheduled workload",
                                        tvaultconf.PASS)
            else:
                reporting.add_test_step("Create scheduled workload",
                                        tvaultconf.FAIL)
                reporting.set_test_script_status(tvaultconf.FAIL)

            self.schedule = self.getSchedulerStatus(self.wid)
            LOG.debug("Workload schedule: " + str(self.schedule))
            if (self.schedule):
                reporting.add_test_step("Verification", tvaultconf.PASS)
                LOG.debug("Workload schedule enabled")
            else:
                reporting.add_test_step("Verification", tvaultconf.FAIL)
                LOG.error("Workload schedule not enabled")

            #Cleanup
            #Delete workload
            self.workload_delete(self.wid)
            LOG.debug("Workload deleted successfully")
            reporting.test_case_to_write()

        except Exception as e:
            LOG.error("Exception: " + str(e))
            reporting.set_test_script_status(tvaultconf.FAIL)
            reporting.test_case_to_write()

    def test_tvault_rbac_nonadmin_notableto(self):
        try:
            # Use non-admin credentials
            os.environ['OS_USERNAME'] = CONF.identity.nonadmin_user
            os.environ['OS_PASSWORD'] = CONF.identity.nonadmin_password
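            # Each admin-only CLI below is expected to fail for a non-admin
            # user, so a non-zero return code is treated as a PASS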

            # Run get_storage_usage CLI
            get_storage_usage = command_argument_string.get_storage_usage
            LOG.debug("get_storage_usage  command: " + str(get_storage_usage))
            rc = cli_parser.cli_returncode(get_storage_usage)
            if rc != 0:
                reporting.add_test_step(
                    "Can not execute get_storage_usage command ",
                    tvaultconf.PASS)
                LOG.debug(
                    "Command  get_storage_usage did not execute correctly")
            else:
                reporting.add_test_step(
                    "Can not execute get_storage_usage command",
                    tvaultconf.FAIL)
                raise Exception("Command get_storage_usage executed correctly")

            # Run get_import_workloads_list CLI
            get_import_workloads_list = command_argument_string.get_import_workloads_list
            LOG.debug("get_import_workloads_list command: " +
                      str(get_import_workloads_list))
            rc = cli_parser.cli_returncode(get_import_workloads_list)
            if rc != 0:
                reporting.add_test_step(
                    "Can not execute get_import_workloads_list command",
                    tvaultconf.PASS)
                LOG.debug(
                    "Command get_import_workloads_list did not execute correctly"
                )
            else:
                reporting.add_test_step(
                    "Can not execute get_import_workloads_list command",
                    tvaultconf.FAIL)
                raise Exception(
                    "Command get_import_workloads_list executed correctly")

            # Run workload_disable_global_job_scheduler CLI
            workload_disable_global_job_scheduler = command_argument_string.workload_disable_global_job_scheduler
            LOG.debug("workload_disable_global_job_scheduler command: " +
                      str(workload_disable_global_job_scheduler))
            rc = cli_parser.cli_returncode(
                workload_disable_global_job_scheduler)
            if rc != 0:
                reporting.add_test_step(
                    "Can not execute workload_disable_global_job_scheduler command",
                    tvaultconf.PASS)
                LOG.debug(
                    "Command workload_disable_global_job_scheduler did not execute correctly"
                )
            else:
                reporting.add_test_step(
                    "Can not execute workload_disable_global_job_scheduler command",
                    tvaultconf.FAIL)
                raise Exception(
                    "Command workload_disable_global_job_scheduler executed correctly"
                )

            # Run workload_enable_global_job_scheduler CLI
            workload_enable_global_job_scheduler = command_argument_string.workload_enable_global_job_scheduler
            LOG.debug("workload_enable_global_job_scheduler command: " +
                      str(workload_enable_global_job_scheduler))
            rc = cli_parser.cli_returncode(
                workload_enable_global_job_scheduler)
            if rc != 0:
                reporting.add_test_step(
                    "Can not execute workload_enable_global_job_scheduler command",
                    tvaultconf.PASS)
                LOG.debug(
                    "Command workload_enable_global_job_scheduler did not execute correctly"
                )
            else:
                reporting.add_test_step(
                    "Can not execute workload_enable_global_job_scheduler command",
                    tvaultconf.FAIL)
                raise Exception(
                    "Command workload_enable_global_job_scheduler executed correctly"
                )

            # Run get_nodes CLI
            get_nodes = command_argument_string.get_nodes
            LOG.debug("get_nodes command: " + str(get_nodes))
            rc = cli_parser.cli_returncode(get_nodes)
            if rc != 0:
                reporting.add_test_step("Can not execute get_nodes command ",
                                        tvaultconf.PASS)
                LOG.debug("Command get_nodes did not execute correctly")
            else:
                reporting.add_test_step("Can not execute get_nodes command",
                                        tvaultconf.FAIL)
                raise Exception("Command get_nodes executed correctly")

            # Run license_check CLI
            license_check = command_argument_string.license_check
            LOG.debug("license_check command: " + str(license_check))
            rc = cli_parser.cli_returncode(license_check)
            if rc != 0:
                reporting.add_test_step(
                    "Can not execute license_check command", tvaultconf.PASS)
                LOG.debug("Command license_check did not execute correctly")
            else:
                reporting.add_test_step(
                    "Can not execute license_check command", tvaultconf.FAIL)
                raise Exception("Command license_check executed correctly")

            # Run license_list CLI
            license_list = command_argument_string.license_list
            LOG.debug("license_list  command: " + str(license_list))
            rc = cli_parser.cli_returncode(license_list)
            if rc != 0:
                reporting.add_test_step(
                    "Can not execute license_list command", tvaultconf.PASS)
                LOG.debug("Command license_list did not execute correctly")
            else:
                reporting.add_test_step("Can not execute license_list command",
                                        tvaultconf.FAIL)
                raise Exception("Command license_list executed correctly")

            reporting.set_test_script_status(tvaultconf.PASS)
            reporting.test_case_to_write()

        except Exception as e:
            LOG.error("Exception: " + str(e))
            reporting.set_test_script_status(tvaultconf.FAIL)
            reporting.test_case_to_write()
Example #28
    def test_tvault_rbac_backuprole_touser_policyjson(self):
        try:
            # Change policy.json file on tvault to change role and rule
            self.change_policyjson_file("backup", "backup_api")
            self.instances_id = []

            # Create volume, Launch an Instance
            self.volumes_id = self.create_volume(volume_cleanup=False)
            LOG.debug("Volume-1 ID: " + str(self.volumes_id))
            self.instances_id.append(self.create_vm(vm_cleanup=False))
            LOG.debug("VM-1 ID: " + str(self.instances_id[0]))
            self.attach_volume(self.volumes_id, self.instances_id[0])
            LOG.debug("Volume attached")

            # Use backupuser credentials
            os.environ['OS_USERNAME'] = CONF.identity.backupuser
            os.environ['OS_PASSWORD'] = CONF.identity.backupuser_password
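            # The workload lifecycle commands below should succeed under the
            # backup role once backup_api is assigned to it in policy.json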

            # Create workload with CLI by backup role
            workload_create = command_argument_string.workload_create + " --instance instance-id=" + str(
                self.instances_id[0])
            rc = cli_parser.cli_returncode(workload_create)
            if rc != 0:
                LOG.debug("workload creation unsuccessful by backup role")
                raise Exception(
                    "RBAC policy fails for workload creation by backup role")
            else:
                LOG.debug("Workload created successfully by backup role")
                reporting.add_test_step(
                    "Execute workload_create command by backup role",
                    tvaultconf.PASS)
                time.sleep(10)
                self.wid1 = query_data.get_workload_id(
                    tvaultconf.workload_name)
                workload_available = self.wait_for_workload_tobe_available(
                    self.wid1)

            # Run snapshot_create CLI by backup role
            snapshot_create = command_argument_string.snapshot_create + str(
                self.wid1)
            LOG.debug("snapshot_create command: " + str(snapshot_create))
            rc = cli_parser.cli_returncode(snapshot_create)
            if rc != 0:
                reporting.add_test_step(
                    "Execute snapshot_create command by backup role",
                    tvaultconf.FAIL)
                raise Exception(
                    "Command snapshot_create did not execute correctly by backup role"
                )
            else:
                reporting.add_test_step(
                    "Execute snapshot_create command by backup role",
                    tvaultconf.PASS)
                LOG.debug(
                    "Command snapshot_create executed correctly by backup role"
                )
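                # Record the in-progress snapshot ID and wait for it to complete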
                self.snapshot_id1 = query_data.get_inprogress_snapshot_id(
                    self.wid1)
                wc = self.wait_for_snapshot_tobe_available(
                    self.wid1, self.snapshot_id1)

            #Delete the original instance
            self.delete_vm(self.instances_id[0])
            LOG.debug("Instance deleted successfully for restore")

            #Delete corresponding volume
            self.delete_volume(self.volumes_id)
            LOG.debug("Volume deleted successfully for restore")

            #Create one-click restore using CLI command by backup role
            restore_command = command_argument_string.oneclick_restore + " " + str(
                self.snapshot_id1)
            rc = cli_parser.cli_returncode(restore_command)
            if rc != 0:
                reporting.add_test_step(
                    "Execute snapshot-oneclick-restore command by backup role",
                    tvaultconf.FAIL)
                raise Exception(
                    "Command one-click restore did not execute correctly by backup role"
                )
            else:
                reporting.add_test_step(
                    "Execute snapshot-oneclick-restore command by backup role",
                    tvaultconf.PASS)
                LOG.debug(
                    "Command one-click restore executed correctly by backup role"
                )
                wc = self.wait_for_snapshot_tobe_available(
                    self.wid1, self.snapshot_id1)
                self.restore_id1 = query_data.get_snapshot_restore_id(
                    self.snapshot_id1)
                LOG.debug("Restore ID: " + str(self.restore_id1))
                self.restore_vm_id1 = self.get_restored_vm_list(
                    self.restore_id1)
                LOG.debug("Restore VM ID: " + str(self.restore_vm_id1))
                self.restore_volume_id1 = self.get_restored_volume_list(
                    self.restore_id1)
                LOG.debug("Restore Volume ID: " + str(self.restore_volume_id1))

            # Use admin credentials
            os.environ['OS_USERNAME'] = CONF.identity.username
            os.environ['OS_PASSWORD'] = CONF.identity.password
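            # With backup_api now restricted to the backup role, the admin
            # role is expected to be denied the operations below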

            # Create workload with CLI by admin role
            workload_create = command_argument_string.workload_create + " --instance instance-id=" + str(
                self.restore_vm_id1)
            rc = cli_parser.cli_returncode(workload_create)
            if rc != 0:
                LOG.debug(
                    "Command workload_create did not execute correctly by admin role"
                )
                reporting.add_test_step(
                    "Can not execute workload_create command by admin role",
                    tvaultconf.PASS)
            else:
                reporting.add_test_step(
                    "Can not execute workload_create command by admin role",
                    tvaultconf.FAIL)
                raise Exception(
                    "Command workload_create executed correctly by admin role")

            # Run snapshot_create CLI by admin role
            snapshot_create = command_argument_string.snapshot_create + str(
                self.wid1)
            LOG.debug("snapshot_create command: " + str(snapshot_create))
            rc = cli_parser.cli_returncode(snapshot_create)
            if rc != 0:
                reporting.add_test_step(
                    "Can not execute snapshot_create command by admin role",
                    tvaultconf.PASS)
                LOG.debug(
                    "Command snapshot_create did not execute correctly by admin role"
                )
            else:
                reporting.add_test_step(
                    "Can not execute snapshot_create command by admin role",
                    tvaultconf.FAIL)
                raise Exception(
                    "Command snapshot_create executed correctly by admin role")

            #Create one-click restore using CLI command by admin role
            restore_command = command_argument_string.oneclick_restore + " " + str(
                self.snapshot_id1)
            rc = cli_parser.cli_returncode(restore_command)
            if rc != 0:
                reporting.add_test_step(
                    "Can not execute restore_create command by admin role",
                    tvaultconf.PASS)
                LOG.debug(
                    "Command restore_create did not execute correctly by admin role"
                )
            else:
                reporting.add_test_step(
                    "Can not execute restore_create command by admin role",
                    tvaultconf.FAIL)
                raise Exception(
                    "Command restore_create executed correctly by admin role")

            # Run restore_delete CLI by admin role
            restore_delete = command_argument_string.restore_delete + str(
                self.restore_id1)
            rc = cli_parser.cli_returncode(restore_delete)
            if rc != 0:
                reporting.add_test_step(
                    "Can not execute restore_delete command by admin role",
                    tvaultconf.PASS)
                LOG.debug(
                    "Command restore_delete did not execute correctly by admin role"
                )
            else:
                reporting.add_test_step(
                    "Can not execute restore_delete command by admin role",
                    tvaultconf.FAIL)
                raise Exception(
                    "Command restore_delete executed correctly by admin role")

            # Run snapshot_delete CLI by admin role
            snapshot_delete = command_argument_string.snapshot_delete + str(
                self.snapshot_id1)
            rc = cli_parser.cli_returncode(snapshot_delete)
            if rc != 0:
                reporting.add_test_step(
                    "Can not execute snapshot_delete command by admin role",
                    tvaultconf.PASS)
                LOG.debug(
                    "Command snapshot_delete did not execute correctly by admin role"
                )
            else:
                reporting.add_test_step(
                    "Can not execute snapshot_delete command by admin role",
                    tvaultconf.FAIL)
                raise Exception(
                    "Command snapshot_delete executed correctly by admin role")

            # Delete workload with CLI by admin role
            workload_delete = command_argument_string.workload_delete + str(
                self.wid1)
            rc = cli_parser.cli_returncode(workload_delete)
            if rc != 0:
                reporting.add_test_step(
                    "Can not execute workload_delete command by admin role",
                    tvaultconf.PASS)
                LOG.debug(
                    "Command workload_delete did not execute correctly by admin role"
                )
            else:
                reporting.add_test_step(
                    "Can not execute workload_delete command by admin role",
                    tvaultconf.FAIL)
                raise Exception(
                    "Command workload_delete executed correctly by admin role")

            # Use nonadmin credentials
            os.environ['OS_USERNAME'] = CONF.identity.nonadmin_user
            os.environ['OS_PASSWORD'] = CONF.identity.nonadmin_password
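            # The default role has no backup privileges, so these commands are
            # expected to be denied as well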

            # Create workload with CLI by default role
            workload_create = command_argument_string.workload_create + " --instance instance-id=" + str(
                self.restore_vm_id1)
            rc = cli_parser.cli_returncode(workload_create)
            if rc != 0:
                LOG.debug(
                    "Command workload_create did not execute correctly by default role"
                )
                reporting.add_test_step(
                    "Can not execute workload_create command by default role",
                    tvaultconf.PASS)
            else:
                reporting.add_test_step(
                    "Can not execute workload_create command by default role",
                    tvaultconf.FAIL)
                raise Exception(
                    "Command workload_create executed correctly by default role"
                )

            # Run snapshot_create CLI by default role
            snapshot_create = command_argument_string.snapshot_create + str(
                self.wid1)
            rc = cli_parser.cli_returncode(snapshot_create)
            if rc != 0:
                reporting.add_test_step(
                    "Can not execute snapshot_create command by default role",
                    tvaultconf.PASS)
                LOG.debug(
                    "Command snapshot_create did not execute correctly by default role"
                )
            else:
                reporting.add_test_step(
                    "Can not execute snapshot_create command by default role",
                    tvaultconf.FAIL)
                raise Exception(
                    "Command snapshot_create executed correctly by default role"
                )

            # Create one-click restore using CLI by default role
            restore_command = command_argument_string.oneclick_restore + " " + str(
                self.snapshot_id1)
            rc = cli_parser.cli_returncode(restore_command)
            if rc != 0:
                reporting.add_test_step(
                    "Can not execute restore_create command by default role",
                    tvaultconf.PASS)
                LOG.debug(
                    "Command restore_create did not execute correctly by default role"
                )
            else:
                reporting.add_test_step(
                    "Can not execute restore_create command by default role",
                    tvaultconf.FAIL)
                raise Exception(
                    "Command restore_create executed correctly by default role"
                )

            # Run restore_delete CLI by default role
            restore_delete = command_argument_string.restore_delete + str(
                self.restore_id1)
            rc = cli_parser.cli_returncode(restore_delete)
            if rc != 0:
                reporting.add_test_step(
                    "Can not execute restore_delete command by default role",
                    tvaultconf.PASS)
                LOG.debug(
                    "Command restore_delete did not execute correctly by default role"
                )
            else:
                reporting.add_test_step(
                    "Can not execute restore_delete command by default role",
                    tvaultconf.FAIL)
                raise Exception(
                    "Command restore_delete executed correctly by default role"
                )

            # Run snapshot_delete CLI by default role
            snapshot_delete = command_argument_string.snapshot_delete + str(
                self.snapshot_id1)
            LOG.debug("snapshot_delete command: " + str(snapshot_create))
            rc = cli_parser.cli_returncode(snapshot_delete)
            if rc != 0:
                reporting.add_test_step(
                    "Can not execute snapshot_delete command by default role",
                    tvaultconf.PASS)
                LOG.debug(
                    "Command snapshot_delete did not execute correctly by default role"
                )
            else:
                reporting.add_test_step(
                    "Can not execute snapshot_delete command by default role",
                    tvaultconf.FAIL)
                raise Exception(
                    "Command snapshot_delete executed correctly by default role"
                )

            # Delete workload with CLI by default role
            workload_delete = command_argument_string.workload_delete + str(
                self.wid1)
            rc = cli_parser.cli_returncode(workload_delete)
            if rc != 0:
                reporting.add_test_step(
                    "Can not execute workload_delete command by default role",
                    tvaultconf.PASS)
                LOG.debug(
                    "Command workload_delete did not execute correctly by default role"
                )
            else:
                reporting.add_test_step(
                    "Can not execute workload_delete command by default role",
                    tvaultconf.FAIL)
                raise Exception(
                    "Command workload_delete executed correctly by default role"
                )

            # Use backupuser credentials
            os.environ['OS_USERNAME'] = CONF.identity.backupuser
            os.environ['OS_PASSWORD'] = CONF.identity.backupuser_password

            # Run restore_delete CLI by backup role
            restore_delete = command_argument_string.restore_delete + str(
                self.restore_id1)
            rc = cli_parser.cli_returncode(restore_delete)
            if rc != 0:
                reporting.add_test_step(
                    "Execute restore_delete command by backup role",
                    tvaultconf.FAIL)
                raise Exception(
                    "Command restore_delete did not execute correctly by backup role"
                )
            else:
                reporting.add_test_step(
                    "Execute restore_delete command by backup role",
                    tvaultconf.PASS)
                LOG.debug(
                    "Command restore_delete executed correctly by backup role")
                wc = self.wait_for_snapshot_tobe_available(
                    self.wid1, self.snapshot_id1)
                #Delete restored VM instance and volume
                self.delete_restored_vms(self.restore_vm_id1,
                                         self.restore_volume_id1)
                LOG.debug("Restored VMs deleted successfully by backup role")

            # Run snapshot_delete CLI by backup role
            snapshot_delete = command_argument_string.snapshot_delete + str(
                self.snapshot_id1)
            LOG.debug("snapshot_delete command: " + str(snapshot_create))
            rc = cli_parser.cli_returncode(snapshot_delete)
            if rc != 0:
                reporting.add_test_step(
                    "Execute snapshot_delete command by backup role",
                    tvaultconf.FAIL)
                raise Exception(
                    "Command snapshot_delete did not execute correctly by backup role"
                )
            else:
                reporting.add_test_step(
                    "Execute snapshot_delete command by backup role",
                    tvaultconf.PASS)
                LOG.debug(
                    "Command snapshot_delete executed correctly by backup role"
                )
                workload_available = self.wait_for_workload_tobe_available(
                    self.wid1)

            # Delete workload with CLI by backup role
            workload_delete = command_argument_string.workload_delete + str(
                self.wid1)
            rc = cli_parser.cli_returncode(workload_delete)
            if rc != 0:
                reporting.add_test_step(
                    "Execute workload_delete command by backup role",
                    tvaultconf.FAIL)
                raise Exception(
                    "RBAC policy fails for workload deletion by backup role")
            else:
                LOG.debug("Workload deleted successfully by backup role")
                reporting.add_test_step(
                    "Execute workload_delete command by backup role",
                    tvaultconf.PASS)

            reporting.test_case_to_write()

        except Exception as e:
            LOG.error("Exception: " + str(e))
            reporting.set_test_script_status(tvaultconf.FAIL)
            reporting.test_case_to_write()
Example #29
    def test_tvault_inplace_cli_delete_vm(self):
        try:

            volumes = ["/dev/vdb", "/dev/vdc"]
            mount_points = ["mount_data_b", "mount_data_c"]

            #Fill some data on each of the volumes attached
            ssh = self.SshRemoteMachineConnectionWithRSAKey(
                str(self.floating_ips_list[0]))
            self.addCustomSizedfilesOnLinux(ssh, mount_points[0], 1)
            ssh.close()

            ssh = self.SshRemoteMachineConnectionWithRSAKey(
                str(self.floating_ips_list[1]))
            self.addCustomSizedfilesOnLinux(ssh, mount_points[0], 1)
            self.addCustomSizedfilesOnLinux(ssh, mount_points[1], 1)
            ssh.close()

            #Calculate md5 checksums on each volume before the restore
            tree = lambda: collections.defaultdict(tree)
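            # 'tree' builds an arbitrarily nested defaultdict, keyed here by
            # floating IP and mount point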
            self.md5sums_dir_before = tree()

            ssh = self.SshRemoteMachineConnectionWithRSAKey(
                str(self.floating_ips_list[0]))
            self.md5sums_dir_before[str(self.floating_ips_list[0])][str(
                mount_points[0])] = self.calculatemmd5checksum(
                    ssh, mount_points[0])
            ssh.close()

            ssh = self.SshRemoteMachineConnectionWithRSAKey(
                str(self.floating_ips_list[1]))
            self.md5sums_dir_before[str(self.floating_ips_list[1])][str(
                mount_points[0])] = self.calculatemmd5checksum(
                    ssh, mount_points[0])
            self.md5sums_dir_before[str(self.floating_ips_list[1])][str(
                mount_points[1])] = self.calculatemmd5checksum(
                    ssh, mount_points[1])
            ssh.close()

            LOG.debug("md5sums_dir_before" + str(self.md5sums_dir_before))

            #Delete the first VM and one of the attached volumes
            self.delete_vm(self.workload_instances[0])
            self.delete_volume(self.volumes_list[1])

            #Create in-place restore with CLI command
            restore_command = command_argument_string.inplace_restore + str(
                tvaultconf.restore_filename) + " " + str(self.incr_snapshot_id)

            LOG.debug("inplace restore cli command: " + str(restore_command))
            #restore.json: the first instance is excluded entirely; only one
            #cinder volume of the second instance is restored
            restore_json = json.dumps({
                'openstack': {
                    'instances': [{
                        'restore_boot_disk': True,
                        'include': False,
                        'id': self.workload_instances[0],
                        'vdisks': []
                    }, {
                        'restore_boot_disk': True,
                        'include': True,
                        'id': self.workload_instances[1],
                        'vdisks': [{
                            'restore_cinder_volume': True,
                            'id': self.volumes_list[2],
                            'new_volume_type': CONF.volume.volume_type
                        }]
                    }],
                    'networks_mapping': {
                        'networks': []
                    }
                },
                'restore_type': 'inplace',
                'type': 'openstack'
            })
            LOG.debug("restore.json for inplace restore: " + str(restore_json))
            #Write restore.json; the json.dumps payload is written directly
            #(valid JSON is also valid YAML for the CLI parser)
            with open(tvaultconf.restore_filename, 'w') as f:
                f.write(restore_json)
            rc = cli_parser.cli_returncode(restore_command)
            if rc != 0:
                reporting.add_test_step("Triggering In-Place restore via CLI",
                                        tvaultconf.FAIL)
                raise Exception("Command did not execute correctly")
            else:
                reporting.add_test_step("Triggering In-Place restore via CLI",
                                        tvaultconf.PASS)
                LOG.debug("Command executed correctly")

            #get restore id from database
            self.restore_id = query_data.get_snapshot_restore_id(
                self.incr_snapshot_id)
            self.wait_for_snapshot_tobe_available(self.workload_id,
                                                  self.incr_snapshot_id)

            #get in-place restore status
            if (self.getRestoreStatus(self.workload_id, self.incr_snapshot_id,
                                      self.restore_id) == "available"):
                reporting.add_test_step("In-place restore", tvaultconf.PASS)
            else:
                reporting.add_test_step("In-place restore", tvaultconf.FAIL)
                raise Exception("In-place restore failed")

            # mount volumes after restore
            ssh = self.SshRemoteMachineConnectionWithRSAKey(
                str(self.floating_ips_list[1]))
            self.execute_command_disk_mount(ssh,
                                            str(self.floating_ips_list[1]),
                                            volumes, mount_points)
            ssh.close()

            # calculate md5 after inplace restore
            tree = lambda: collections.defaultdict(tree)
            md5_sum_after_in_place_restore = tree()

            ssh = self.SshRemoteMachineConnectionWithRSAKey(
                str(self.floating_ips_list[1]))
            md5_sum_after_in_place_restore[str(self.floating_ips_list[1])][str(
                mount_points[0])] = self.calculatemmd5checksum(
                    ssh, mount_points[0])
            md5_sum_after_in_place_restore[str(self.floating_ips_list[1])][str(
                mount_points[1])] = self.calculatemmd5checksum(
                    ssh, mount_points[1])
            ssh.close()

            LOG.debug("md5_sum_after_in_place_restore" +
                      str(md5_sum_after_in_place_restore))

            # md5 sum verification
            if self.md5sums_dir_before[str(self.floating_ips_list[1])][str(
                    mount_points[0])] == md5_sum_after_in_place_restore[str(
                        self.floating_ips_list[1])][str(mount_points[0])]:
                reporting.add_test_step("Md5 Verification for volume 1",
                                        tvaultconf.PASS)
            else:
                reporting.add_test_step("Md5 Verification for volume 1",
                                        tvaultconf.FAIL)
                reporting.set_test_script_status(tvaultconf.FAIL)

            if self.md5sums_dir_before[str(self.floating_ips_list[1])][str(
                    mount_points[1])] != md5_sum_after_in_place_restore[str(
                        self.floating_ips_list[1])][str(mount_points[1])]:
                reporting.add_test_step("Md5 Verification for volume 2",
                                        tvaultconf.PASS)
            else:
                reporting.add_test_step("Md5 Verification for volume 2",
                                        tvaultconf.FAIL)
                reporting.set_test_script_status(tvaultconf.FAIL)

            #Delete restore for snapshot
            self.restored_volumes = self.get_restored_volume_list(
                self.restore_id)
            if tvaultconf.cleanup:
                self.restore_delete(self.workload_id, self.incr_snapshot_id,
                                    self.restore_id)
                LOG.debug("Snapshot Restore deleted successfully")

                #Delete restored volumes and volume snapshots
                self.delete_volumes(self.restored_volumes)

            reporting.test_case_to_write()

        except Exception as e:
            LOG.error("Exception: " + str(e))
            reporting.set_test_script_status(tvaultconf.FAIL)
            reporting.test_case_to_write()

    def test_tvault1039_delete_workload(self):
        try:
            # Prerequisites
            self.deleted = False
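            # Tracks whether the workload deletion has been confirmed in the DB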
            self.workload_instances = []
            # Launch instance
            self.vm_id = self.create_vm()
            LOG.debug("VM ID: " + str(self.vm_id))

            # Create volume
            self.volume_id = self.create_volume()
            LOG.debug("Volume ID: " + str(self.volume_id))

            # Attach volume to the instance
            self.attach_volume(self.volume_id, self.vm_id)
            LOG.debug("Volume attached")

            # Create workload
            self.workload_instances.append(self.vm_id)
            self.wid = self.workload_create(
                self.workload_instances,
                tvaultconf.parallel,
                workload_name=tvaultconf.workload_name,
                workload_cleanup=False)
            LOG.debug("Workload ID: " + str(self.wid))
            time.sleep(5)

            # Delete workload from CLI command
            rc = cli_parser.cli_returncode(
                command_argument_string.workload_delete + str(self.wid))
            if rc != 0:
                reporting.add_test_step("Execute workload-delete command",
                                        tvaultconf.FAIL)
                raise Exception("Command did not execute correctly")
            else:
                reporting.add_test_step("Execute workload-delete command",
                                        tvaultconf.PASS)
                LOG.debug("Command executed correctly")

            # Poll the DB until the workload shows as deleted; the retry count
            # is bounded so the test cannot hang indefinitely
            wc = query_data.get_deleted_workload(self.wid)
            LOG.debug("Workload status: " + str(wc))
            for _ in range(30):
                if str(wc) == "deleted":
                    self.deleted = True
                    break
                time.sleep(5)
                wc = query_data.get_deleted_workload(self.wid)
                LOG.debug("Workload status: " + str(wc))
            if self.deleted:
                reporting.add_test_step("Verification with DB",
                                        tvaultconf.PASS)
                LOG.debug("Workload successfully deleted")
            else:
                reporting.add_test_step("Verification with DB",
                                        tvaultconf.FAIL)
                raise Exception("Workload did not get deleted")
            reporting.test_case_to_write()

        except Exception as e:
            LOG.error("Exception: " + str(e))
            reporting.set_test_script_status(tvaultconf.FAIL)
            reporting.test_case_to_write()