    def test_1_volume_volume(self):
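        """Boot-from-volume VM: workload, snapshots and all restore types.

        Flow as implemented below: create a bootable volume and VM, attach a
        data volume, write data and record md5sums, create a workload via the
        CLI, take a full and an incremental snapshot, then verify selective,
        in-place and one-click restores by comparing md5sums on the restored
        volumes.
        """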
        try:
            ### VM and Workload ###
            tests = [['tempest.api.workloadmgr.restore.test_volume_vol_Selective-restore',
                      0],
                     ['tempest.api.workloadmgr.restore.test_volume_vol_Inplace-restore',
                      0],
                     ['tempest.api.workloadmgr.restore.test_volume_vol_Oneclick-restore',
                      0]]
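            # Each entry is [test script name, reported flag]; the flag is set
            # to 1 once that result has been flushed via
            # reporting.test_case_to_write(), so the except block can report a
            # FAIL for any script that was never written.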
            reporting.add_test_script(tests[0][0])
            deleted = 0
            global volumes
            mount_points = ["mount_data_a", "mount_data_b"]
            md5sums_before_full = {}

            # Create Keypair
            kp = self.create_key_pair(
                tvaultconf.key_pair_name, keypair_cleanup=True)
            LOG.debug("Key_pair : " + str(kp))

            # Create bootable volume
            boot_volume_id = self.create_volume(
                size=tvaultconf.bootfromvol_vol_size,
                image_id=CONF.compute.image_ref,
                volume_cleanup=False)
            self.set_volume_as_bootable(boot_volume_id)
            LOG.debug("Bootable Volume ID : " + str(boot_volume_id))

            self.block_mapping_details = [{"source_type": "volume",
                                           "delete_on_termination": "false",
                                           "boot_index": 0,
                                           "uuid": boot_volume_id,
                                           "destination_type": "volume"}]

            # Create instance
            vm_id = self.create_vm(
                key_pair=kp,
                image_id="",
                block_mapping_data=self.block_mapping_details,
                vm_cleanup=False)
            LOG.debug("VM ID : " + str(vm_id))
            time.sleep(30)

            # Create and attach volume
            volume_id = self.create_volume(
                volume_type_id=CONF.volume.volume_type_id,
                volume_cleanup=False)
            LOG.debug("Volume ID: " + str(volume_id))
            volumes = tvaultconf.volumes_parts
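            # tvaultconf.volumes_parts is assumed to hold the guest device
            # paths (e.g. /dev/vdb) used when re-mounting the attached volume
            # after each restore.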

            self.attach_volume(volume_id, vm_id, attach_cleanup=False)
            LOG.debug("Volume attached")

            # Assign floating IP
            floating_ip_1 = self.assign_floating_ips(vm_id, False)
            LOG.debug("Assigned floating IP : " + str(floating_ip_1))
            LOG.debug("Sleeping for 40 sec")
            time.sleep(40)

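            # On Ubuntu guests install the QEMU guest agent, presumably so
            # snapshots can quiesce the filesystem.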
            if CONF.validation.ssh_user == 'ubuntu':
                self.install_qemu_ga(floating_ip_1)

            # Adding data and calculating md5sums
            self.data_ops(floating_ip_1, mount_points[0], 3)
            LOG.debug("Created disk and mounted the attached volume")

            md5sums_before_full = self.calcmd5sum(
                floating_ip_1, mount_points[0])
            LOG.debug("\nMD5SUM of the data before full snapshot : {}\n".format(
                md5sums_before_full))

            workload_create = command_argument_string.workload_create + \
                " --instance instance-id=" + str(vm_id)
            rc = cli_parser.cli_returncode(workload_create)
            if rc != 0:
                reporting.add_test_step(
                    "Execute workload-create command", tvaultconf.FAIL)
                raise Exception(
                    "Workload-create command did not execute correctly")
            else:
                reporting.add_test_step(
                    "Execute workload-create command", tvaultconf.PASS)
                LOG.debug("Workload-create command executed correctly")

            time.sleep(10)
            workload_id = query_data.get_workload_id(tvaultconf.workload_name)
            LOG.debug("Workload ID: " + str(workload_id))
            if(workload_id is not None):
                self.wait_for_workload_tobe_available(workload_id)
                if(self.getWorkloadStatus(workload_id) == "available"):
                    reporting.add_test_step("Create workload", tvaultconf.PASS)
                else:
                    reporting.add_test_step("Create workload", tvaultconf.FAIL)
            else:
                reporting.add_test_step("Create workload", tvaultconf.FAIL)

            if (tvaultconf.cleanup):
                self.addCleanup(self.workload_delete, workload_id)

            ### Full Snapshot ###

            snapshot_id = self.create_snapshot(workload_id, is_full=True)

            # Add some more data to files on VM
            ssh = self.SshRemoteMachineConnectionWithRSAKey(str(floating_ip_1))
            self.addCustomfilesOnLinuxVM(ssh, mount_points[0], 2)
            ssh.close()
            md5sums_before_incremental = {}
            md5sums_before_incremental = self.calcmd5sum(
                floating_ip_1, mount_points[0])
            LOG.debug("\nMD5SUM after adding additional data before incremental snapshot : {}\n".format(
                md5sums_before_incremental))

            ### Incremental snapshot ###

            incr_snapshot_id = self.create_snapshot(workload_id, is_full=False)

            ### Selective restore ###

            rest_details = {}
            rest_details['rest_type'] = 'selective'
            rest_details['network_id'] = CONF.network.internal_network_id
            rest_details['subnet_id'] = self.get_subnet_id(
                CONF.network.internal_network_id)
            volumeslist = [boot_volume_id, volume_id]
            rest_details['instances'] = {vm_id: volumeslist}

            payload = self.create_restore_json(rest_details)
            # Trigger selective restore
            restore_id_1 = self.snapshot_selective_restore(
                workload_id,
                snapshot_id,
                restore_name=tvaultconf.restore_name,
                restore_cleanup=True,
                instance_details=payload['instance_details'],
                network_details=payload['network_details'])
            self.wait_for_snapshot_tobe_available(workload_id, snapshot_id)
            if(self.getRestoreStatus(workload_id, snapshot_id, restore_id_1) == "available"):
                reporting.add_test_step("Selective restore", tvaultconf.PASS)
            else:
                reporting.add_test_step("Selective restore", tvaultconf.FAIL)
                raise Exception("Selective restore failed")

            # Fetch instance details after restore
            vm_list = self.get_restored_vm_list(restore_id_1)
            LOG.debug("Restored vm(selective) ID : " + str(vm_list))
            time.sleep(60)
            floating_ip_2 = self.assign_floating_ips(vm_list[0], True)
            LOG.debug(
                "Floating ip assigned to selective restore vm -> " +
                str(floating_ip_2))
            md5sums_after_selective = {}
            ssh = self.SshRemoteMachineConnectionWithRSAKey(str(floating_ip_2))
            self.execute_command_disk_mount(ssh, str(floating_ip_2), [
                                            volumes[0]], [mount_points[0]])
            time.sleep(5)
            md5sums_after_selective = self.calcmd5sum(
                floating_ip_2, mount_points[0])
            ssh.close()

            LOG.debug("MD5SUMS before restore")
            LOG.debug(md5sums_before_full[str(floating_ip_1)])
            LOG.debug("MD5SUMS after selective restore")
            LOG.debug(md5sums_after_selective[str(floating_ip_2)])

            if md5sums_before_full[str(
                floating_ip_1)] == md5sums_after_selective[str(floating_ip_2)]:
                LOG.debug("***MDSUMS MATCH***")
                reporting.add_test_step(
                    "Md5 Verification for volume", tvaultconf.PASS)
                reporting.set_test_script_status(tvaultconf.PASS)
            else:
                LOG.debug("***MDSUMS DON'T MATCH***")
                reporting.add_test_step(
                    "Md5 Verification for volume", tvaultconf.FAIL)
                reporting.set_test_script_status(tvaultconf.FAIL)

            restored_vm_details = []
            for id in range(len(vm_list)):
                restored_vm_details.append(self.get_vm_details(vm_list[id]))
            LOG.debug("Restored vm details list: " + str(restored_vm_details))

            vms_details_after_restore = self.get_vms_details_list(
                restored_vm_details)
            LOG.debug("VM details after restore: " +
                      str(vms_details_after_restore))
            # Compare the data before and after restore
            int_net_1_name = self.get_net_name(
                CONF.network.internal_network_id)
            for i in range(len(vms_details_after_restore)):
                if(vms_details_after_restore[i]['network_name'] == int_net_1_name):
                    reporting.add_test_step(
                        "Network verification for instance-" + str(i + 1), tvaultconf.PASS)
                    tests[0][1] = 1
                    reporting.test_case_to_write()
                else:
                    LOG.error("Expected network: " + str(int_net_1_name))
                    LOG.error("Restored network: " +
                              str(vms_details_after_restore[i]['network_name']))
                    reporting.add_test_step(
                        "Network verification for instance-" + str(i + 1), tvaultconf.FAIL)
                    reporting.set_test_script_status(tvaultconf.FAIL)
                    reporting.test_case_to_write()

            ### In-place restore ###

            rest_details = {}
            rest_details['rest_type'] = 'inplace'
            rest_details['instances'] = {vm_id: volumeslist}

            reporting.add_test_script(tests[1][0])
            # Create in-place restore with CLI command
            restore_command = command_argument_string.inplace_restore + \
                str(tvaultconf.restore_filename) + " " + str(snapshot_id)
            payload = self.create_restore_json(rest_details)
            restore_json = json.dumps(payload)
            LOG.debug("restore.json for inplace restore: " + str(restore_json))
            # Create Restore.json
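            # Note: the payload is parsed back into a dict and its str() is
            # written out, so the file contains a Python dict literal rather
            # than strict JSON; the workloadmgr CLI is assumed to accept this
            # format.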
            with open(tvaultconf.restore_filename, 'w') as f:
                f.write(str(yaml.safe_load(restore_json)))
            rc = cli_parser.cli_returncode(restore_command)
            if rc != 0:
                reporting.add_test_step(
                    "Triggering In-Place restore via CLI", tvaultconf.FAIL)
                raise Exception("Command did not execute correctly")
            else:
                reporting.add_test_step(
                    "Triggering In-Place restore via CLI", tvaultconf.PASS)
                LOG.debug("Command executed correctly")

            # get restore id from database
            restore_id_2 = query_data.get_snapshot_restore_id(snapshot_id)
            self.wait_for_snapshot_tobe_available(workload_id, snapshot_id)

            # get in-place restore status
            if(self.getRestoreStatus(workload_id, snapshot_id, restore_id_2) == "available"):
                reporting.add_test_step("In-place restore", tvaultconf.PASS)
            else:
                reporting.add_test_step("In-place restore", tvaultconf.FAIL)
                raise Exception("In-place restore failed")

            # Fetch instance details after restore
            vm_list = []
            vm_list = self.get_restored_vm_list(restore_id_2)
            LOG.debug("Restored vm(In-place) ID : " + str(vm_list))

            time.sleep(60)
            md5sums_after_inplace = {}
            ssh = self.SshRemoteMachineConnectionWithRSAKey(str(floating_ip_1))
            self.execute_command_disk_mount(ssh, str(floating_ip_1), [
                                            volumes[0]], [mount_points[0]])
            time.sleep(5)
            md5sums_after_inplace = self.calcmd5sum(
                floating_ip_1, mount_points[0])
            ssh.close()

            LOG.debug("<----md5sums_before_full---->")
            LOG.debug(md5sums_before_full[str(floating_ip_1)])
            LOG.debug("<----md5sums_after_inplace---->")
            LOG.debug(md5sums_after_inplace[str(floating_ip_1)])

            if md5sums_before_full[str(
                floating_ip_1)] == md5sums_after_inplace[str(floating_ip_1)]:
                LOG.debug("***MDSUMS MATCH***")
                reporting.add_test_step(
                    "Md5 Verification for volume", tvaultconf.PASS)
                tests[1][1] = 1
                reporting.set_test_script_status(tvaultconf.PASS)
                reporting.test_case_to_write()
            else:
                LOG.debug("***MDSUMS DON'T MATCH***")
                reporting.add_test_step(
                    "Md5 Verification for volume", tvaultconf.FAIL)
                reporting.set_test_script_status(tvaultconf.FAIL)
                reporting.test_case_to_write()

            # Delete restore for snapshot
            if (tvaultconf.cleanup):
                self.addCleanup(self.restore_delete, workload_id,
                                snapshot_id, restore_id_2)

            ### One-click restore ###

            reporting.add_test_script(tests[2][0])

            self.detach_volume(vm_id, volume_id)

            # Delete the original instance
            self.delete_vm(vm_id)
            LOG.debug(
                "Instance deleted successfully for one click restore : " +
                str(vm_id))
            time.sleep(10)

            # Delete bootable volume of original instance
            self.delete_volume(boot_volume_id)
            LOG.debug("Bootable volume of original instance deleted")

            # Delete volume attached to original instance
            self.delete_volume(volume_id)
            LOG.debug(
                "Volumes deleted successfully for one click restore : " +
                str(volume_id))

            deleted = 1

            # Create one-click restore using CLI command
            restore_command = command_argument_string.oneclick_restore + " " + incr_snapshot_id
            rc = cli_parser.cli_returncode(restore_command)
            if rc != 0:
                reporting.add_test_step(
                    "Execute snapshot-oneclick-restore command",
                    tvaultconf.FAIL)
                raise Exception("Command did not execute correctly")
            else:
                reporting.add_test_step(
                    "Execute snapshot-oneclick-restore command",
                    tvaultconf.PASS)
                LOG.debug("Command executed correctly")

            restore_id_3 = query_data.get_snapshot_restore_id(incr_snapshot_id)
            LOG.debug("Restore ID: " + str(restore_id_3))

            self.wait_for_snapshot_tobe_available(
                workload_id, incr_snapshot_id)
            if(self.getRestoreStatus(workload_id, incr_snapshot_id, restore_id_3) == "available"):
                reporting.add_test_step("One-click restore", tvaultconf.PASS)
                LOG.debug("One-click restore passed")
            else:
                reporting.add_test_step("One-click restore", tvaultconf.FAIL)
                LOG.debug("One-click restore failed")
                raise Exception("One-click restore failed")
            LOG.debug("One-click restore complete")

            # Fetch instance details after restore
            vm_list = []
            vm_list = self.get_restored_vm_list(restore_id_3)
            LOG.debug("Restored vms : " + str(vm_list))

            md5sums_after_1clickrestore = {}
            ssh = self.SshRemoteMachineConnectionWithRSAKey(str(floating_ip_1))
            self.execute_command_disk_mount(ssh, str(floating_ip_1), [
                                            volumes[0]], [mount_points[0]])
            time.sleep(5)
            md5sums_after_1clickrestore = self.calcmd5sum(
                floating_ip_1, mount_points[0])
            LOG.debug("MD5SUMS after one click restore : {}".format(
                md5sums_after_1clickrestore))
            ssh.close()

            if md5sums_before_incremental[str(floating_ip_1)] == \
                    md5sums_after_1clickrestore[str(floating_ip_1)]:
                LOG.debug("***MD5SUMS MATCH***")
                reporting.add_test_step(
                    "Md5 Verification for volume", tvaultconf.PASS)
                tests[2][1] = 1
                reporting.set_test_script_status(tvaultconf.PASS)
                reporting.test_case_to_write()
            else:
                LOG.debug("***MD5SUMS DON'T MATCH***")
                reporting.add_test_step(
                    "Md5 Verification for volume", tvaultconf.FAIL)
                reporting.set_test_script_status(tvaultconf.FAIL)
                reporting.test_case_to_write()

            restored_volumes = []
            restored_volumes = self.get_restored_volume_list(restore_id_3)
            LOG.debug("Restored volumes : ")
            LOG.debug(restored_volumes)

            if (tvaultconf.cleanup):
                self.addCleanup(self.restore_delete, workload_id,
                                incr_snapshot_id, restore_id_3)
                time.sleep(30)
                self.addCleanup(self.delete_restored_vms,
                                vm_list, restored_volumes)

        except Exception as e:
            LOG.error("Exception: " + str(e))
            if (deleted == 0):
                try:
                    self.delete_vm(vm_id)
                except BaseException:
                    pass
                time.sleep(10)
                try:
                    self.delete_volume(volume_id)
                    self.delete_volume(boot_volume_id)
                except BaseException:
                    pass
            for test in tests:
                if test[1] != 1:
                    reporting.add_test_script(test[0])
                    reporting.set_test_script_status(tvaultconf.FAIL)
                    reporting.test_case_to_write()
Example #2
    def test_1_volume_volume(self):
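        """CLI-driven variant of the volume-backed VM backup/restore test.

        Same overall flow as above, but the full and incremental snapshots and
        the in-place restore are triggered through workloadmgr CLI commands,
        and the selective-restore payload is assembled by hand instead of via
        create_restore_json().
        """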
        try:
            ### VM and Workload ###

            reporting.add_test_script(str(__name__))

            deleted = 0
            global volumes
            mount_points = ["mount_data_a", "mount_data_b"]
            md5sums_dir_before = {}

            #Create Keypair
            kp = self.create_key_pair(tvaultconf.key_pair_name,
                                      keypair_cleanup=True)
            LOG.debug("Key_pair : " + str(kp))

            #Create bootable volume
            boot_volume_id = self.create_volume(
                image_id=CONF.compute.image_ref, volume_cleanup=False)
            self.set_volume_as_bootable(boot_volume_id)
            LOG.debug("Bootable Volume ID : " + str(boot_volume_id))

            self.block_mapping_details = [{
                "source_type": "volume",
                "delete_on_termination": "false",
                "boot_index": 0,
                "uuid": boot_volume_id,
                "destination_type": "volume"
            }]

            #Create instance
            vm_id = self.create_vm(
                key_pair=kp,
                image_id="",
                block_mapping_data=self.block_mapping_details,
                vm_cleanup=False)
            LOG.debug("VM ID : " + str(vm_id))
            time.sleep(30)

            #Create and attach volume
            volume_id = self.create_volume(
                volume_type_id=CONF.volume.volume_type_id,
                volume_cleanup=False)
            LOG.debug("Volume ID: " + str(volume_id))
            volumes = tvaultconf.volumes_parts

            self.attach_volume(volume_id, vm_id, attach_cleanup=False)
            LOG.debug("Volume attached")

            #Assign floating IP
            floating_ip_1 = self.assign_floating_ips(vm_id, False)
            LOG.debug("Assigned floating IP : " + str(floating_ip_1))
            LOG.debug("Sleeping for 40 sec")
            time.sleep(40)

            #Adding data and calculating md5sums
            self.data_ops(floating_ip_1, mount_points[0], 3)
            LOG.debug("Created disk and mounted the attached volume")

            md5sums_dir_before = self.calcmd5sum(floating_ip_1,
                                                 mount_points[0])
            LOG.debug("MD5sums for directory on original vm : " +
                      str(md5sums_dir_before))

            workload_create = command_argument_string.workload_create + " --instance instance-id=" + str(
                vm_id)
            rc = cli_parser.cli_returncode(workload_create)
            if rc != 0:
                reporting.add_test_step("Execute workload-create command",
                                        tvaultconf.FAIL)
                raise Exception(
                    "Workload-create command did not execute correctly")
            else:
                reporting.add_test_step("Execute workload-create command",
                                        tvaultconf.PASS)
                LOG.debug("Workload-create command executed correctly")

            time.sleep(10)
            workload_id = query_data.get_workload_id(tvaultconf.workload_name)
            LOG.debug("Workload ID: " + str(workload_id))
            if (workload_id is not None):
                self.wait_for_workload_tobe_available(workload_id)
                if (self.getWorkloadStatus(workload_id) == "available"):
                    reporting.add_test_step("Create workload", tvaultconf.PASS)
                else:
                    reporting.add_test_step("Create workload", tvaultconf.FAIL)
                    reporting.set_test_script_status(tvaultconf.FAIL)
            else:
                reporting.add_test_step("Create workload", tvaultconf.FAIL)
                reporting.set_test_script_status(tvaultconf.FAIL)

            if (tvaultconf.cleanup):
                self.addCleanup(self.workload_delete, workload_id)

            ### Full Snapshot ###

            self.created = False

            #Create snapshot with CLI command
            create_snapshot = command_argument_string.snapshot_create + workload_id
            LOG.debug("Create snapshot command: " + str(create_snapshot))
            rc = cli_parser.cli_returncode(create_snapshot)
            if rc != 0:
                reporting.add_test_step(
                    "Execute workload-snapshot command with --full",
                    tvaultconf.FAIL)
                raise Exception(
                    "Command did not execute correctly for full snapshot")
            else:
                reporting.add_test_step(
                    "Execute workload-snapshot command with --full",
                    tvaultconf.PASS)
                LOG.debug("Command executed correctly for full snapshot")

            snapshot_id = query_data.get_inprogress_snapshot_id(workload_id)
            LOG.debug("Snapshot ID: " + str(snapshot_id))
            wc = self.wait_for_snapshot_tobe_available(workload_id,
                                                       snapshot_id)
            if (str(wc) == "available"):
                reporting.add_test_step("Full snapshot", tvaultconf.PASS)
                self.created = True
            else:
                if (str(wc) == "error"):
                    pass
            if (self.created == False):
                reporting.add_test_step("Full snapshot", tvaultconf.FAIL)
                raise Exception("Workload snapshot did not get created")

            if (tvaultconf.cleanup):
                self.addCleanup(self.snapshot_delete, workload_id, snapshot_id)

            LOG.debug("Sleeping for 40s")
            time.sleep(40)

            #Add some more data to files on VM
            ssh = self.SshRemoteMachineConnectionWithRSAKey(str(floating_ip_1))
            self.addCustomfilesOnLinuxVM(ssh, mount_points[0], 2)
            ssh.close()

            ### Incremental snapshot ###

            self.created = False

            #Create incremental snapshot using CLI command
            create_snapshot = command_argument_string.incr_snapshot_create + workload_id
            LOG.debug("Create snapshot command: " + str(create_snapshot))
            rc = cli_parser.cli_returncode(create_snapshot)
            if rc != 0:
                reporting.add_test_step("Execute workload-snapshot command",
                                        tvaultconf.FAIL)
                raise Exception("Command did not execute correctly")
            else:
                reporting.add_test_step("Execute workload-snapshot command",
                                        tvaultconf.PASS)
                LOG.debug("Command executed correctly")

            incr_snapshot_id = query_data.get_inprogress_snapshot_id(
                workload_id)
            LOG.debug("Incremental Snapshot ID: " + str(incr_snapshot_id))
            #Wait for incremental snapshot to complete
            wc = self.wait_for_snapshot_tobe_available(workload_id,
                                                       incr_snapshot_id)
            if (str(wc) == "available"):
                reporting.add_test_step("Incremental snapshot",
                                        tvaultconf.PASS)
                LOG.debug(
                    "Workload incremental snapshot successfully completed")
                self.created = True
            if not self.created:
                reporting.add_test_step("Incremental snapshot",
                                        tvaultconf.FAIL)
                raise Exception(
                    "Workload incremental snapshot did not get created")

            if (tvaultconf.cleanup):
                self.addCleanup(self.snapshot_delete, workload_id,
                                incr_snapshot_id)

            ### Selective restore ###

            instance_details = []
            network_details = []
            restored_vm_details = []
            vms_details_after_restore = []
            temp_vdisks_data = []

            int_net_1_name = self.get_net_name(
                CONF.network.internal_network_id)
            LOG.debug("int_net_1_name" + str(int_net_1_name))
            int_net_1_subnets = self.get_subnet_id(
                CONF.network.internal_network_id)
            LOG.debug("int_net_1_subnet" + str(int_net_1_subnets))

            temp_vdisks_data.append([{
                'id': volume_id,
                'availability_zone': CONF.volume.volume_availability_zone,
                'new_volume_type': CONF.volume.volume_type
            }])

            LOG.debug("Vdisks details for restore" + str(temp_vdisks_data))

            #Create instance details for restore.json
            vm_name = "tempest_test_vm_" + vm_id + "_selectively_restored"
            temp_instance_data = {
                'id': vm_id,
                'availability_zone': CONF.compute.vm_availability_zone,
                'include': True,
                'restore_boot_disk': True,
                'name': vm_name,
                'vdisks': temp_vdisks_data[0]
            }
            instance_details.append(temp_instance_data)
            LOG.debug("Instance details for restore: " + str(instance_details))

            #Create network details for restore.json
            snapshot_network = {
                'id': CONF.network.internal_network_id,
                'subnet': {
                    'id': int_net_1_subnets
                }
            }
            target_network = {
                'name': int_net_1_name,
                'id': CONF.network.internal_network_id,
                'subnet': {
                    'id': int_net_1_subnets
                }
            }
            network_details = [{
                'snapshot_network': snapshot_network,
                'target_network': target_network
            }]
            LOG.debug("Network details for restore: " + str(network_details))
            LOG.debug("Snapshot id : " + str(snapshot_id))

            #Trigger selective restore
            restore_id_1 = self.snapshot_selective_restore(
                workload_id,
                snapshot_id,
                restore_name=tvaultconf.restore_name,
                restore_cleanup=True,
                instance_details=instance_details,
                network_details=network_details)
            self.wait_for_snapshot_tobe_available(workload_id, snapshot_id)
            if (self.getRestoreStatus(workload_id, snapshot_id,
                                      restore_id_1) == "available"):
                reporting.add_test_step("Selective restore", tvaultconf.PASS)
            else:
                reporting.add_test_step("Selective restore", tvaultconf.FAIL)
                raise Exception("Selective restore failed")

            #Fetch instance details after restore
            vm_list = self.get_restored_vm_list(restore_id_1)
            LOG.debug("Restored vm(selective) ID : " + str(vm_list))
            time.sleep(60)
            floating_ip_2 = self.assign_floating_ips(vm_list[0], True)
            LOG.debug("Floating ip assigned to selective restore vm -> " +
                      str(floating_ip_2))
            md5sums_dir_after = {}
            ssh = self.SshRemoteMachineConnectionWithRSAKey(str(floating_ip_2))
            self.execute_command_disk_mount(ssh, str(floating_ip_2),
                                            [volumes[0]], [mount_points[0]])
            time.sleep(5)
            md5sums_dir_after = self.calcmd5sum(floating_ip_2, mount_points[0])
            ssh.close()

            LOG.debug("MD5SUMS before restore")
            LOG.debug(md5sums_dir_before[str(floating_ip_1)])
            LOG.debug("MD5SUMS after restore")
            LOG.debug(md5sums_dir_after[str(floating_ip_2)])

            if md5sums_dir_before[str(floating_ip_1)] == md5sums_dir_after[str(
                    floating_ip_2)]:
                LOG.debug("***MDSUMS MATCH***")
                reporting.add_test_step("Md5 Verification for volume",
                                        tvaultconf.PASS)
            else:
                LOG.debug("***MDSUMS DON'T MATCH***")
                reporting.add_test_step("Md5 Verification for volume",
                                        tvaultconf.FAIL)
                reporting.set_test_script_status(tvaultconf.FAIL)

            for id in range(len(vm_list)):
                restored_vm_details.append(self.get_vm_details(vm_list[id]))
            LOG.debug("Restored vm details list: " + str(restored_vm_details))

            vms_details_after_restore = self.get_vms_details_list(
                restored_vm_details)
            LOG.debug("VM details after restore: " +
                      str(vms_details_after_restore))
            #Compare the data before and after restore
            for i in range(len(vms_details_after_restore)):
                if (vms_details_after_restore[i]['network_name'] ==
                        int_net_1_name):
                    reporting.add_test_step(
                        "Network verification for instance-" + str(i + 1),
                        tvaultconf.PASS)
                else:
                    LOG.error("Expected network: " + str(int_net_1_name))
                    LOG.error(
                        "Restored network: " +
                        str(vms_details_after_restore[i]['network_name']))
                    reporting.add_test_step(
                        "Network verification for instance-" + str(i + 1),
                        tvaultconf.FAIL)
                    reporting.set_test_script_status(tvaultconf.FAIL)

            ### In-place restore ###

            #Create in-place restore with CLI command
            restore_command = command_argument_string.inplace_restore + str(
                tvaultconf.restore_filename) + " " + str(incr_snapshot_id)

            LOG.debug("inplace restore cli command: " + str(restore_command))
            #Restore.json restoring the boot disk and the attached volume in place
            restore_json = json.dumps({
                'openstack': {
                    'instances': [{
                        'restore_boot_disk': True,
                        'include': True,
                        'id': vm_id,
                        'vdisks': [{
                            'restore_cinder_volume': True,
                            'id': volume_id,
                            'new_volume_type': CONF.volume.volume_type
                        }],
                    }],
                    'networks_mapping': {
                        'networks': []
                    }
                },
                'restore_type': 'inplace',
                'type': 'openstack'
            })
            LOG.debug("restore.json for inplace restore: " + str(restore_json))
            #Create Restore.json
            with open(tvaultconf.restore_filename, 'w') as f:
                f.write(str(json.loads(restore_json)))
            rc = cli_parser.cli_returncode(restore_command)
            if rc != 0:
                reporting.add_test_step("Triggering In-Place restore via CLI",
                                        tvaultconf.FAIL)
                raise Exception("Command did not execute correctly")
            else:
                reporting.add_test_step("Triggering In-Place restore via CLI",
                                        tvaultconf.PASS)
                LOG.debug("Command executed correctly")

            #get restore id from database
            restore_id_2 = query_data.get_snapshot_restore_id(incr_snapshot_id)
            self.wait_for_snapshot_tobe_available(workload_id,
                                                  incr_snapshot_id)

            #get in-place restore status
            if (self.getRestoreStatus(workload_id, incr_snapshot_id,
                                      restore_id_2) == "available"):
                reporting.add_test_step("In-place restore", tvaultconf.PASS)
            else:
                reporting.add_test_step("In-place restore", tvaultconf.FAIL)
                raise Exception("In-place restore failed")

            #Fetch instance details after restore
            vm_list = []
            vm_list = self.get_restored_vm_list(restore_id_2)
            LOG.debug("Restored vm(In-place) ID : " + str(vm_list))

            time.sleep(40)
            md5sums_dir_after = {}
            ssh = self.SshRemoteMachineConnectionWithRSAKey(str(floating_ip_1))
            self.execute_command_disk_mount(ssh, str(floating_ip_1),
                                            [volumes[0]], [mount_points[0]])
            time.sleep(5)
            md5sums_dir_after = self.calcmd5sum(floating_ip_1, mount_points[0])
            ssh.close()

            LOG.debug("<----md5sums_dir_before---->")
            LOG.debug(md5sums_dir_before[str(floating_ip_1)])
            LOG.debug("<----md5sums_dir_after---->")
            LOG.debug(md5sums_dir_after[str(floating_ip_1)])

            if md5sums_dir_before[str(floating_ip_1)] == md5sums_dir_after[str(
                    floating_ip_1)]:
                LOG.debug("***MDSUMS MATCH***")
                reporting.add_test_step("Md5 Verification for volume",
                                        tvaultconf.PASS)
            else:
                LOG.debug("***MDSUMS DON'T MATCH***")
                reporting.add_test_step("Md5 Verification for volume",
                                        tvaultconf.FAIL)
                reporting.set_test_script_status(tvaultconf.FAIL)

            #Delete restore for snapshot
            if (tvaultconf.cleanup):
                self.addCleanup(self.restore_delete, workload_id,
                                incr_snapshot_id, restore_id_2)

            ### One-click restore ###

            mdb = self.calcmd5sum(floating_ip_1, mount_points[0])
            LOG.debug(
                "MD5SUMS before deleting the instance for one click restore : "
                + str(mdb))

            self.disassociate_floating_ip_from_server(floating_ip_1, vm_id)
            self.detach_volume(vm_id, volume_id)

            #Delete the original instance
            self.delete_vm(vm_id)
            LOG.debug(
                "Instance deleted successfully for one click restore : " +
                str(vm_id))
            time.sleep(10)

            #Delete bootable volume of original instance
            self.delete_volume(boot_volume_id)
            LOG.debug("Bootable volume of original instance deleted")

            #Delete volume attached to original instance
            self.delete_volume(volume_id)
            LOG.debug("Volumes deleted successfully for one click restore : " +
                      str(volume_id))

            deleted = 1

            #Create one-click restore using CLI command
            restore_command = command_argument_string.oneclick_restore + " " + snapshot_id
            rc = cli_parser.cli_returncode(restore_command)
            if rc != 0:
                reporting.add_test_step(
                    "Execute snapshot-oneclick-restore command",
                    tvaultconf.FAIL)
                raise Exception("Command did not execute correctly")
            else:
                reporting.add_test_step(
                    "Execute snapshot-oneclick-restore command",
                    tvaultconf.PASS)
                LOG.debug("Command executed correctly")

            restore_id_3 = query_data.get_snapshot_restore_id(snapshot_id)
            LOG.debug("Restore ID: " + str(restore_id_3))

            self.wait_for_snapshot_tobe_available(workload_id, snapshot_id)
            if (self.getRestoreStatus(workload_id, snapshot_id,
                                      restore_id_3) == "available"):
                reporting.add_test_step("One-click restore", tvaultconf.PASS)
                LOG.debug("One-click restore passed")
            else:
                reporting.add_test_step("One-click restore", tvaultconf.FAIL)
                LOG.debug("One-click restore failed")
                raise Exception("One-click restore failed")
            LOG.debug("One-click restore complete")

            #Fetch instance details after restore
            vm_list = []
            vm_list = self.get_restored_vm_list(restore_id_3)
            LOG.debug("Restored vms : " + str(vm_list))

            mda = {}
            ssh = self.SshRemoteMachineConnectionWithRSAKey(str(floating_ip_1))
            self.execute_command_disk_mount(ssh, str(floating_ip_1),
                                            [volumes[0]], [mount_points[0]])
            time.sleep(5)
            mda = self.calcmd5sum(floating_ip_1, mount_points[0])
            LOG.debug(
                "MD5SUMS after one click restore : " + str(mda))
            ssh.close()

            if mdb[str(floating_ip_1)] == mda[str(floating_ip_1)]:
                LOG.debug("***MDSUMS MATCH***")
                reporting.add_test_step("Md5 Verification for volume",
                                        tvaultconf.PASS)
            else:
                LOG.debug("***MDSUMS DON'T MATCH***")
                reporting.add_test_step("Md5 Verification for volume",
                                        tvaultconf.FAIL)
                reporting.set_test_script_status(tvaultconf.FAIL)

            restored_volumes = []
            restored_volumes = self.get_restored_volume_list(restore_id_3)
            LOG.debug("Restored volumes : ")
            LOG.debug(restored_volumes)

            if (tvaultconf.cleanup):
                self.addCleanup(self.restore_delete, workload_id, snapshot_id,
                                restore_id_3)
                time.sleep(30)
                self.addCleanup(self.delete_restored_vms, vm_list,
                                restored_volumes)

            reporting.test_case_to_write()

        except Exception as e:
            LOG.error("Exception: " + str(e))
            if (deleted == 0):
                self.disassociate_floating_ip_from_server(floating_ip_1, vm_id)
                self.detach_volume(vm_id, volume_id)
                self.delete_vm(vm_id)
                time.sleep(10)
                self.delete_volume(volume_id)
                self.delete_volume(boot_volume_id)
            reporting.set_test_script_status(tvaultconf.FAIL)
            reporting.test_case_to_write()
Example #3
    def test_tvault_inplace_cli_delete_vm(self):
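        """In-place restore via CLI after deleting a VM and a volume.

        Fills data on the attached volumes and records md5sums, deletes the
        first workload instance and one volume, then triggers an in-place
        restore from a hand-written restore.json (first instance excluded,
        only one cinder volume of the second instance restored).  The md5sums
        on the second instance are then compared: mount_data_b is expected to
        match and mount_data_c to differ.
        """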
        try:

            volumes = ["/dev/vdb", "/dev/vdc"]
            mount_points = ["mount_data_b", "mount_data_c"]

            #Fill some data on each of the volumes attached
            ssh = self.SshRemoteMachineConnectionWithRSAKey(
                str(self.floating_ips_list[0]))
            self.addCustomSizedfilesOnLinux(ssh, mount_points[0], 1)
            ssh.close()

            ssh = self.SshRemoteMachineConnectionWithRSAKey(
                str(self.floating_ips_list[1]))
            self.addCustomSizedfilesOnLinux(ssh, mount_points[0], 1)
            self.addCustomSizedfilesOnLinux(ssh, mount_points[1], 1)
            ssh.close()

            #Fill some more data on each volume attached
            tree = lambda: collections.defaultdict(tree)
            self.md5sums_dir_before = tree()

            ssh = self.SshRemoteMachineConnectionWithRSAKey(
                str(self.floating_ips_list[0]))
            self.md5sums_dir_before[str(self.floating_ips_list[0])][str(
                mount_points[0])] = self.calculatemmd5checksum(
                    ssh, mount_points[0])
            ssh.close()

            ssh = self.SshRemoteMachineConnectionWithRSAKey(
                str(self.floating_ips_list[1]))
            self.md5sums_dir_before[str(self.floating_ips_list[1])][str(
                mount_points[0])] = self.calculatemmd5checksum(
                    ssh, mount_points[0])
            self.md5sums_dir_before[str(self.floating_ips_list[1])][str(
                mount_points[1])] = self.calculatemmd5checksum(
                    ssh, mount_points[1])
            ssh.close()

            LOG.debug("md5sums_dir_before" + str(self.md5sums_dir_before))

            #delete vm and delete on volume
            self.delete_vm(self.workload_instances[0])
            self.delete_volume(self.volumes_list[1])

            #Create in-place restore with CLI command
            restore_command = command_argument_string.inplace_restore + str(
                tvaultconf.restore_filename) + " " + str(self.incr_snapshot_id)

            LOG.debug("inplace restore cli command: " + str(restore_command))
            #Restore.json with only volume 2 excluded
            restore_json = json.dumps({
                'openstack': {
                    'instances': [{
                        'restore_boot_disk': True,
                        'include': False,
                        'id': self.workload_instances[0],
                        'vdisks': []
                    }, {
                        'restore_boot_disk': True,
                        'include': True,
                        'id': self.workload_instances[1],
                        'vdisks': [{
                            'restore_cinder_volume': True,
                            'id': self.volumes_list[2],
                            'new_volume_type': CONF.volume.volume_type
                        }]
                    }],
                    'networks_mapping': {
                        'networks': []
                    }
                },
                'restore_type': 'inplace',
                'type': 'openstack'
            })
            LOG.debug("restore.json for inplace restore: " + str(restore_json))
            #Create Restore.json
            with open(tvaultconf.restore_filename, 'w') as f:
                f.write(str(yaml.safe_load(restore_json)))
            rc = cli_parser.cli_returncode(restore_command)
            if rc != 0:
                reporting.add_test_step("Triggering In-Place restore via CLI",
                                        tvaultconf.FAIL)
                raise Exception("Command did not execute correctly")
            else:
                reporting.add_test_step("Triggering In-Place restore via CLI",
                                        tvaultconf.PASS)
                LOG.debug("Command executed correctly")

            #get restore id from database
            self.restore_id = query_data.get_snapshot_restore_id(
                self.incr_snapshot_id)
            self.wait_for_snapshot_tobe_available(self.workload_id,
                                                  self.incr_snapshot_id)

            #get in-place restore status
            if (self.getRestoreStatus(self.workload_id, self.incr_snapshot_id,
                                      self.restore_id) == "available"):
                reporting.add_test_step("In-place restore", tvaultconf.PASS)
            else:
                reporting.add_test_step("In-place restore", tvaultconf.FAIL)
                raise Exception("In-place restore failed")

            # mount volumes after restore
            ssh = self.SshRemoteMachineConnectionWithRSAKey(
                str(self.floating_ips_list[1]))
            self.execute_command_disk_mount(ssh,
                                            str(self.floating_ips_list[1]),
                                            volumes, mount_points)
            ssh.close()

            # calculate md5 after inplace restore
            tree = lambda: collections.defaultdict(tree)
            md5_sum_after_in_place_restore = tree()

            ssh = self.SshRemoteMachineConnectionWithRSAKey(
                str(self.floating_ips_list[1]))
            md5_sum_after_in_place_restore[str(self.floating_ips_list[1])][str(
                mount_points[0])] = self.calculatemmd5checksum(
                    ssh, mount_points[0])
            md5_sum_after_in_place_restore[str(self.floating_ips_list[1])][str(
                mount_points[1])] = self.calculatemmd5checksum(
                    ssh, mount_points[1])
            ssh.close()

            LOG.debug("md5_sum_after_in_place_restore" +
                      str(md5_sum_after_in_place_restore))

            # md5 sum verification
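            # Expected outcome (per the asserts below): the checksum on
            # mount_points[0] is unchanged across the in-place restore, while
            # the checksum on mount_points[1] has changed.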
            if self.md5sums_dir_before[str(self.floating_ips_list[1])][str(
                    mount_points[0])] == md5_sum_after_in_place_restore[str(
                        self.floating_ips_list[1])][str(mount_points[0])]:
                reporting.add_test_step("Md5 Verification for volume 1",
                                        tvaultconf.PASS)
            else:
                reporting.add_test_step("Md5 Verification for volume 1",
                                        tvaultconf.FAIL)
                reporting.set_test_script_status(tvaultconf.FAIL)

            if self.md5sums_dir_before[str(self.floating_ips_list[1])][str(
                    mount_points[1])] != md5_sum_after_in_place_restore[str(
                        self.floating_ips_list[1])][str(mount_points[1])]:
                reporting.add_test_step("Md5 Verification for volume 2",
                                        tvaultconf.PASS)
            else:
                reporting.add_test_step("Md5 Verification for volume 2",
                                        tvaultconf.FAIL)
                reporting.set_test_script_status(tvaultconf.FAIL)

            #Delete restore for snapshot
            self.restored_volumes = self.get_restored_volume_list(
                self.restore_id)
            if tvaultconf.cleanup:
                self.restore_delete(self.workload_id, self.incr_snapshot_id,
                                    self.restore_id)
                LOG.debug("Snapshot Restore deleted successfully")

                #Delete restored volumes and volume snapshots
                self.delete_volumes(self.restored_volumes)

            reporting.test_case_to_write()

        except Exception as e:
            LOG.error("Exception: " + str(e))
            reporting.set_test_script_status(tvaultconf.FAIL)
            reporting.test_case_to_write()
Example #4
    def test_tvault_rbac_backuprole_touser_policyjson(self):
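        """RBAC check for backup role permissions defined in policy.json.

        After change_policyjson_file() maps the backup API rules to the
        'backup' role (an assumption based on its arguments), a backup-role
        user should be able to create a workload, snapshot and one-click
        restore via CLI, while admin and non-admin (default role) users should
        be denied with the policy error messages asserted below.
        """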
        try:
            workload_create_error_str = "Policy doesn't allow workload:workload_create to be performed."
            snapshot_create_error_str = "Policy doesn't allow workload:workload_snapshot to be performed."
            restore_create_error_str = "Policy doesn't allow snapshot:snapshot_restore to be performed."
            workload_delete_error_str = "Policy doesn't allow workload:workload_delete to be performed."
            snapshot_delete_error_str = "Policy doesn't allow snapshot:snapshot_delete to be performed."
            restore_delete_error_str = "Policy doesn't allow restore:restore_delete to be performed."

            # Change policy.json file on tvault to change role and rule
            self.change_policyjson_file("backup", "backup_api")
            self.instances_id = []

            # Create volume, Launch an Instance
            self.volumes_id = self.create_volume(volume_cleanup=False)
            LOG.debug("Volume-1 ID: " + str(self.volumes_id))
            self.instances_id.append(self.create_vm(vm_cleanup=False))
            LOG.debug("VM-1 ID: " + str(self.instances_id[0]))
            self.attach_volume(self.volumes_id, self.instances_id[0])
            LOG.debug("Volume attached")

            # Use backupuser credentials
            os.environ['OS_USERNAME'] = CONF.identity.backupuser
            os.environ['OS_PASSWORD'] = CONF.identity.backupuser_password

            # Create workload with CLI by backup role
            workload_create = command_argument_string.workload_create + \
                " --instance instance-id=" + str(self.instances_id[0])
            error = cli_parser.cli_error(workload_create)
            if error and (str(error.strip('\n')).find('ERROR') != -1):
                LOG.debug("Workload creation unsuccessful by backup role")
                reporting.add_test_step(
                    "Execute workload_create command by backup role",
                    tvaultconf.FAIL)
                raise Exception(
                    "RBAC policy fails for workload creation by backup role")
            else:
                LOG.debug("Workload created successfully by backup role")
                reporting.add_test_step(
                    "Execute workload_create command by backup role",
                    tvaultconf.PASS)
                time.sleep(10)
                self.wid1 = query_data.get_workload_id(
                    tvaultconf.workload_name)
                workload_available = self.wait_for_workload_tobe_available(
                    self.wid1)

            # Run snapshot_create CLI by backup role
            snapshot_create = command_argument_string.snapshot_create + \
                str(self.wid1)
            LOG.debug("snapshot_create command: " + str(snapshot_create))
            error = cli_parser.cli_error(snapshot_create)
            if error and (str(error.strip('\n')).find('ERROR') != -1):
                reporting.add_test_step(
                    "Execute snapshot_create command by backup role",
                    tvaultconf.FAIL)
                raise Exception(
                    "Command snapshot_create did not execute correctly by backup role")
            else:
                reporting.add_test_step(
                    "Execute snapshot_create command by backup role",
                    tvaultconf.PASS)
                LOG.debug(
                    "Command snapshot_create executed correctly by backup role")
                self.snapshot_id1 = query_data.get_inprogress_snapshot_id(
                    self.wid1)
                wc = self.wait_for_snapshot_tobe_available(
                    self.wid1, self.snapshot_id1)

            # Delete the original instance
            self.delete_vm(self.instances_id[0])
            LOG.debug("Instance deleted successfully for restore")

            # Delete corresponding volume
            self.delete_volume(self.volumes_id)
            LOG.debug("Volume deleted successfully for restore")

            # Create one-click restore using CLI command by backup role
            restore_command = command_argument_string.oneclick_restore + \
                " " + str(self.snapshot_id1)
            error = cli_parser.cli_error(restore_command)
            if error and (str(error.strip('\n')).find('ERROR') != -1):
                reporting.add_test_step(
                    "Execute snapshot-oneclick-restore command by backup role",
                    tvaultconf.FAIL)
                raise Exception(
                    "Command one-click restore did not execute correctly by backup role")
            else:
                reporting.add_test_step(
                    "Execute snapshot-oneclick-restore command by backup role",
                    tvaultconf.PASS)
                LOG.debug(
                    "Command one-click restore executed correctly backup role")
                wc = self.wait_for_snapshot_tobe_available(
                    self.wid1, self.snapshot_id1)
                self.restore_id1 = query_data.get_snapshot_restore_id(
                    self.snapshot_id1)
                LOG.debug("Restore ID: " + str(self.restore_id1))
                self.restore_vm_id1 = self.get_restored_vm_list(
                    self.restore_id1)
                LOG.debug("Restore VM ID: " + str(self.restore_vm_id1))
                self.restore_volume_id1 = self.get_restored_volume_list(
                    self.restore_id1)
                LOG.debug("Restore Volume ID: " + str(self.restore_volume_id1))


            # Use admin credentials
            os.environ['OS_USERNAME'] = CONF.identity.username
            os.environ['OS_PASSWORD'] = CONF.identity.password

            # Create workload with CLI by admin role
            workload_create = command_argument_string.workload_create + \
                " --instance instance-id=" + str(self.restore_vm_id1[0])
            error = cli_parser.cli_error(workload_create)
            if error and (str(error.strip('\n')).find(workload_create_error_str) != -1):
                LOG.debug(
                    "Command workload_create did not execute correctly by admin role")
                reporting.add_test_step(
                    "Can not execute workload_create command by admin role",
                    tvaultconf.PASS)
            else:
                reporting.add_test_step(
                    "Can not execute workload_create command by admin role",
                    tvaultconf.FAIL)
                raise Exception(
                    "Command workload_create executed correctly by admin role")

            # Run snapshot_create CLI by admin role
            snapshot_create = command_argument_string.snapshot_create + \
                str(self.wid1)
            LOG.debug("snapshot_create command: " + str(snapshot_create))
            error = cli_parser.cli_error(snapshot_create)
            if error and (str(error.strip('\n')).find(snapshot_create_error_str) != -1):
                reporting.add_test_step(
                    "Can not execute snapshot_create command by admin role",
                    tvaultconf.PASS)
                LOG.debug(
                    "Command snapshot_create did not execute correctly by admin role")
            else:
                reporting.add_test_step(
                    "Can not execute snapshot_create command by admin role",
                    tvaultconf.FAIL)
                raise Exception(
                    "Command snapshot_create executed correctly by admin role")

            # Create one-click restore using CLI command by admin role
            restore_command = command_argument_string.oneclick_restore + \
                " " + str(self.snapshot_id1)
            error = cli_parser.cli_error(restore_command)
            if error and (str(error.strip('\n')).find(restore_create_error_str) != -1):
                reporting.add_test_step(
                    "Can not execute restore_create command by admin role",
                    tvaultconf.PASS)
                LOG.debug(
                    "Command restore_create did not execute correctly by admin role")
            else:
                reporting.add_test_step(
                    "Can not execute restore_create command by admin role",
                    tvaultconf.FAIL)
                raise Exception(
                    "Command restore_create executed correctly by admin role")

            # Run restore_delete CLI by admin role
            restore_delete = command_argument_string.restore_delete + \
                str(self.restore_id1)
            error = cli_parser.cli_error(restore_delete)
            if error and (str(error.strip('\n')).find(restore_delete_error_str) != -1):
                reporting.add_test_step(
                    "Can not execute restore_delete command by admin role",
                    tvaultconf.PASS)
                LOG.debug(
                    "Command restore_delete did not execute correctly by admin role")
            else:
                reporting.add_test_step(
                    "Can not execute restore_delete command by admin role",
                    tvaultconf.FAIL)
                raise Exception(
                    "Command restore_delete executed correctly by admin role")

            # Run snapshot_delete CLI by admin role
            snapshot_delete = command_argument_string.snapshot_delete + \
                str(self.snapshot_id1)
            error = cli_parser.cli_error(snapshot_delete)
            if error and (str(error.strip('\n')).find(snapshot_delete_error_str) != -1):
                reporting.add_test_step(
                    "Can not execute snapshot_delete command by admin role",
                    tvaultconf.PASS)
                LOG.debug(
                    "Command snapshot_delete did not execute correctly by admin role")
            else:
                reporting.add_test_step(
                    "Can not execute snapshot_delete command by admin role",
                    tvaultconf.FAIL)
                raise Exception(
                    "Command snapshot_delete executed correctly by admin role")

            # Delete workload with CLI by admin role
            workload_delete = command_argument_string.workload_delete + \
                str(self.wid1)
            error = cli_parser.cli_error(workload_delete)
            if error and (str(error.strip('\n')).find(workload_delete_error_str) != -1):
                reporting.add_test_step(
                    "Can not execute workload_delete command by admin role",
                    tvaultconf.PASS)
                LOG.debug(
                    "Command workload_delete did not execute correctly by admin role")
            else:
                reporting.add_test_step(
                    "Can not execute workload_delete command by admin role",
                    tvaultconf.FAIL)
                raise Exception(
                    "Command workload_delete executed correctly by admin role")

            # Use nonadmin credentials
            os.environ['OS_USERNAME'] = CONF.identity.nonadmin_user
            os.environ['OS_PASSWORD'] = CONF.identity.nonadmin_password
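            # Switch the CLI to the non-admin (default role) user; these
            # commands should likewise be blocked by the RBAC policy.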

            # Create workload with CLI by default role
            workload_create = command_argument_string.workload_create + \
                " --instance instance-id=" + str(self.restore_vm_id1)
            error = cli_parser.cli_error(workload_create)
            if error and (str(error.strip('\n')).find(workload_create_error_str) != -1):
                LOG.debug(
                    "Command workload_create did not execute correctly by default role")
                reporting.add_test_step(
                    "Can not execute workload_create command by default role",
                    tvaultconf.PASS)
            else:
                reporting.add_test_step(
                    "Can not execute workload_create command by default role",
                    tvaultconf.FAIL)
                raise Exception(
                    "Command workload_create executed correctly by default role")

            # Run snapshot_create CLI by default role
            snapshot_create = command_argument_string.snapshot_create + \
                str(self.wid1)
            error = cli_parser.cli_error(snapshot_create)
            if error and (str(error.strip('\n')).find(snapshot_create_error_str) != -1):
                reporting.add_test_step(
                    "Can not execute snapshot_create command by default role",
                    tvaultconf.PASS)
                LOG.debug(
                    "Command snapshot_create did not execute correctly by default role")
            else:
                reporting.add_test_step(
                    "Can not execute snapshot_create command by default role",
                    tvaultconf.FAIL)
                raise Exception(
                    "Command snapshot_create executed correctly by default role")

            # Create one-click restore using CLI by default role
            restore_command = command_argument_string.oneclick_restore + \
                " " + str(self.snapshot_id1)
            error = cli_parser.cli_error(restore_command)
            if error and (str(error.strip('\n')).find(restore_create_error_str) != -1):
                reporting.add_test_step(
                    "Can not execute restore_create command by default role",
                    tvaultconf.PASS)
                LOG.debug(
                    "Command restore_create did not execute correctly by default role")
            else:
                reporting.add_test_step(
                    "Can not execute restore_create command by default role",
                    tvaultconf.FAIL)
                raise Exception(
                    "Command restore_create executed correctly by default role")

            # Run restore_delete CLI by default role
            restore_delete = command_argument_string.restore_delete + \
                str(self.restore_id1)
            error = cli_parser.cli_error(restore_delete)
            if error and (str(error.strip('\n')).find(restore_delete_error_str) != -1):
                reporting.add_test_step(
                    "Can not execute restore_delete command by default role",
                    tvaultconf.PASS)
                LOG.debug(
                    "Command restore_delete did not execute correctly by default role")
            else:
                reporting.add_test_step(
                    "Can not execute restore_delete command by default role",
                    tvaultconf.FAIL)
                raise Exception(
                    "Command restore_delete executed correctly by default role")

            # Run snapshot_delete CLI by default role
            snapshot_delete = command_argument_string.snapshot_delete + \
                str(self.snapshot_id1)
            LOG.debug("snapshot_delete command: " + str(snapshot_delete))
            error = cli_parser.cli_error(snapshot_delete)
            if error and (str(error.strip('\n')).find(snapshot_delete_error_str) != -1):
                reporting.add_test_step(
                    "Can not execute snapshot_delete command by default role",
                    tvaultconf.PASS)
                LOG.debug(
                    "Command snapshot_delete did not execute correctly by default role")
            else:
                reporting.add_test_step(
                    "Can not execute snapshot_delete command by default role",
                    tvaultconf.FAIL)
                raise Exception(
                    "Command snapshot_delete executed correctly by default role")

            # Delete workload with CLI by default role
            workload_delete = command_argument_string.workload_delete + \
                str(self.wid1)
            error = cli_parser.cli_error(workload_delete)
            if error and (str(error.strip('\n')).find(workload_delete_error_str) != -1):
                reporting.add_test_step(
                    "Can not execute workload_delete command by default role",
                    tvaultconf.PASS)
                LOG.debug(
                    "Command workload_delete did not execute correctly by default role")
            else:
                reporting.add_test_step(
                    "Can not execute workload_delete command by default role",
                    tvaultconf.FAIL)
                raise Exception(
                    "Command workload_delete executed correctly by default role")

            # Use backupuser credentials
            os.environ['OS_USERNAME'] = CONF.identity.backupuser
            os.environ['OS_PASSWORD'] = CONF.identity.backupuser_password
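            # Switch the CLI to the backup-role user, which holds the required
            # permissions, so the remaining delete commands are expected to
            # succeed.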

            # Run restore_delete CLI by backup role
            restore_delete = command_argument_string.restore_delete + \
                str(self.restore_id1)
            error = cli_parser.cli_error(restore_delete)
            if error and (str(error.strip('\n')).find(restore_delete_error_str) != -1):
                reporting.add_test_step(
                    "Execute  restore_delete command by backup role",
                    tvaultconf.FAIL)
                raise Exception(
                    "Command  restore_delete did not execute correctly by backup role")
            else:
                reporting.add_test_step(
                    "Execute restore_delete command by backup role",
                    tvaultconf.PASS)
                LOG.debug(
                    "Command restore_delete executed correctly by backup role")
                wc = self.wait_for_snapshot_tobe_available(
                    self.wid1, self.snapshot_id1)
                # Delete restored VM instance and volume
                self.delete_restored_vms(
                    self.restore_vm_id1, self.restore_volume_id1)
                LOG.debug("Restored VMs deleted successfully by backup role")

            # Run snapshot_delete CLI by backup role
            snapshot_delete = command_argument_string.snapshot_delete + \
                str(self.snapshot_id1)
            LOG.debug("snapshot_delete command: " + str(snapshot_delete))
            error = cli_parser.cli_error(snapshot_delete)
            if error and (str(error.strip('\n')).find(snapshot_delete_error_str) != -1):
                reporting.add_test_step(
                    "Execute snapshot_delete command by backup role",
                    tvaultconf.FAIL)
                raise Exception(
                    "Command snapshot_delete did not execute correctly by backup role")
            else:
                reporting.add_test_step(
                    "Execute snapshot_delete command by backup role",
                    tvaultconf.PASS)
                LOG.debug(
                    "Command snapshot_delete executed correctly by backup role")
                workload_available = self.wait_for_workload_tobe_available(
                    self.wid1)

            # Delete workload with CLI by backup role
            workload_delete = command_argument_string.workload_delete + \
                str(self.wid1)
            error = cli_parser.cli_error(workload_delete)
            if error and (str(error.strip('\n')).find(workload_delete_error_str) != -1):
                reporting.add_test_step(
                    "Execute workload_delete command by backup role",
                    tvaultconf.FAIL)
                raise Exception(
                    "RBAC policy fails for workload deletion by backup role")
            else:
                LOG.debug("Workload deleted successfully by backup role")
                reporting.add_test_step(
                    "Execute workload_delete command by backup role",
                    tvaultconf.PASS)

            reporting.test_case_to_write()

        except Exception as e:
            LOG.error("Exception: " + str(e))
            reporting.set_test_script_status(tvaultconf.FAIL)
            reporting.test_case_to_write()
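
The role checks above repeat one pattern: run a workloadmgr CLI command, look for the expected RBAC error string in its output, and report PASS only when the command is blocked. A minimal sketch of that pattern as a reusable helper (the name assert_cli_blocked is hypothetical, not part of this suite):

    def assert_cli_blocked(command, expected_error, step_name):
        # Report PASS when the command is rejected with the expected RBAC
        # error; report FAIL and raise when it unexpectedly succeeds.
        error = cli_parser.cli_error(command)
        if error and expected_error in str(error.strip('\n')):
            reporting.add_test_step(step_name, tvaultconf.PASS)
            LOG.debug("Command blocked as expected: " + str(command))
        else:
            reporting.add_test_step(step_name, tvaultconf.FAIL)
            raise Exception("Command executed unexpectedly: " + str(command))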
Example #5
    def test_1_volume_booted(self):
        try:
            ### Create vm ###
            deleted = 0
            reporting.add_test_script(str(__name__))

            volume_id = self.create_volume(size=tvaultconf.bootfromvol_vol_size, image_id=CONF.compute.image_ref, volume_cleanup=False)
            self.set_volume_as_bootable(volume_id)
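            # The block device mapping below attaches the bootable volume at
            # boot_index 0, so the instance boots from the volume rather than
            # from an image (image_id is left empty).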
            self.block_mapping_details = [{ "source_type": "volume",
                            "delete_on_termination": "false",
                            "boot_index": 0,
                            "uuid": volume_id,
                            "destination_type": "volume"}]
            vm_id = self.create_vm(image_id="", block_mapping_data=self.block_mapping_details, vm_cleanup=False)

            ### Create workload ###

            workload_id=self.workload_create([vm_id],tvaultconf.parallel, workload_cleanup=True)
            LOG.debug("Workload ID: " + str(workload_id))
            if(workload_id != None):
                self.wait_for_workload_tobe_available(workload_id)
                if(self.getWorkloadStatus(workload_id) == "available"):
                    reporting.add_test_step("Create workload", tvaultconf.PASS)
                else:
                    reporting.add_test_step("Create workload", tvaultconf.FAIL)
                    reporting.set_test_script_status(tvaultconf.FAIL)
            else:
                reporting.add_test_step("Create workload", tvaultconf.FAIL)
                reporting.set_test_script_status(tvaultconf.FAIL)
                raise Exception("Workload creation failed") 

            ### Full snapshot ###

            snapshot_id=self.workload_snapshot(workload_id, True, snapshot_cleanup=True)
            LOG.debug("\nworkload id : {}\n".format(str(workload_id)))
            LOG.debug("\nsnapshot id : {}\n".format(str(snapshot_id)))    
            time.sleep(5)
            self.wait_for_workload_tobe_available(workload_id)
            if(self.getSnapshotStatus(workload_id, snapshot_id) == "available"):
                reporting.add_test_step("Create full snapshot of boot from volume instance", tvaultconf.PASS)
                LOG.debug("Full snapshot available!!")
            else:
                reporting.add_test_step("Create full snapshot of boot from volume instance", tvaultconf.FAIL)
                raise Exception("Snapshot creation failed")

            volume_snapshots = self.get_available_volume_snapshots()

            LOG.debug("\nvolume is : {}\n".format(str(volume_id)))
            LOG.debug("\nvolume snapshots : {}\n".format(str(volume_snapshots)))

            ### Incremental snapshot ###

            self.created = False
            LOG.debug("workload is:" + str(workload_id))

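            # Trigger an incremental snapshot via the CLI; it captures the
            # changes made since the preceding full snapshot.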
            create_snapshot = command_argument_string.incr_snapshot_create + workload_id
            LOG.debug("Create snapshot command: " + str(create_snapshot))
            rc = cli_parser.cli_returncode(create_snapshot)
            if rc != 0:
                reporting.add_test_step("Execute workload-snapshot command", tvaultconf.FAIL)
                raise Exception("Command did not execute correctly")
            else:
                reporting.add_test_step("Execute workload-snapshot command", tvaultconf.PASS)
                LOG.debug("Command executed correctly")

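            # The CLI returns immediately, so fetch the queued snapshot ID from
            # the workloadmgr database and poll it until it becomes available.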
            incr_snapshot_id = query_data.get_inprogress_snapshot_id(workload_id)
            LOG.debug("Incremental Snapshot ID: " + str(incr_snapshot_id))
            #Wait for incremental snapshot to complete
            wc = self.wait_for_snapshot_tobe_available(workload_id, incr_snapshot_id)
            if (str(wc) == "available"):
                reporting.add_test_step("Incremental snapshot", tvaultconf.PASS)
                LOG.debug("Workload incremental snapshot successfully completed")
                self.created = True
            if (self.created == False):
                reporting.add_test_step("Incremental snapshot", tvaultconf.FAIL)
                raise Exception ("Workload incremental snapshot did not get created")

            if (tvaultconf.cleanup == True):
                self.addCleanup(self.snapshot_delete,workload_id, incr_snapshot_id)

            ### Selective restore ###

            instance_details = []
            network_details  = []
            restored_vm_details_list = []
            vms_details_after_restore = []
            int_net_1_name = self.get_net_name(CONF.network.internal_network_id)
            LOG.debug("int_net_1_name" + str(int_net_1_name))
            int_net_1_subnets = self.get_subnet_id(CONF.network.internal_network_id)
            LOG.debug("int_net_1_subnet" + str(int_net_1_subnets))

            #Create instance details for restore.json


            vm_name = "tempest_test_vm_"+vm_id+"_restored"
            temp_instance_data = {'id': vm_id,
                                  'availability_zone': CONF.compute.vm_availability_zone,
                                  'include': True,
                                  'restore_boot_disk': True,
                                  'name': vm_name
                                  }
            instance_details.append(temp_instance_data)
            LOG.debug("Instance details for restore: " + str(instance_details))

            #Create network details for restore.json
            snapshot_network = {
                                 'id': CONF.network.internal_network_id,
                                 'subnet': { 'id': int_net_1_subnets }
                               }
            target_network = { 'name': int_net_1_name,
                               'id': CONF.network.internal_network_id,
                               'subnet': { 'id': int_net_1_subnets }
                             }
            network_details = [ { 'snapshot_network': snapshot_network,
                                  'target_network': target_network } ]
            LOG.debug("Network details for restore: " + str(network_details))


            LOG.debug("Snapshot id : " + str(snapshot_id))
            #Trigger selective restore
            restore_id_1=self.snapshot_selective_restore(workload_id, snapshot_id, restore_cleanup=True, restore_name=tvaultconf.restore_name,
                                                            instance_details=instance_details, network_details=network_details)
            LOG.debug("\nRestore ID(selective) : {}\n".format(restore_id_1))
            self.wait_for_snapshot_tobe_available(workload_id, snapshot_id)
            if(self.getRestoreStatus(workload_id, snapshot_id, restore_id_1) == "available"):
                reporting.add_test_step("Selective restore", tvaultconf.PASS)
            else:
                reporting.add_test_step("Selective restore", tvaultconf.FAIL)
                raise Exception("Selective restore failed")

            #Fetch instance details after restore
            vm_list  =  self.get_restored_vm_list(restore_id_1)
            LOG.debug("Restored vm(selective) ID : " + str(vm_list))

            for id in range(len(vm_list)):
                restored_vm_details_list.append(self.get_vm_details(vm_list[id]))
            LOG.debug("Restored vm details list: " + str(restored_vm_details_list))

            vms_details_after_restore = self.get_vms_details_list(restored_vm_details_list)
            LOG.debug("VM details after restore: " + str(vms_details_after_restore))

            #Compare the data before and after restore
            for i in range(len(vms_details_after_restore)):
                if(vms_details_after_restore[i]['network_name'] == int_net_1_name):
                    reporting.add_test_step("Network verification for instance-" + str(i+1), tvaultconf.PASS)
                else:
                    LOG.error("Expected network: " + str(int_net_1_name))
                    LOG.error("Restored network: " + str(vms_details_after_restore[i]['network_name']))
                    reporting.add_test_step("Network verification for instance-" + str(i+1), tvaultconf.FAIL)
                    reporting.set_test_script_status(tvaultconf.FAIL)

            restored_volumes = self.get_restored_volume_list(restore_id_1)
            LOG.debug("Restored volumes list: "+str(restored_volumes))


            ### Inplace restore ###

            #Create in-place restore with CLI command
            restore_command  = command_argument_string.inplace_restore + str(tvaultconf.restore_filename) + " "  + str(incr_snapshot_id)

            LOG.debug("inplace restore cli command: " + str(restore_command))
            #Restore.json restoring only the boot disk of the instance
            restore_json = json.dumps({
                'openstack': {
                    'instances': [{
                        'restore_boot_disk': True,
                        'include': True,
                        'id': vm_id
                    }],
                    'networks_mapping': {
                        'networks': []
                    }
                },
                'restore_type': 'inplace',
                'type': 'openstack'
            })
            LOG.debug("restore.json for inplace restore: " + str(restore_json))
            #Create Restore.json
            with open(tvaultconf.restore_filename, 'w') as f:
                f.write(str(json.loads(restore_json)))
            rc = cli_parser.cli_returncode(restore_command)
            if rc != 0:
                reporting.add_test_step("Triggering In-Place restore via CLI", tvaultconf.FAIL)
                raise Exception("Command did not execute correctly")
            else:
                reporting.add_test_step("Triggering In-Place restore via CLI", tvaultconf.PASS)
                LOG.debug("Command executed correctly")

            #get restore id from database
            restore_id_2 = query_data.get_snapshot_restore_id(incr_snapshot_id)
            LOG.debug("\nRestore ID(inplace) : {}\n".format(restore_id_2))
            self.wait_for_snapshot_tobe_available(workload_id, incr_snapshot_id)

            #get in-place restore status
            if(self.getRestoreStatus(workload_id, incr_snapshot_id, restore_id_2) == "available"):
                reporting.add_test_step("In-place restore", tvaultconf.PASS)
            else:
                reporting.add_test_step("In-place restore", tvaultconf.FAIL)
                raise Exception("In-place restore failed")

            #Fetch instance details after restore
            vm_list  =  self.get_restored_vm_list(restore_id_2)
            LOG.debug("Restored vm(In-place) ID : " + str(vm_list))

            #Delete restore for snapshot
            if (tvaultconf.cleanup == True):
                self.addCleanup(self.restore_delete, workload_id, incr_snapshot_id, restore_id_2)

            ### Oneclick restore ###

            #Delete the original instance
            self.delete_vm(vm_id)
            LOG.debug("Instance deleted successfully for one click restore : "+str(vm_id))
            deleted = 1
            time.sleep(10)
            self.delete_volume(volume_id)
            LOG.debug("Volume deleted successfully for one click restore : "+str(volume_id))

            #Create one-click restore using CLI command
            restore_command = command_argument_string.oneclick_restore + " " + snapshot_id
            rc = cli_parser.cli_returncode(restore_command)
            if rc != 0:
                reporting.add_test_step("Execute snapshot-oneclick-restore command", tvaultconf.FAIL)
                raise Exception("Command did not execute correctly")
            else:
                reporting.add_test_step("Execute snapshot-oneclick-restore command", tvaultconf.PASS)
                LOG.debug("Command executed correctly")


            restore_id_3 = query_data.get_snapshot_restore_id(snapshot_id)
            LOG.debug("\nRestore ID(oneclick): {}\n".format(str(restore_id_3)))

            self.wait_for_snapshot_tobe_available(workload_id, snapshot_id)
            if(self.getRestoreStatus(workload_id, snapshot_id, restore_id_3) == "available"):
                reporting.add_test_step("One-click restore", tvaultconf.PASS)
                LOG.debug("One-click restore passed")
            else:
                reporting.add_test_step("One-click restore", tvaultconf.FAIL)
                LOG.debug("One-click restore failed")
                raise Exception("One-click restore failed")
            LOG.debug("One-click restore complete")

            restored_volumes = self.get_restored_volume_list(restore_id_3)
            vm_list  =  self.get_restored_vm_list(restore_id_3)

            LOG.debug("Restored vms : " + str(vm_list))
            if (tvaultconf.cleanup == True):
                self.addCleanup(self.restore_delete, workload_id, snapshot_id, restore_id_3)
                time.sleep(20)
                self.addCleanup(self.delete_restored_vms, vm_list, restored_volumes)
            reporting.test_case_to_write()

        except Exception as e:
            LOG.error("Exception: " + str(e))
            if (deleted == 0):
                self.delete_vm(vm_id)
                time.sleep(10)
                self.delete_volume(volume_id)
            reporting.set_test_script_status(tvaultconf.FAIL)
            reporting.test_case_to_write()
Example #6
    def test_6_regression(self):
        reporting.add_test_script(
            str(__name__) + "_one_click_restore_bootfrom_image")
        try:
            if self.exception != "":
                LOG.debug("pre req failed")
                reporting.add_test_step(str(self.exception), tvaultconf.FAIL)
                raise Exception(str(self.exception))
            LOG.debug("pre req completed")

            self.created = False

            #Delete the original instance
            self.delete_vms(self.workload_instances)
            self.delete_key_pair(tvaultconf.key_pair_name)
            self.delete_security_group(self.security_group_id)
            self.delete_flavor(self.flavor_id)
            LOG.debug("Instances deleted successfully")

            #Create one-click restore using CLI command
            restore_command = command_argument_string.oneclick_restore + \
                " " + self.snapshot_ids[1]
            rc = cli_parser.cli_returncode(restore_command)
            if rc != 0:
                reporting.add_test_step(
                    "Execute snapshot-oneclick-restore command",
                    tvaultconf.FAIL)
                raise Exception("Command did not execute correctly")
            else:
                reporting.add_test_step(
                    "Execute snapshot-oneclick-restore command",
                    tvaultconf.PASS)
                LOG.debug("Command executed correctly")

            wc = query_data.get_snapshot_restore_status(
                tvaultconf.restore_name, self.snapshot_ids[1])
            LOG.debug("Snapshot restore status: " + str(wc))
            # Poll the DB until the restore reaches a terminal state
            while (str(wc) != "available" and str(wc) != "error"):
                time.sleep(5)
                wc = query_data.get_snapshot_restore_status(
                    tvaultconf.restore_name, self.snapshot_ids[1])
                LOG.debug("Snapshot restore status: " + str(wc))
            if (str(wc) == "available"):
                LOG.debug("Snapshot Restore successfully completed")
                reporting.add_test_step(
                    "Snapshot one-click restore verification with DB",
                    tvaultconf.PASS)
                self.created = True

            if (self.created == False):
                reporting.add_test_step(
                    "Snapshot one-click restore verification with DB",
                    tvaultconf.FAIL)
                raise Exception("Snapshot Restore did not get created")

            self.restore_id = query_data.get_snapshot_restore_id(
                self.snapshot_id)
            LOG.debug("Restore ID: " + str(self.restore_id))

            #Fetch instance details after restore
            self.restored_vm_details_list = []

            #restored vms list
            self.vm_list = self.get_restored_vm_list(self.restore_id)
            LOG.debug("Restored vms : " + str(self.vm_list))

            #restored vms all details list
            for id in range(len(self.workload_instances)):
                self.restored_vm_details_list.append(
                    self.get_vm_details(self.vm_list[id]))
            LOG.debug("Restored vm details list: " +
                      str(self.restored_vm_details_list))

            #required details of restored vms
            self.vms_details_after_restore = self.get_vms_details_list(
                self.restored_vm_details_list)
            LOG.debug("VM details after restore: " +
                      str(self.vms_details_after_restore))

            #Verify floating ips
            self.floating_ips_after_restore = []
            for i in range(len(self.vms_details_after_restore)):
                self.floating_ips_after_restore.append(
                    self.vms_details_after_restore[i]['floating_ip'])
            # list.sort() returns None, so compare sorted copies instead
            if (sorted(self.floating_ips_after_restore) ==
                    sorted(self.floating_ips_list)):
                reporting.add_test_step("Floating ip verification",
                                        tvaultconf.PASS)
            else:
                LOG.error("Floating ips before restore: " +
                          str(sorted(self.floating_ips_list)))
                LOG.error("Floating ips after restore: " +
                          str(sorted(self.floating_ips_after_restore)))
                reporting.add_test_step("Floating ip verification",
                                        tvaultconf.FAIL)
                reporting.set_test_script_status(tvaultconf.FAIL)

            #calculate md5sum after restore
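            # 'tree' is a self-referential defaultdict, giving a nested dict
            # keyed by floating IP and mount point without pre-declaring keys.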
            tree = lambda: collections.defaultdict(tree)
            md5_sum_after_oneclick_restore = tree()
            for floating_ip in self.floating_ips_list:
                for mount_point in mount_points:
                    ssh = self.SshRemoteMachineConnectionWithRSAKey(
                        str(floating_ip))
                    md5_sum_after_oneclick_restore[str(floating_ip)][str(
                        mount_point)] = self.calculatemmd5checksum(
                            ssh, mount_point)
                    ssh.close()
            LOG.debug("md5_sum_after_oneclick_restore" +
                      str(md5_sum_after_oneclick_restore))

            #md5sum verification
            if (self.md5sums_dir_before == md5_sum_after_oneclick_restore):
                reporting.add_test_step("Md5 Verification", tvaultconf.PASS)
            else:
                reporting.set_test_script_status(tvaultconf.FAIL)
                reporting.add_test_step("Md5 Verification", tvaultconf.FAIL)

            reporting.test_case_to_write()

        except Exception as e:
            LOG.error("Exception: " + str(e))
            reporting.set_test_script_status(tvaultconf.FAIL)
            reporting.test_case_to_write()
Example #7
    def test_1_image_booted(self):
        try:
            deleted = 0
            ## VM and Workload ###
            tests = [['tempest.api.workloadmgr.restore.test_image_booted_Selective-restore', 0],
                     ['tempest.api.workloadmgr.restore.test_image_booted_Inplace-restore', 0],
                     ['tempest.api.workloadmgr.restore.test_image_booted_Oneclick-restore', 0]]
            reporting.add_test_script(tests[0][0])
            data_dir_path = "/root"
            md5sums_before_full = {}
            LOG.debug("******************")            
            kp = self.create_key_pair(tvaultconf.key_pair_name, keypair_cleanup=True)
            LOG.debug("Key_pair : "+str(kp))            

            vm_id = self.create_vm(key_pair=kp, vm_cleanup=False)
            LOG.debug("VM ID : "+str(vm_id))
            time.sleep(30)

            floating_ip_1 = self.assign_floating_ips(vm_id, False)
            LOG.debug("Assigned floating IP : "+str(floating_ip_1))

            LOG.debug("Sleeping for 20 sec")
            time.sleep(20)
            
            self.data_ops(floating_ip_1, data_dir_path, 3)
            LOG.debug("Created data")            

            md5sums_before_full = self.calcmd5sum(floating_ip_1, data_dir_path)
            LOG.debug("MD5sums for directory on original vm : "+str(md5sums_before_full))

            
            workload_create = command_argument_string.workload_create + " --instance instance-id=" +str(vm_id)
            rc = cli_parser.cli_returncode(workload_create)
            if rc != 0:
                reporting.add_test_step("Execute workload-create command", tvaultconf.FAIL)
                raise Exception("Workload-create command did not execute correctly")
            else:
                reporting.add_test_step("Execute workload-create command", tvaultconf.PASS)
                LOG.debug("Workload-create command executed correctly")

            time.sleep(10)
            workload_id = query_data.get_workload_id(tvaultconf.workload_name)
            LOG.debug("Workload ID: " + str(workload_id))
            if(workload_id != None):
                self.wait_for_workload_tobe_available(workload_id)
                if(self.getWorkloadStatus(workload_id) == "available"):
                    reporting.add_test_step("Create workload", tvaultconf.PASS)
                else:
                    reporting.add_test_step("Create workload", tvaultconf.FAIL)
                    raise Exception("Workload creation failed")
            else:
                reporting.add_test_step("Create workload", tvaultconf.FAIL)
                raise Exception("Workload creation failed")

            if (tvaultconf.cleanup == True):
                self.addCleanup(self.workload_delete, workload_id)

            ### Full snapshot ###

            snapshot_id = self.create_snapshot(workload_id, is_full=True)

            #Add some more data to files on VM
            ssh = self.SshRemoteMachineConnectionWithRSAKey(str(floating_ip_1))
            self.addCustomfilesOnLinuxVM(ssh, data_dir_path, 2)
            ssh.close()
            md5sums_before_incremental = {}
            md5sums_before_incremental = self.calcmd5sum(floating_ip_1, data_dir_path)
            LOG.debug("\nMD5SUM after adding additional data before incremental snapshot : {}\n".format(md5sums_before_incremental))

            ### Incremental snapshot ###

            incr_snapshot_id = self.create_snapshot(workload_id, is_full=False)

            ### Selective restore ###

            rest_details = {}
            rest_details['rest_type'] = 'selective'
            rest_details['network_id'] = CONF.network.internal_network_id
            rest_details['subnet_id'] = self.get_subnet_id(CONF.network.internal_network_id)
            volumeslist =  []
            rest_details['instances'] = {vm_id:volumeslist}
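            # rest_details feeds create_restore_json: restore type, target
            # network/subnet, and an instance-to-volumes map (empty here since
            # the VM is image booted).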

            payload = self.create_restore_json(rest_details)
            #Trigger selective restore
            restore_id_1=self.snapshot_selective_restore(workload_id, snapshot_id,restore_name=tvaultconf.restore_name, restore_cleanup=True,
                                                            instance_details=payload['instance_details'], network_details=payload['network_details'])
            self.wait_for_snapshot_tobe_available(workload_id, snapshot_id)
            if(self.getRestoreStatus(workload_id, snapshot_id, restore_id_1) == "available"):
                reporting.add_test_step("Selective restore", tvaultconf.PASS)
            else:
                reporting.add_test_step("Selective restore", tvaultconf.FAIL)
                raise Exception("Selective restore failed")

            #Fetch instance details after restore
            vm_list  =  self.get_restored_vm_list(restore_id_1)
            LOG.debug("Restored vm(selective) ID : " + str(vm_list))
            time.sleep(60)
            floating_ip_2 = self.assign_floating_ips(vm_list[0], True)
            LOG.debug("Floating ip assigned to selective restore vm -> "+str(floating_ip_2))
            md5sums_after_selective = {}
            ssh = self.SshRemoteMachineConnectionWithRSAKey(str(floating_ip_2))
            md5sums_after_selective = self.calcmd5sum(floating_ip_2, data_dir_path)
            ssh.close()

            LOG.debug("MD5SUMS before restore")
            LOG.debug(md5sums_before_full[str(floating_ip_1)])
            LOG.debug("MD5SUMS after selective restore")
            LOG.debug(md5sums_after_selective[str(floating_ip_2)])

            if md5sums_before_full[str(floating_ip_1)] == md5sums_after_selective[str(floating_ip_2)]:
                LOG.debug("***MDSUMS MATCH***")
                reporting.add_test_step("Md5 Verification for volume", tvaultconf.PASS)
                reporting.set_test_script_status(tvaultconf.PASS)
            else:
                LOG.debug("***MDSUMS DON'T MATCH***")
                reporting.add_test_step("Md5 Verification for volume", tvaultconf.FAIL)
                reporting.set_test_script_status(tvaultconf.FAIL)

            restored_vm_details = []
            for id in range(len(vm_list)):
                restored_vm_details.append(self.get_vm_details(vm_list[id]))
            LOG.debug("Restored vm details list: " + str(restored_vm_details))

            vms_details_after_restore = self.get_vms_details_list(restored_vm_details)
            LOG.debug("VM details after restore: " + str(vms_details_after_restore))
            #Compare the data before and after restore
            int_net_1_name = self.get_net_name(CONF.network.internal_network_id) 
            for i in range(len(vms_details_after_restore)):
                if(vms_details_after_restore[i]['network_name'] == int_net_1_name):
                    reporting.add_test_step("Network verification for instance-" + str(i+1), tvaultconf.PASS)
                    tests[0][1] = 1
                    reporting.test_case_to_write()
                else:
                    LOG.error("Expected network: " + str(int_net_1_name))
                    LOG.error("Restored network: " + str(vms_details_after_restore[i]['network_name']))
                    reporting.add_test_step("Network verification for instance-" + str(i+1), tvaultconf.FAIL)
                    reporting.set_test_script_status(tvaultconf.FAIL)
                    reporting.test_case_to_write()


            ### In-place Restore ###

            rest_details = {}
            rest_details['rest_type'] = 'inplace'
            rest_details['instances'] = {vm_id:volumeslist}
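            # In-place restore writes data back onto the existing instance, so
            # no target network mapping is required in rest_details.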

            reporting.add_test_script(tests[1][0]) 
            #Create in-place restore with CLI command
            restore_command  = command_argument_string.inplace_restore + str(tvaultconf.restore_filename) + " "  + str(snapshot_id)
            payload = self.create_restore_json(rest_details)
            restore_json = json.dumps(payload)
            LOG.debug("restore.json for inplace restore: " + str(restore_json))
            #Create Restore.json
            with open(tvaultconf.restore_filename, 'w') as f:
                f.write(str(yaml.safe_load(restore_json)))
            rc = cli_parser.cli_returncode(restore_command)
            if rc != 0:
                reporting.add_test_step("Triggering In-Place restore via CLI", tvaultconf.FAIL)
                raise Exception("Command did not execute correctly")
            else:
                reporting.add_test_step("Triggering In-Place restore via CLI", tvaultconf.PASS)
                LOG.debug("Command executed correctly")

            #get restore id from database
            restore_id_2 = query_data.get_snapshot_restore_id(snapshot_id)
            self.wait_for_snapshot_tobe_available(workload_id, snapshot_id)

            #get in-place restore status
            if(self.getRestoreStatus(workload_id, snapshot_id, restore_id_2) == "available"):
                reporting.add_test_step("In-place restore", tvaultconf.PASS)
            else:
                reporting.add_test_step("In-place restore", tvaultconf.FAIL)
                raise Exception("In-place restore failed")

            #Fetch instance details after restore
            vm_list = []
            vm_list  =  self.get_restored_vm_list(restore_id_2)
            LOG.debug("Restored vm(In-place) ID : " + str(vm_list))

            time.sleep(60)
            md5sums_after_inplace = {}
            ssh = self.SshRemoteMachineConnectionWithRSAKey(str(floating_ip_1))
            md5sums_after_inplace = self.calcmd5sum(floating_ip_1, data_dir_path)
            ssh.close()

            LOG.debug("<----md5sums_before_full---->")
            LOG.debug(md5sums_before_full[str(floating_ip_1)])
            LOG.debug("<----md5sums_after_inplace---->")
            LOG.debug(md5sums_after_inplace[str(floating_ip_1)])

            if md5sums_before_full[str(floating_ip_1)] == md5sums_after_inplace[str(floating_ip_1)]:
                LOG.debug("***MDSUMS MATCH***")
                reporting.add_test_step("Md5 Verification for volume", tvaultconf.PASS)
                tests[1][1] = 1
                reporting.set_test_script_status(tvaultconf.PASS)
                reporting.test_case_to_write()
            else:
                LOG.debug("***MDSUMS DON'T MATCH***")
                reporting.add_test_step("Md5 Verification for volume", tvaultconf.FAIL)
                reporting.set_test_script_status(tvaultconf.FAIL)
                reporting.test_case_to_write()

            #Delete restore for snapshot
            if (tvaultconf.cleanup == True):
                self.addCleanup(self.restore_delete, workload_id, snapshot_id, restore_id_2)


            ### One-click restore ###

            reporting.add_test_script(tests[2][0])

            #Delete the original instance
            self.delete_vm(vm_id)
            LOG.debug("Instance deleted successfully for one click restore : "+str(vm_id))
            time.sleep(10)

            deleted = 1

            #Create one-click restore using CLI command
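            # Restoring from the incremental snapshot should bring the data
            # back to the state captured at incremental time (verified below
            # against md5sums_before_incremental).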
            restore_command = command_argument_string.oneclick_restore + " " + incr_snapshot_id
            rc = cli_parser.cli_returncode(restore_command)
            if rc != 0:
                reporting.add_test_step("Execute snapshot-oneclick-restore command", tvaultconf.FAIL)
                raise Exception("Command did not execute correctly")
            else:
                reporting.add_test_step("Execute snapshot-oneclick-restore command", tvaultconf.PASS)
                LOG.debug("Command executed correctly")

            restore_id_3 = query_data.get_snapshot_restore_id(incr_snapshot_id)
            LOG.debug("Restore ID: " + str(restore_id_3))

            self.wait_for_snapshot_tobe_available(workload_id, incr_snapshot_id)
            if(self.getRestoreStatus(workload_id, incr_snapshot_id, restore_id_3) == "available"):
                reporting.add_test_step("One-click restore", tvaultconf.PASS)
                LOG.debug("One-click restore passed")
            else:
                reporting.add_test_step("One-click restore", tvaultconf.FAIL)
                LOG.debug("One-click restore failed")
                raise Exception("One-click restore failed")
            LOG.debug("One-click restore complete")

            #Fetch instance details after restore
            vm_list = []
            vm_list  =  self.get_restored_vm_list(restore_id_3)
            LOG.debug("Restored vms : " + str(vm_list))

            md5sums_after_1clickrestore = {}
            ssh = self.SshRemoteMachineConnectionWithRSAKey(str(floating_ip_1))
            md5sums_after_1clickrestore = self.calcmd5sum(floating_ip_1, data_dir_path)
            LOG.debug("MD5SUMS after one click restore : {}".format(md5sums_after_1clickrestore))
            ssh.close()

            if md5sums_before_incremental[str(floating_ip_1)] == md5sums_after_1clickrestore[str(floating_ip_1)]:
                LOG.debug("***MDSUMS MATCH***")
                reporting.add_test_step("Md5 Verification for volume", tvaultconf.PASS)
                tests[2][1] = 1
                reporting.set_test_script_status(tvaultconf.PASS)
                reporting.test_case_to_write()
            else:
                LOG.debug("***MDSUMS DON'T MATCH***")
                reporting.add_test_step("Md5 Verification for volume", tvaultconf.FAIL)
                reporting.set_test_script_status(tvaultconf.FAIL)
                reporting.test_case_to_write()

            restored_volumes = []
            restored_volumes = self.get_restored_volume_list(restore_id_3)
            LOG.debug("Restored volumes : ")
            LOG.debug(restored_volumes)

            if (tvaultconf.cleanup == True):
                self.addCleanup(self.restore_delete, workload_id, incr_snapshot_id, restore_id_3)
                time.sleep(30)
                self.addCleanup(self.delete_restored_vms, vm_list, restored_volumes)

        except Exception as e:
            LOG.error("Exception: " + str(e))
            if (deleted == 0):
                try:
                    self.delete_vm(vm_id)
                except:
                    pass
            for test in tests:
                if test[1] != 1:
                    reporting.add_test_script(test[0])
                    reporting.set_test_script_status(tvaultconf.FAIL)
                    reporting.test_case_to_write()
    def test_tvault1040_oneclick_restore(self):
        try:
            #Prerequisites
            self.created = False
            self.workload_instances = []

            #Launch instance
            self.vm_id = self.create_vm(vm_cleanup=False)
            LOG.debug("VM ID: " + str(self.vm_id))

            #Create volume
            self.volume_id = self.create_volume(volume_cleanup=False)
            LOG.debug("Volume ID: " + str(self.volume_id))

            #Attach volume to the instance
            self.attach_volume(self.volume_id,
                               self.vm_id,
                               attach_cleanup=False)
            LOG.debug("Volume attached")

            #Create workload
            self.workload_instances.append(self.vm_id)
            self.wid = self.workload_create(
                self.workload_instances,
                tvaultconf.parallel,
                workload_name=tvaultconf.workload_name)
            LOG.debug("Workload ID: " + str(self.wid))
            time.sleep(5)

            #Create snapshot
            self.snapshot_id = self.workload_snapshot(self.wid, True,
                                                      tvaultconf.snapshot_name)
            LOG.debug("Snapshot ID: " + str(self.snapshot_id))

            #Wait till snapshot is complete
            self.wait_for_snapshot_tobe_available(self.wid, self.snapshot_id)

            #Delete the original instance
            self.delete_vm(self.vm_id)
            LOG.debug("Instance deleted successfully")

            #Delete corresponding volume
            self.delete_volume(self.volume_id)
            LOG.debug("Volume deleted successfully")

            #Create one-click restore using CLI command
            restore_command = command_argument_string.oneclick_restore + " " + self.snapshot_id
            rc = cli_parser.cli_returncode(restore_command)
            if rc != 0:
                reporting.add_test_step(
                    "Execute snapshot-oneclick-restore command",
                    tvaultconf.FAIL)
                raise Exception("Command did not execute correctly")
            else:
                reporting.add_test_step(
                    "Execute snapshot-oneclick-restore command",
                    tvaultconf.PASS)
                LOG.debug("Command executed correctly")

            wc = query_data.get_snapshot_restore_status(
                tvaultconf.restore_name, self.snapshot_id)
            LOG.debug("Snapshot restore status: " + str(wc))
            # Poll the DB until the restore reaches a terminal state
            while (str(wc) != "available" and str(wc) != "error"):
                time.sleep(5)
                wc = query_data.get_snapshot_restore_status(
                    tvaultconf.restore_name, self.snapshot_id)
                LOG.debug("Snapshot restore status: " + str(wc))
            if (str(wc) == "available"):
                LOG.debug("Snapshot Restore successfully completed")
                reporting.add_test_step(
                    "Snapshot one-click restore verification with DB",
                    tvaultconf.PASS)
                self.created = True

            if (self.created == False):
                reporting.add_test_step(
                    "Snapshot one-click restore verification with DB",
                    tvaultconf.FAIL)
                raise Exception("Snapshot Restore did not get created")

            self.restore_id = query_data.get_snapshot_restore_id(
                self.snapshot_id)
            LOG.debug("Restore ID: " + str(self.restore_id))

            #Cleanup
            #self.volume_snapshots = self.get_available_volume_snapshots()
            #self.delete_volume_snapshots(self.volume_snapshots)

            #Delete restore for snapshot
            self.restore_delete(self.wid, self.snapshot_id, self.restore_id)
            LOG.debug("Snapshot Restore deleted successfully")

            #Delete restored VM instance and volume
            #self.delete_restored_vms(self.restore_id)
            #LOG.debug("Restored VM and volume deleted successfully")
            reporting.test_case_to_write()

        except Exception as e:
            LOG.error("Exception: " + str(e))
            reporting.set_test_script_status(tvaultconf.FAIL)
            reporting.test_case_to_write()
Example #9
    def test_1_image_booted(self):
        try:
            ### Create vm and workload ###
            deleted = 0
            tests = [
                [
                    'tempest.api.workloadmgr.restore.test_image_booted_selective-restore',
                    0
                ],
                [
                    'tempest.api.workloadmgr.restore.test_image_booted_Inplace-restore',
                    0
                ],
                [
                    'tempest.api.workloadmgr.restore.test_image_booted_oneclick_restore',
                    0
                ]
            ]
            reporting.add_test_script(tests[0][0])
            self.created = False
            vm_id = self.create_vm(vm_cleanup=False)
            LOG.debug("\nVm id : {}\n".format(str(vm_id)))

            workload_create = command_argument_string.workload_create + " --instance instance-id=" + str(
                vm_id)
            rc = cli_parser.cli_returncode(workload_create)
            if rc != 0:
                reporting.add_test_step("Execute workload-create command",
                                        tvaultconf.FAIL)
                raise Exception(
                    "Workload-create command did not execute correctly")
            else:
                reporting.add_test_step("Execute workload-create command",
                                        tvaultconf.PASS)
                LOG.debug("Workload-create command executed correctly")

            time.sleep(10)
            workload_id = query_data.get_workload_id(tvaultconf.workload_name)
            LOG.debug("Workload ID: " + str(workload_id))
            if (workload_id != None):
                self.wait_for_workload_tobe_available(workload_id)
                if (self.getWorkloadStatus(workload_id) == "available"):
                    reporting.add_test_step("Create workload", tvaultconf.PASS)
                else:
                    reporting.add_test_step("Create workload", tvaultconf.FAIL)
                    raise Exception("Workload creation failed")
            else:
                reporting.add_test_step("Create workload", tvaultconf.FAIL)
                raise Exception("Workload creation failed")

            LOG.debug("\nworkload id : {}\n".format(str(workload_id)))
            LOG.debug("\nvm id : {}\n".format(str(vm_id)))
            time.sleep(40)
            if (tvaultconf.cleanup == True):
                self.addCleanup(self.workload_delete, workload_id)

            ### Full snapshot ###

            self.created = False

            #Create snapshot with CLI command
            create_snapshot = command_argument_string.snapshot_create + workload_id
            LOG.debug("Create snapshot command: " + str(create_snapshot))
            rc = cli_parser.cli_returncode(create_snapshot)
            if rc != 0:
                reporting.add_test_step(
                    "Execute workload-snapshot command with --full",
                    tvaultconf.FAIL)
                raise Exception("Command did not execute correctly")
            else:
                reporting.add_test_step(
                    "Execute workload-snapshot command with --full",
                    tvaultconf.PASS)
                LOG.debug("Command executed correctly")

            snapshot_id = query_data.get_inprogress_snapshot_id(workload_id)
            LOG.debug("\nFull-snapshot ID: {}".format(str(snapshot_id)))
            wc = self.wait_for_snapshot_tobe_available(workload_id,
                                                       snapshot_id)
            if (str(wc) == "available"):
                reporting.add_test_step("Full snapshot", tvaultconf.PASS)
                self.created = True
            if (self.created == False):
                reporting.add_test_step("Full snapshot", tvaultconf.FAIL)
                raise Exception("Workload snapshot did not get created")
            if (tvaultconf.cleanup == True):
                self.addCleanup(self.snapshot_delete, workload_id, snapshot_id)

            ### Incremental snapshot ###

            self.created = False
            LOG.debug("workload is:" + str(workload_id))

            #Create incremental snapshot using CLI command
            create_snapshot = command_argument_string.incr_snapshot_create + workload_id
            LOG.debug("Create snapshot command: " + str(create_snapshot))
            rc = cli_parser.cli_returncode(create_snapshot)
            if rc != 0:
                reporting.add_test_step("Execute workload-snapshot command",
                                        tvaultconf.FAIL)
                raise Exception("Command did not execute correctly")
            else:
                reporting.add_test_step("Execute workload-snapshot command",
                                        tvaultconf.PASS)
                LOG.debug("Command executed correctly")

            incr_snapshot_id = query_data.get_inprogress_snapshot_id(
                workload_id)
            LOG.debug("\nIncremental-snapshot ID: {}".format(
                str(incr_snapshot_id)))
            #Wait for incremental snapshot to complete
            wc = self.wait_for_snapshot_tobe_available(workload_id,
                                                       incr_snapshot_id)
            if (str(wc) == "available"):
                reporting.add_test_step("Incremental snapshot",
                                        tvaultconf.PASS)
                LOG.debug(
                    "Workload incremental snapshot successfully completed")
                self.created = True
            if (self.created == False):
                reporting.add_test_step("Incremental snapshot",
                                        tvaultconf.FAIL)
                raise Exception(
                    "Workload incremental snapshot did not get created")
            if (tvaultconf.cleanup == True):
                self.addCleanup(self.snapshot_delete, workload_id,
                                incr_snapshot_id)

            ### Selective restore ###

            instance_details = []
            network_details = []
            restored_vm_details_list = []
            vms_details_after_restore = []
            int_net_1_name = self.get_net_name(
                CONF.network.internal_network_id)
            LOG.debug("int_net_1_name" + str(int_net_1_name))
            int_net_1_subnets = self.get_subnet_id(
                CONF.network.internal_network_id)
            LOG.debug("int_net_1_subnet" + str(int_net_1_subnets))

            #Create instance details for restore.json
            vm_name = "tempest_test_vm_" + vm_id + "_restored"
            temp_instance_data = {
                'id': vm_id,
                'availability_zone': CONF.compute.vm_availability_zone,
                'include': True,
                'restore_boot_disk': True,
                'name': vm_name
            }
            instance_details.append(temp_instance_data)
            LOG.debug("Instance details for restore: " + str(instance_details))

            #Create network details for restore.json
            snapshot_network = {
                'id': CONF.network.internal_network_id,
                'subnet': {
                    'id': int_net_1_subnets
                }
            }
            target_network = {
                'name': int_net_1_name,
                'id': CONF.network.internal_network_id,
                'subnet': {
                    'id': int_net_1_subnets
                }
            }
            network_details = [{
                'snapshot_network': snapshot_network,
                'target_network': target_network
            }]
            LOG.debug("Network details for restore: " + str(network_details))
            LOG.debug("Snapshot id : " + str(snapshot_id))

            # Trigger selective restore

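            # snapshot_selective_restore submits the restore request using the
            # instance and network mappings built above and returns the restore id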
            restore_id_1 = self.snapshot_selective_restore(
                workload_id,
                snapshot_id,
                restore_cleanup=True,
                restore_name=tvaultconf.restore_name,
                instance_details=instance_details,
                network_details=network_details)
            LOG.debug("\nselective-restore id : {}\n".format(
                str(restore_id_1)))
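            # Wait for the snapshot to return to available before polling the
            # restore status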
            self.wait_for_snapshot_tobe_available(workload_id, snapshot_id)
            if (self.getRestoreStatus(workload_id, snapshot_id,
                                      restore_id_1) == "available"):
                reporting.add_test_step("Selective restore", tvaultconf.PASS)
                LOG.debug("Selective restore passed")
            else:
                reporting.add_test_step("Selective restore", tvaultconf.FAIL)
                LOG.debug("Selective restore failed")
                raise Exception("Selective restore failed")
            LOG.debug("selective restore complete")

            # Fetch instance details after restore
            restored_vm_details_list = []
            vm_list = self.get_restored_vm_list(restore_id_1)
            LOG.debug("Restored vm(selective) ID : " + str(vm_list))

            for vm in vm_list:
                restored_vm_details_list.append(self.get_vm_details(vm))
            LOG.debug("Restored vm details list: " +
                      str(restored_vm_details_list))

            vms_details_after_restore = self.get_vms_details_list(
                restored_vm_details_list)
            LOG.debug("VM details after restore: " +
                      str(vms_details_after_restore))

            # Verify the restored instances are attached to the expected network
            for i in range(len(vms_details_after_restore)):
                if (vms_details_after_restore[i]['network_name'] ==
                        int_net_1_name):
                    reporting.add_test_step(
                        "Network verification for instance-" + str(i + 1),
                        tvaultconf.PASS)
                    tests[0][1] = 1
                    reporting.set_test_script_status(tvaultconf.PASS)
                    reporting.test_case_to_write()
                else:
                    LOG.error("Expected network: " + str(int_net_1_name))
                    LOG.error(
                        "Restored network: " +
                        str(vms_details_after_restore[i]['network_name']))
                    reporting.add_test_step(
                        "Network verification for instance-" + str(i + 1),
                        tvaultconf.FAIL)
                    reporting.set_test_script_status(tvaultconf.FAIL)
                    reporting.test_case_to_write()

            ### In-place restore ###

            # Create in-place restore with CLI command
            reporting.add_test_script(tests[1][0])
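            # The CLI command is the in-place restore base string followed by
            # the restore.json file path and the incremental snapshot id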
            restore_command = command_argument_string.inplace_restore + str(
                tvaultconf.restore_filename) + " " + str(incr_snapshot_id)

            LOG.debug("inplace restore cli command: " + str(restore_command))
            # restore.json restoring the boot disk of the original instance in place
            restore_json = json.dumps({
                'openstack': {
                    'instances': [{
                        'restore_boot_disk': True,
                        'include': True,
                        'id': vm_id
                    }],
                    'networks_mapping': {
                        'networks': []
                    }
                },
                'restore_type': 'inplace',
                'type': 'openstack'
            })
            LOG.debug("restore.json for inplace restore: " + str(restore_json))
            # Create restore.json input file
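            # NOTE: yaml.safe_load converts the JSON string back into a dict,
            # so the file holds the dict's Python-style repr rather than raw
            # JSON (assuming, as the existing flow implies, that the CLI
            # accepts this yaml-parsable form).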
            with open(tvaultconf.restore_filename, 'w') as f:
                f.write(str(yaml.safe_load(restore_json)))
            rc = cli_parser.cli_returncode(restore_command)
            if rc != 0:
                reporting.add_test_step("Triggering In-Place restore via CLI",
                                        tvaultconf.FAIL)
                raise Exception("Command did not execute correctly")
            else:
                reporting.add_test_step("Triggering In-Place restore via CLI",
                                        tvaultconf.PASS)
                LOG.debug("Command executed correctly")

            # Get restore id from database
            restore_id_2 = query_data.get_snapshot_restore_id(incr_snapshot_id)
            LOG.debug("\ninplace-restore id : {}\n".format(str(restore_id_2)))

            self.wait_for_snapshot_tobe_available(workload_id,
                                                  incr_snapshot_id)

            # Get in-place restore status
            if (self.getRestoreStatus(workload_id, incr_snapshot_id,
                                      restore_id_2) == "available"):
                reporting.add_test_step("In-place restore", tvaultconf.PASS)
            else:
                reporting.add_test_step("In-place restore", tvaultconf.FAIL)
                raise Exception("In-place restore failed")
            tests[1][1] = 1
            reporting.set_test_script_status(tvaultconf.PASS)
            reporting.test_case_to_write()
            # Fetch restored instance list after in-place restore
            restored_vm_details_list = []
            vm_list = self.get_restored_vm_list(restore_id_2)
            LOG.debug("Restored vm(In-place) ID : " + str(vm_list))

            # Schedule deletion of the in-place restore at test teardown
            if tvaultconf.cleanup:
                self.addCleanup(self.restore_delete, workload_id,
                                incr_snapshot_id, restore_id_2)
            LOG.debug("Snapshot restore (in-place) scheduled for cleanup")

            ### One-click Restore ###

            reporting.add_test_script(tests[2][0])
            # Delete the original instance
            self.delete_vm(vm_id)
            LOG.debug(
                "Instance deleted successfully for one click restore : " +
                str(vm_id))
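            # Flag the original instance as deleted so the exception handler
            # does not attempt to delete it again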
            deleted = 1

            # Create one-click restore using CLI command
            restore_command = command_argument_string.oneclick_restore + " " + snapshot_id
            rc = cli_parser.cli_returncode(restore_command)
            if rc != 0:
                reporting.add_test_step(
                    "Execute snapshot-oneclick-restore command",
                    tvaultconf.FAIL)
                raise Exception("Command did not execute correctly")
            else:
                reporting.add_test_step(
                    "Execute snapshot-oneclick-restore command",
                    tvaultconf.PASS)
                LOG.debug("Command executed correctly")

            restore_id_3 = query_data.get_snapshot_restore_id(snapshot_id)
            LOG.debug("\nRestore ID: {}\n".format(str(restore_id_3)))

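            # Wait for the snapshot to settle before polling the one-click
            # restore status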
            self.wait_for_snapshot_tobe_available(workload_id, snapshot_id)
            if (self.getRestoreStatus(workload_id, snapshot_id,
                                      restore_id_3) == "available"):
                reporting.add_test_step("One-click restore", tvaultconf.PASS)
                LOG.debug("One-click restore passed")
            else:
                reporting.add_test_step("One-click restore", tvaultconf.FAIL)
                LOG.debug("One-click restore failed")
                raise Exception("One-click restore failed")
            LOG.debug("One-click restore complete")

            restored_volumes = self.get_restored_volume_list(restore_id_3)
            vm_list = self.get_restored_vm_list(restore_id_3)

            LOG.debug("Restored vms : " + str(vm_list))

            if tvaultconf.cleanup:
                self.addCleanup(self.restore_delete, workload_id, snapshot_id,
                                restore_id_3)
                self.addCleanup(self.delete_restored_vms, vm_list,
                                restored_volumes)

            tests[2][1] = 1
            reporting.set_test_script_status(tvaultconf.PASS)
            reporting.test_case_to_write()

        except Exception as e:
            LOG.error("Exception: " + str(e))
            for test in tests:
                if test[1] != 1:
                    reporting.add_test_script(test[0])
                    reporting.set_test_script_status(tvaultconf.FAIL)
                    reporting.test_case_to_write()
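            # Remove the original instance if the one-click restore step never
            # deleted it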
            if deleted == 0:
                try:
                    self.delete_vm(vm_id)
                except Exception:
                    pass