Example #1
    def test_tvault_rbac_nonadmin_ableto(self):
        """RBAC check: a non-admin user can run the full workload lifecycle.

        Flow: switch the session to non-admin credentials, create a
        volume + VM, create a workload over the VM, take a full snapshot,
        delete the source VM and volume, run a one-click restore, mount
        the snapshot on a recovery instance, and run a file search.
        Each stage is recorded via ``reporting``; any failed stage raises,
        and the except handler marks the whole script FAIL.
        """
        try:
            # Use non-admin credentials
            # NOTE(review): mutates process-wide env vars; assumes the API
            # clients re-read OS_USERNAME/OS_PASSWORD from the environment
            # for subsequent calls -- confirm.
            os.environ['OS_USERNAME'] = CONF.identity.nonadmin_user
            os.environ['OS_PASSWORD'] = CONF.identity.nonadmin_password
            self.instances_id = []

            # Create volume, Launch an Instance.  Cleanup is disabled
            # because both resources are deleted explicitly further down,
            # before the one-click restore.
            self.volumes_id = self.create_volume(volume_cleanup=False)
            LOG.debug("Volume-1 ID: " + str(self.volumes_id))
            self.instances_id.append(self.create_vm(vm_cleanup=False))
            LOG.debug("VM-1 ID: " + str(self.instances_id[0]))
            self.attach_volume(self.volumes_id, self.instances_id[0])
            LOG.debug("Volume attached")

            # Create workload
            self.wid = self.workload_create(
                self.instances_id,
                tvaultconf.parallel,
                workload_name=tvaultconf.workload_name)
            LOG.debug("Workload ID: " + str(self.wid))
            workload_available = self.wait_for_workload_tobe_available(
                self.wid)
            if workload_available == True:
                LOG.debug("Workload created successfully")
                reporting.add_test_step("Verification of workload creation",
                                        tvaultconf.PASS)
                reporting.set_test_script_status(tvaultconf.PASS)
            else:
                LOG.debug("workload creation unsuccessful")
                reporting.add_test_step("Verification of workload creation",
                                        tvaultconf.FAIL)
                raise Exception(
                    "RBAC policy fails for workload creation by non-admin user"
                )

            # Create full snapshot
            self.snapshot_id = self.workload_snapshot(self.wid, True)
            LOG.debug("Snapshot ID-1: " + str(self.snapshot_id))
            # Wait till snapshot is complete
            snapshot_status = self.wait_for_snapshot_tobe_available(
                self.wid, self.snapshot_id)
            if snapshot_status == "available":
                LOG.debug("snapshot created successfully")
                reporting.add_test_step("Verification of snapshot creation",
                                        tvaultconf.PASS)
                reporting.set_test_script_status(tvaultconf.PASS)
            else:
                LOG.debug("snapshot creation unsuccessful")
                reporting.add_test_step("Verification of snapshot creation",
                                        tvaultconf.FAIL)
                raise Exception(
                    "RBAC policy fails for snapshot creation by non-admin user"
                )

            # Delete the original instance
            self.delete_vm(self.instances_id[0])
            LOG.debug("Instance deleted successfully")

            # Delete corresponding volume
            self.delete_volume(self.volumes_id)
            LOG.debug("Volume deleted successfully")

            # Create one-click restore, then poll the restore status in the
            # DB every 10s until it reaches a terminal state
            # ("available" or "error").
            restore_status = ""
            restore_name = "restore_1"
            restore_id = self.snapshot_restore(self.wid,
                                               self.snapshot_id,
                                               restore_name=restore_name)
            restore_status = query_data.get_snapshot_restore_status(
                restore_name, self.snapshot_id)
            LOG.debug("Snapshot restore status initial: " +
                      str(restore_status))
            while (str(restore_status) != "available"
                   and str(restore_status) != "error"):
                time.sleep(10)
                restore_status = query_data.get_snapshot_restore_status(
                    restore_name, self.snapshot_id)
                LOG.debug("Snapshot restore status: " + str(restore_status))
            if (str(restore_status) == "available"):
                LOG.debug("Snapshot Restore successfully completed")
                reporting.add_test_step(
                    "Snapshot one-click restore verification with DB",
                    tvaultconf.PASS)
            else:
                LOG.debug("Snapshot Restore unsuccessful")
                reporting.add_test_step(
                    "Snapshot one-click restore verification with DB",
                    tvaultconf.FAIL)

            # Launch recovery instance and Mount snapshot
            self.recoveryinstances_id = self.create_vm(
                flavor_id=CONF.compute.flavor_ref_alt,
                image_id=CONF.compute.fvm_image_ref)
            LOG.debug("VM-2 ID: " + str(self.recoveryinstances_id))
            status = self.mount_snapshot(self.wid, self.snapshot_id,
                                         self.recoveryinstances_id)
            if status == True:
                LOG.debug("snapshot Mounted successfully")
                reporting.add_test_step("Verification of snapshot mount",
                                        tvaultconf.PASS)
            else:
                LOG.debug("snapshot Mount unsuccessful")
                reporting.add_test_step("Verification of snapshot mount",
                                        tvaultconf.FAIL)
                raise Exception("snapshot does not Mount by non-admin user")

            # Run Filesearch against the (now deleted) original instance's
            # snapshot data; the expected match count per snapshot is 0.
            vmid_to_search = self.instances_id[0]
            filepath_to_search = "/File_1.txt"
            filecount_in_snapshots = {self.snapshot_id: 0}
            filesearch_id = self.filepath_search(vmid_to_search,
                                                 filepath_to_search)
            snapshot_wise_filecount = self.verifyFilepath_Search(
                filesearch_id, filepath_to_search)
            for snapshot_id in filecount_in_snapshots.keys():
                if snapshot_wise_filecount[
                        snapshot_id] == filecount_in_snapshots[snapshot_id]:
                    filesearch_status = True
                else:
                    filesearch_status = False
                    LOG.debug("Filepath Search unsuccessful")
                    reporting.add_test_step("Verification of Filepath serach",
                                            tvaultconf.FAIL)
                    raise Exception(
                        "Filesearch path does not execute correctly by non-admin user"
                    )

            if filesearch_status == True:
                LOG.debug("Filepath_Search successful")
                reporting.add_test_step("Verification of Filepath serach",
                                        tvaultconf.PASS)

            reporting.set_test_script_status(tvaultconf.PASS)
            reporting.test_case_to_write()

        except Exception as e:
            LOG.error("Exception: " + str(e))
            reporting.set_test_script_status(tvaultconf.FAIL)
            reporting.test_case_to_write()
    def test_1_image_volume(self):
        """Exercise the full CLI workload lifecycle for an image-booted VM
        with an attached volume.

        Flow: create key pair, VM, volume; write data and record md5sums;
        create a workload via CLI; take a full and an incremental snapshot
        via CLI; perform a selective restore, an in-place restore and a
        one-click restore; after each restore, remount the volume and
        verify the md5sums match the pre-restore values.  Results of each
        stage are recorded via ``reporting``.
        """
        try:
            global volumes
            # ``deleted`` tracks whether the original VM/volume were already
            # removed; the except handler uses it to avoid double-deleting.
            deleted = 0
            reporting.add_test_script(str(__name__))

            ## VM and Workload ###

            mount_points = ["mount_data_a", "mount_data_b"]
            md5sums_dir_before = {}
            LOG.debug("******************")
            kp = self.create_key_pair(tvaultconf.key_pair_name,
                                      keypair_cleanup=True)
            LOG.debug("Key_pair : " + str(kp))

            vm_id = self.create_vm(key_pair=kp, vm_cleanup=False)
            LOG.debug("VM ID : " + str(vm_id))

            volume_id = self.create_volume(volume_cleanup=False)
            LOG.debug("Volume ID: " + str(volume_id))
            # Device paths (e.g. /dev/vdb) used when remounting after restore.
            volumes = tvaultconf.volumes_parts

            self.attach_volume(volume_id, vm_id, attach_cleanup=False)
            LOG.debug("Volume attached")

            floating_ip_1 = self.assign_floating_ips(vm_id, False)
            LOG.debug("Assigned floating IP : " + str(floating_ip_1))

            LOG.debug("Sleeping for 40 sec")
            time.sleep(40)

            self.data_ops(floating_ip_1, mount_points[0], 3)
            LOG.debug("Created disk and mounted the attached volume")

            md5sums_dir_before = self.calcmd5sum(floating_ip_1,
                                                 mount_points[0])
            LOG.debug("MD5sums for directory on original vm : " +
                      str(md5sums_dir_before))

            workload_create = command_argument_string.workload_create + " --instance instance-id=" + str(
                vm_id)
            rc = cli_parser.cli_returncode(workload_create)
            if rc != 0:
                reporting.add_test_step("Execute workload-create command",
                                        tvaultconf.FAIL)
                raise Exception(
                    "Workload-create command did not execute correctly")
            else:
                reporting.add_test_step("Execute workload-create command",
                                        tvaultconf.PASS)
                LOG.debug("Workload-create command executed correctly")

            time.sleep(10)
            workload_id = query_data.get_workload_id(tvaultconf.workload_name)
            LOG.debug("Workload ID: " + str(workload_id))
            if (workload_id != None):
                self.wait_for_workload_tobe_available(workload_id)
                if (self.getWorkloadStatus(workload_id) == "available"):
                    reporting.add_test_step("Create workload", tvaultconf.PASS)
                else:
                    reporting.add_test_step("Create workload", tvaultconf.FAIL)
                    reporting.set_test_script_status(tvaultconf.FAIL)
            else:
                reporting.add_test_step("Create workload", tvaultconf.FAIL)
                reporting.set_test_script_status(tvaultconf.FAIL)

            if (tvaultconf.cleanup == True):
                self.addCleanup(self.workload_delete, workload_id)

            ### Full snapshot ###

            self.created = False

            # Create snapshot with CLI command
            create_snapshot = command_argument_string.snapshot_create + workload_id
            LOG.debug("Create snapshot command: " + str(create_snapshot))
            rc = cli_parser.cli_returncode(create_snapshot)
            if rc != 0:
                reporting.add_test_step(
                    "Execute workload-snapshot command with --full",
                    tvaultconf.FAIL)
                raise Exception(
                    "Command did not execute correctly for full snapshot")
            else:
                reporting.add_test_step(
                    "Execute workload-snapshot command with --full",
                    tvaultconf.PASS)
                LOG.debug("Command executed correctly for full snapshot")

            snapshot_id = query_data.get_inprogress_snapshot_id(workload_id)
            LOG.debug("Snapshot ID: " + str(snapshot_id))
            wc = self.wait_for_snapshot_tobe_available(workload_id,
                                                       snapshot_id)
            if (str(wc) == "available"):
                reporting.add_test_step("Full snapshot", tvaultconf.PASS)
                self.created = True
            else:
                # NOTE(review): the "error" branch is a no-op on purpose --
                # self.created stays False and the failure is reported just
                # below via the self.created check.
                if (str(wc) == "error"):
                    pass
            if (self.created == False):
                reporting.add_test_step("Full snapshot", tvaultconf.FAIL)
                raise Exception("Workload snapshot did not get created")

            if (tvaultconf.cleanup == True):
                self.addCleanup(self.snapshot_delete, workload_id, snapshot_id)

            # Add some more data to files on VM so the incremental snapshot
            # has changed blocks to capture.
            ssh = self.SshRemoteMachineConnectionWithRSAKey(str(floating_ip_1))
            self.addCustomfilesOnLinuxVM(ssh, mount_points[0], 2)
            ssh.close()

            ### Incremental snapshot ###

            self.created = False

            # Create incremental snapshot using CLI command
            create_snapshot = command_argument_string.incr_snapshot_create + workload_id
            LOG.debug("Create snapshot command: " + str(create_snapshot))
            rc = cli_parser.cli_returncode(create_snapshot)
            if rc != 0:
                reporting.add_test_step("Execute workload-snapshot command",
                                        tvaultconf.FAIL)
                raise Exception("Command did not execute correctly")
            else:
                reporting.add_test_step("Execute workload-snapshot command",
                                        tvaultconf.PASS)
                LOG.debug("Command executed correctly")

            incr_snapshot_id = query_data.get_inprogress_snapshot_id(
                workload_id)
            LOG.debug("Incremental Snapshot ID: " + str(incr_snapshot_id))
            # Wait for incremental snapshot to complete
            wc = self.wait_for_snapshot_tobe_available(workload_id,
                                                       incr_snapshot_id)
            if (str(wc) == "available"):
                reporting.add_test_step("Incremental snapshot",
                                        tvaultconf.PASS)
                LOG.debug(
                    "Workload incremental snapshot successfully completed")
                self.created = True
            if (self.created == False):
                reporting.add_test_step("Incremental snapshot",
                                        tvaultconf.FAIL)
                raise Exception(
                    "Workload incremental snapshot did not get created")

            if (tvaultconf.cleanup == True):
                self.addCleanup(self.snapshot_delete, workload_id,
                                incr_snapshot_id)

            ### Selective restore ###

            instance_details = []
            network_details = []
            restored_vm_details = []
            vms_details_after_restore = []
            temp_vdisks_data = []

            int_net_1_name = self.get_net_name(
                CONF.network.internal_network_id)
            LOG.debug("int_net_1_name" + str(int_net_1_name))
            int_net_1_subnets = self.get_subnet_id(
                CONF.network.internal_network_id)
            LOG.debug("int_net_1_subnet" + str(int_net_1_subnets))

            temp_vdisks_data.append([{
                'id': volume_id,
                'availability_zone': CONF.volume.volume_availability_zone,
                'new_volume_type': CONF.volume.volume_type
            }])

            LOG.debug("Vdisks details for restore" + str(temp_vdisks_data))

            # Create instance details for restore.json
            vm_name = "tempest_test_vm_" + vm_id + "_restored"
            temp_instance_data = {
                'id': vm_id,
                'availability_zone': CONF.compute.vm_availability_zone,
                'include': True,
                'restore_boot_disk': True,
                'name': vm_name,
                'vdisks': temp_vdisks_data[0]
            }
            instance_details.append(temp_instance_data)
            LOG.debug("Instance details for restore: " + str(instance_details))

            # Create network details for restore.json
            snapshot_network = {
                'id': CONF.network.internal_network_id,
                'subnet': {
                    'id': int_net_1_subnets
                }
            }
            target_network = {
                'name': int_net_1_name,
                'id': CONF.network.internal_network_id,
                'subnet': {
                    'id': int_net_1_subnets
                }
            }
            network_details = [{
                'snapshot_network': snapshot_network,
                'target_network': target_network
            }]
            LOG.debug("Network details for restore: " + str(network_details))
            LOG.debug("Snapshot id : " + str(snapshot_id))

            # Trigger selective restore
            restore_id_1 = self.snapshot_selective_restore(
                workload_id,
                snapshot_id,
                restore_name=tvaultconf.restore_name,
                restore_cleanup=True,
                instance_details=instance_details,
                network_details=network_details)
            self.wait_for_snapshot_tobe_available(workload_id, snapshot_id)
            if (self.getRestoreStatus(workload_id, snapshot_id,
                                      restore_id_1) == "available"):
                reporting.add_test_step("Selective restore", tvaultconf.PASS)
            else:
                reporting.add_test_step("Selective restore", tvaultconf.FAIL)
                raise Exception("Selective restore failed")

            # Fetch instance details after restore
            vm_list = self.get_restored_vm_list(restore_id_1)
            LOG.debug("Restored vm(selective) ID : " + str(vm_list))

            time.sleep(40)
            floating_ip_2 = self.assign_floating_ips(vm_list[0], True)
            LOG.debug("Floating ip assigned to selective restore vm -> " +
                      str(floating_ip_2))
            md5sums_dir_after = {}
            ssh = self.SshRemoteMachineConnectionWithRSAKey(str(floating_ip_2))
            self.execute_command_disk_mount(ssh, str(floating_ip_2),
                                            [volumes[0]], [mount_points[0]])
            time.sleep(5)
            md5sums_dir_after = self.calcmd5sum(floating_ip_2, mount_points[0])
            ssh.close()

            LOG.debug("md5sums_dir_before")
            LOG.debug(md5sums_dir_before[str(floating_ip_1)])
            LOG.debug("md5sums_dir_after")
            LOG.debug(md5sums_dir_after[str(floating_ip_2)])

            if md5sums_dir_before[str(floating_ip_1)] == md5sums_dir_after[str(
                    floating_ip_2)]:
                LOG.debug("***MDSUMS MATCH***")
                reporting.add_test_step("Md5 Verification for volume",
                                        tvaultconf.PASS)
            else:
                LOG.debug("***MDSUMS DON'T MATCH***")
                reporting.add_test_step("Md5 Verification for volume",
                                        tvaultconf.FAIL)
                reporting.set_test_script_status(tvaultconf.FAIL)

            # NOTE(review): loop variable ``id`` shadows the builtin; kept
            # as-is here since this is a doc-only pass.
            for id in range(len(vm_list)):
                restored_vm_details.append(self.get_vm_details(vm_list[id]))
            LOG.debug("Restored vm details list: " + str(restored_vm_details))

            vms_details_after_restore = self.get_vms_details_list(
                restored_vm_details)
            LOG.debug("VM details after restore: " +
                      str(vms_details_after_restore))

            # Compare the data before and after restore
            for i in range(len(vms_details_after_restore)):
                if (vms_details_after_restore[i]['network_name'] ==
                        int_net_1_name):
                    reporting.add_test_step(
                        "Network verification for instance-" + str(i + 1),
                        tvaultconf.PASS)
                else:
                    LOG.error("Expected network: " + str(int_net_1_name))
                    LOG.error(
                        "Restored network: " +
                        str(vms_details_after_restore[i]['network_name']))
                    reporting.add_test_step(
                        "Network verification for instance-" + str(i + 1),
                        tvaultconf.FAIL)
                    reporting.set_test_script_status(tvaultconf.FAIL)

            ### In-place Restore ###

            # Create in-place restore with CLI command
            restore_command = command_argument_string.inplace_restore + str(
                tvaultconf.restore_filename) + " " + str(incr_snapshot_id)

            LOG.debug("inplace restore cli command: " + str(restore_command))
            restore_json = json.dumps({
                'openstack': {
                    'instances': [{
                        'restore_boot_disk':
                        True,
                        'include':
                        True,
                        'id':
                        vm_id,
                        'vdisks': [{
                            'restore_cinder_volume': True,
                            'id': volume_id,
                            'new_volume_type': CONF.volume.volume_type
                        }],
                    }],
                    'networks_mapping': {
                        'networks': []
                    }
                },
                'restore_type': 'inplace',
                'type': 'openstack'
            })
            LOG.debug("restore.json for inplace restore: " + str(restore_json))
            # Create Restore.json
            # NOTE(review): str(json.loads(...)) writes a Python dict repr
            # (single quotes, True/False), not strict JSON -- presumably the
            # CLI's parser accepts that format; confirm before changing.
            with open(tvaultconf.restore_filename, 'w') as f:
                f.write(str(json.loads(restore_json)))
            rc = cli_parser.cli_returncode(restore_command)
            if rc != 0:
                reporting.add_test_step("Triggering In-Place restore via CLI",
                                        tvaultconf.FAIL)
                raise Exception("Command did not execute correctly")
            else:
                reporting.add_test_step("Triggering In-Place restore via CLI",
                                        tvaultconf.PASS)
                LOG.debug("Command executed correctly")

            # get restore id from database
            restore_id_2 = query_data.get_snapshot_restore_id(incr_snapshot_id)
            self.wait_for_snapshot_tobe_available(workload_id,
                                                  incr_snapshot_id)

            # get in-place restore status
            if (self.getRestoreStatus(workload_id, incr_snapshot_id,
                                      restore_id_2) == "available"):
                reporting.add_test_step("In-place restore", tvaultconf.PASS)
            else:
                reporting.add_test_step("In-place restore", tvaultconf.FAIL)
                raise Exception("In-place restore failed")

            # Fetch instance details after restore
            vm_list = self.get_restored_vm_list(restore_id_2)
            LOG.debug("Restored vm(In-place) ID : " + str(vm_list))

            time.sleep(40)
            md5sums_dir_after = {}
            ssh = self.SshRemoteMachineConnectionWithRSAKey(str(floating_ip_1))
            self.execute_command_disk_mount(ssh, str(floating_ip_1),
                                            [volumes[0]], [mount_points[0]])
            time.sleep(5)
            md5sums_dir_after = self.calcmd5sum(floating_ip_1, mount_points[0])
            ssh.close()

            LOG.debug("<----md5sums_dir_before---->")
            LOG.debug(md5sums_dir_before[str(floating_ip_1)])
            LOG.debug("<----md5sums_dir_after---->")
            LOG.debug(md5sums_dir_after[str(floating_ip_1)])

            if md5sums_dir_before[str(floating_ip_1)] == md5sums_dir_after[str(
                    floating_ip_1)]:
                LOG.debug("***MDSUMS MATCH***")
                reporting.add_test_step("Md5 Verification for volume",
                                        tvaultconf.PASS)
            else:
                LOG.debug("***MDSUMS DON'T MATCH***")
                reporting.add_test_step("Md5 Verification for volume",
                                        tvaultconf.FAIL)
                reporting.set_test_script_status(tvaultconf.FAIL)

            if (tvaultconf.cleanup == True):
                self.addCleanup(self.restore_delete, workload_id,
                                incr_snapshot_id, restore_id_2)
                LOG.debug("Snapshot Restore(in-place) deleted successfully")

            ### One-click restore ###

            mdb = self.calcmd5sum(floating_ip_1, mount_points[0])
            LOG.debug(
                "MD5SUMS before deleting the instance for one click restore : "
                + str(mdb))

            self.disassociate_floating_ip_from_server(floating_ip_1, vm_id)
            self.detach_volume(vm_id, volume_id)

            # Delete the original instance
            self.delete_vm(vm_id)
            LOG.debug(
                "Instance deleted successfully for one click restore : " +
                str(vm_id))
            time.sleep(10)

            # Delete volume attached to original instance
            self.delete_volume(volume_id)
            LOG.debug("Volumes deleted successfully for one click restore : " +
                      str(volume_id))
            # From here on the except handler must not try to delete the
            # original VM/volume again.
            deleted = 1

            # Create one-click restore using CLI command
            restore_command = command_argument_string.oneclick_restore + " " + snapshot_id
            rc = cli_parser.cli_returncode(restore_command)
            if rc != 0:
                reporting.add_test_step(
                    "Execute snapshot-oneclick-restore command",
                    tvaultconf.FAIL)
                raise Exception("Command did not execute correctly")
            else:
                reporting.add_test_step(
                    "Execute snapshot-oneclick-restore command",
                    tvaultconf.PASS)
                LOG.debug("Command executed correctly")

            wc = query_data.get_snapshot_restore_status(
                tvaultconf.restore_name, snapshot_id)
            LOG.debug("Snapshot restore status: " + str(wc))

            restore_id_3 = query_data.get_snapshot_restore_id(snapshot_id)
            LOG.debug("Restore ID: " + str(restore_id_3))

            self.wait_for_snapshot_tobe_available(workload_id, snapshot_id)
            if (self.getRestoreStatus(workload_id, snapshot_id,
                                      restore_id_3) == "available"):
                reporting.add_test_step("One-click restore", tvaultconf.PASS)
                LOG.debug("One-click restore passed")
            else:
                reporting.add_test_step("One-click restore", tvaultconf.FAIL)
                LOG.debug("One-click restore failed")
                raise Exception("One-click restore failed")
            LOG.debug("One-click restore complete")

            # Fetch instance details after restore
            vm_list = self.get_restored_vm_list(restore_id_3)
            LOG.debug("Restored vms : " + str(vm_list))

            mda = {}
            ssh = self.SshRemoteMachineConnectionWithRSAKey(str(floating_ip_1))
            self.execute_command_disk_mount(ssh, str(floating_ip_1),
                                            [volumes[0]], [mount_points[0]])
            time.sleep(5)
            mda = self.calcmd5sum(floating_ip_1, mount_points[0])
            LOG.debug(
                "MD5SUMS after deleting the instance for one click restore : "
                + str(mda))
            ssh.close()

            if mdb[str(floating_ip_1)] == mda[str(floating_ip_1)]:
                LOG.debug("***MDSUMS MATCH***")
                reporting.add_test_step("Md5 Verification for volume",
                                        tvaultconf.PASS)
            else:
                LOG.debug("***MDSUMS DON'T MATCH***")
                reporting.add_test_step("Md5 Verification for volume",
                                        tvaultconf.FAIL)
                reporting.set_test_script_status(tvaultconf.FAIL)

            restored_volumes = self.get_restored_volume_list(restore_id_3)
            LOG.debug("Restored volumes : ")
            LOG.debug(restored_volumes)

            if (tvaultconf.cleanup == True):
                self.addCleanup(self.restore_delete, workload_id, snapshot_id,
                                restore_id_3)
                self.addCleanup(self.delete_restored_vms, vm_list,
                                restored_volumes)

            reporting.test_case_to_write()

        except Exception as e:
            LOG.error("Exception: " + str(e))
            # Best-effort teardown of the original resources if they were
            # not already deleted by the one-click-restore stage.
            if (deleted == 0):
                self.disassociate_floating_ip_from_server(floating_ip_1, vm_id)
                self.detach_volume(vm_id, volume_id)
                self.delete_vm(vm_id)
                time.sleep(10)
                self.delete_volume(volume_id)
            reporting.set_test_script_status(tvaultconf.FAIL)
            reporting.test_case_to_write()
Example #3
    def test_tvault1037_list_restore(self):
        """Verify the ``restore-list`` CLI output against the database.

        Flow: launch a VM with an attached volume, create a workload and a
        full snapshot, delete the source VM and volume, trigger a one-click
        restore and wait for it to finish, then run ``restore-list`` via
        the CLI and compare its restore count with the count recorded in
        the database.  Every stage is reported via ``reporting``; a failed
        stage raises and the except handler marks the script FAIL.
        """
        try:
            # Prerequisites
            self.created = False
            self.workload_instances = []

            # Launch instance
            self.vm_id = self.create_vm()
            LOG.debug("VM ID: " + str(self.vm_id))

            # Create volume
            self.volume_id = self.create_volume()
            LOG.debug("Volume ID: " + str(self.volume_id))

            # Attach volume to the instance
            self.attach_volume(self.volume_id, self.vm_id)
            LOG.debug("Volume attached")

            # Create workload
            self.workload_instances.append(self.vm_id)
            self.wid = self.workload_create(
                self.workload_instances,
                tvaultconf.parallel,
                workload_name=tvaultconf.workload_name)
            LOG.debug("Workload ID: " + str(self.wid))
            time.sleep(5)

            # Create snapshot
            self.snapshot_id = self.workload_snapshot(self.wid, True,
                                                      tvaultconf.snapshot_name)
            LOG.debug("Snapshot ID: " + str(self.snapshot_id))
            self.wait_for_snapshot_tobe_available(self.wid, self.snapshot_id)

            # Delete instance
            self.delete_vm(self.vm_id)
            LOG.debug("Instance deleted successfully")

            # Delete corresponding volume
            self.delete_volume(self.volume_id)
            LOG.debug("Volume deleted successfully")

            # Create one-click restore
            self.restore_id = self.snapshot_restore(self.wid, self.snapshot_id,
                                                    tvaultconf.restore_name)
            LOG.debug("Restore ID: " + str(self.restore_id))

            # Poll the DB until the restore reaches a terminal state.
            # BUG FIX: the old loop condition used ``or`` and so was always
            # true (same polling elsewhere in this file correctly uses
            # ``and``); an initially-"available" status was only handled by
            # accident of that broken condition.  Poll explicitly instead.
            wc = query_data.get_snapshot_restore_status(
                tvaultconf.restore_name, self.snapshot_id)
            LOG.debug("Snapshot restore status: " + str(wc))
            while True:
                if str(wc) == "available":
                    LOG.debug("Snapshot Restore successfully completed")
                    self.created = True
                    break
                if str(wc) == "error":
                    break
                time.sleep(5)
                wc = query_data.get_snapshot_restore_status(
                    tvaultconf.restore_name, self.snapshot_id)
                LOG.debug("Snapshot restore status: " + str(wc))

            if not self.created:
                reporting.add_test_step("One click Restore", tvaultconf.FAIL)
                raise Exception("Snapshot Restore did not get created")

            # List Restores using CLI command
            rc = cli_parser.cli_returncode(
                command_argument_string.restore_list)
            if rc != 0:
                reporting.add_test_step("Execute restore-list command",
                                        tvaultconf.FAIL)
                raise Exception("Command did not execute correctly")
            else:
                reporting.add_test_step("Execute restore-list command",
                                        tvaultconf.PASS)
                LOG.debug("Command executed correctly")

            # Cross-check: number of restores reported by the CLI must
            # equal the number of available restores in the DB.
            wc = query_data.get_available_restores()
            out = cli_parser.cli_output(command_argument_string.restore_list)
            if (int(wc) == int(out)):
                reporting.add_test_step("Verification with DB",
                                        tvaultconf.PASS)
                LOG.debug(
                    "Restore list command listed available restores correctly")
            else:
                reporting.add_test_step("Verification with DB",
                                        tvaultconf.FAIL)
                raise Exception(
                    "Restore list command did not list available restores correctly"
                )
            reporting.test_case_to_write()

        except Exception as e:
            LOG.error("Exception: " + str(e))
            reporting.set_test_script_status(tvaultconf.FAIL)
            reporting.test_case_to_write()
    def test_6_regression(self):
        """One-click restore of a boot-from-image instance via CLI.

        Deletes the original instances (plus key pair, security group and
        flavor), runs ``snapshot-oneclick-restore`` on the second snapshot,
        waits for the restore to reach a terminal state, then verifies the
        restored VMs' floating IPs and md5sums against the pre-restore data
        captured by the prerequisite step.
        """
        reporting.add_test_script(
            str(__name__) + "_one_click_restore_bootfrom_image")
        try:
            if self.exception != "":
                LOG.debug("pre req failed")
                reporting.add_test_step(str(self.exception), tvaultconf.FAIL)
                raise Exception(str(self.exception))
            LOG.debug("pre req completed")

            self.created = False

            # Delete the original instances and their associated resources
            # so the one-click restore can recreate them in place.
            self.delete_vms(self.workload_instances)
            self.delete_key_pair(tvaultconf.key_pair_name)
            self.delete_security_group(self.security_group_id)
            self.delete_flavor(self.flavor_id)
            LOG.debug("Instances deleted successfully")

            # Create one-click restore using CLI command
            restore_command = (command_argument_string.oneclick_restore +
                               " " + self.snapshot_ids[1])
            rc = cli_parser.cli_returncode(restore_command)
            if rc != 0:
                reporting.add_test_step(
                    "Execute snapshot-oneclick-restore command",
                    tvaultconf.FAIL)
                raise Exception("Command did not execute correctly")
            else:
                reporting.add_test_step(
                    "Execute snapshot-oneclick-restore command",
                    tvaultconf.PASS)
                LOG.debug("Command executed correctly")

            # Poll until the restore reaches a terminal state.
            # BUGFIX: the old condition `wc != "available" or wc != "error"`
            # was a tautology (always True); use membership so the loop also
            # exits when the very first query is already terminal.
            wc = query_data.get_snapshot_restore_status(
                tvaultconf.restore_name, self.snapshot_ids[1])
            LOG.debug("Snapshot restore status: " + str(wc))
            while str(wc) not in ("available", "error"):
                time.sleep(5)
                wc = query_data.get_snapshot_restore_status(
                    tvaultconf.restore_name, self.snapshot_ids[1])
                LOG.debug("Snapshot restore status: " + str(wc))
            if str(wc) == "available":
                LOG.debug("Snapshot Restore successfully completed")
                reporting.add_test_step(
                    "Snapshot one-click restore verification with DB",
                    tvaultconf.PASS)
                self.created = True

            if not self.created:
                reporting.add_test_step(
                    "Snapshot one-click restore verification with DB",
                    tvaultconf.FAIL)
                raise Exception("Snapshot Restore did not get created")

            # BUGFIX: look up the restore id by the snapshot that was
            # actually restored (snapshot_ids[1]), not self.snapshot_id,
            # which belongs to a different test flow.
            self.restore_id = query_data.get_snapshot_restore_id(
                self.snapshot_ids[1])
            LOG.debug("Restore ID: " + str(self.restore_id))

            # Fetch instance details after restore
            self.restored_vm_details_list = []

            # restored vms list
            self.vm_list = self.get_restored_vm_list(self.restore_id)
            LOG.debug("Restored vms : " + str(self.vm_list))

            # restored vms all details list ("idx" avoids shadowing the
            # builtin "id")
            for idx in range(len(self.workload_instances)):
                self.restored_vm_details_list.append(
                    self.get_vm_details(self.vm_list[idx]))
            LOG.debug("Restored vm details list: " +
                      str(self.restored_vm_details_list))

            # required details of restored vms
            self.vms_details_after_restore = self.get_vms_details_list(
                self.restored_vm_details_list)
            LOG.debug("VM details after restore: " +
                      str(self.vms_details_after_restore))

            # Verify floating ips.
            # BUGFIX: list.sort() returns None, so the old comparison
            # `a.sort() == b.sort()` was always None == None (always True).
            # Compare sorted copies instead.
            self.floating_ips_after_restore = [
                vm['floating_ip'] for vm in self.vms_details_after_restore
            ]
            if (sorted(self.floating_ips_after_restore) ==
                    sorted(self.floating_ips_list)):
                reporting.add_test_step("Floating ip verification",
                                        tvaultconf.PASS)
            else:
                LOG.error("Floating ips before restore: " +
                          str(sorted(self.floating_ips_list)))
                LOG.error("Floating ips after restore: " +
                          str(sorted(self.floating_ips_after_restore)))
                reporting.add_test_step("Floating ip verification",
                                        tvaultconf.FAIL)
                reporting.set_test_script_status(tvaultconf.FAIL)

            # calculate md5sum after restore (nested defaultdict keyed by
            # floating ip then mount point, mirroring md5sums_dir_before)
            tree = lambda: collections.defaultdict(tree)
            md5_sum_after_oneclick_restore = tree()
            for floating_ip in self.floating_ips_list:
                for mount_point in mount_points:
                    ssh = self.SshRemoteMachineConnectionWithRSAKey(
                        str(floating_ip))
                    md5_sum_after_oneclick_restore[str(floating_ip)][str(
                        mount_point)] = self.calculatemmd5checksum(
                            ssh, mount_point)
                    ssh.close()
            LOG.debug("md5_sum_after_oneclick_restore" +
                      str(md5_sum_after_oneclick_restore))

            # md5sum verification
            if self.md5sums_dir_before == md5_sum_after_oneclick_restore:
                reporting.add_test_step("Md5 Verification", tvaultconf.PASS)
            else:
                reporting.set_test_script_status(tvaultconf.FAIL)
                reporting.add_test_step("Md5 Verification", tvaultconf.FAIL)

            reporting.test_case_to_write()

        except Exception as e:
            LOG.error("Exception: " + str(e))
            reporting.set_test_script_status(tvaultconf.FAIL)
            reporting.test_case_to_write()
    def test_tvault1040_oneclick_restore(self):
        """End-to-end one-click restore of a single instance via CLI.

        Launches a VM with an attached volume, creates a workload and a full
        snapshot, deletes the original VM and volume, then runs the
        ``snapshot-oneclick-restore`` CLI command and verifies via the DB
        that the restore completes.  Finally deletes the restore record.
        """
        try:
            # Prerequisites
            self.created = False
            self.workload_instances = []

            # Launch instance
            self.vm_id = self.create_vm(vm_cleanup=False)
            LOG.debug("VM ID: " + str(self.vm_id))

            # Create volume
            self.volume_id = self.create_volume(volume_cleanup=False)
            LOG.debug("Volume ID: " + str(self.volume_id))

            # Attach volume to the instance (no auto-detach: the VM is
            # deleted later in this test)
            self.attach_volume(self.volume_id,
                               self.vm_id,
                               attach_cleanup=False)
            LOG.debug("Volume attached")

            # Create workload
            self.workload_instances.append(self.vm_id)
            self.wid = self.workload_create(
                self.workload_instances,
                tvaultconf.parallel,
                workload_name=tvaultconf.workload_name)
            LOG.debug("Workload ID: " + str(self.wid))
            time.sleep(5)

            # Create full snapshot
            self.snapshot_id = self.workload_snapshot(self.wid, True,
                                                      tvaultconf.snapshot_name)
            LOG.debug("Snapshot ID: " + str(self.snapshot_id))

            # Wait till snapshot is complete
            self.wait_for_snapshot_tobe_available(self.wid, self.snapshot_id)

            # Delete the original instance
            self.delete_vm(self.vm_id)
            LOG.debug("Instance deleted successfully")

            # Delete corresponding volume
            self.delete_volume(self.volume_id)
            LOG.debug("Volume deleted successfully")

            # Create one-click restore using CLI command
            restore_command = (command_argument_string.oneclick_restore +
                               " " + self.snapshot_id)
            rc = cli_parser.cli_returncode(restore_command)
            if rc != 0:
                reporting.add_test_step(
                    "Execute snapshot-oneclick-restore command",
                    tvaultconf.FAIL)
                raise Exception("Command did not execute correctly")
            else:
                reporting.add_test_step(
                    "Execute snapshot-oneclick-restore command",
                    tvaultconf.PASS)
                LOG.debug("Command executed correctly")

            # Poll until the restore reaches a terminal state.
            # BUGFIX: the old condition `wc != "available" or wc != "error"`
            # was a tautology (always True); use membership so the loop also
            # exits when the very first query is already terminal.
            wc = query_data.get_snapshot_restore_status(
                tvaultconf.restore_name, self.snapshot_id)
            LOG.debug("Snapshot restore status: " + str(wc))
            while str(wc) not in ("available", "error"):
                time.sleep(5)
                wc = query_data.get_snapshot_restore_status(
                    tvaultconf.restore_name, self.snapshot_id)
                LOG.debug("Snapshot restore status: " + str(wc))
            if str(wc) == "available":
                LOG.debug("Snapshot Restore successfully completed")
                reporting.add_test_step(
                    "Snapshot one-click restore verification with DB",
                    tvaultconf.PASS)
                self.created = True

            if not self.created:
                reporting.add_test_step(
                    "Snapshot one-click restore verification with DB",
                    tvaultconf.FAIL)
                raise Exception("Snapshot Restore did not get created")

            self.restore_id = query_data.get_snapshot_restore_id(
                self.snapshot_id)
            LOG.debug("Restore ID: " + str(self.restore_id))

            # Cleanup: delete the restore record for the snapshot.
            # NOTE(review): deletion of volume snapshots and of the restored
            # VM/volume is intentionally skipped here (was disabled in the
            # original test) — confirm whether it should be re-enabled.
            self.restore_delete(self.wid, self.snapshot_id, self.restore_id)
            LOG.debug("Snapshot Restore deleted successfully")

            reporting.test_case_to_write()

        except Exception as e:
            LOG.error("Exception: " + str(e))
            reporting.set_test_script_status(tvaultconf.FAIL)
            reporting.test_case_to_write()