def test_tvault1033_create_workload(self):
        try:
            # Prerequisites
            self.created = False
            # Launch instance
            self.vm_id = self.create_vm()
            LOG.debug("VM ID: " + str(self.vm_id))

            # Create volume
            self.volume_id = self.create_volume()
            LOG.debug("Volume ID: " + str(self.volume_id))

            # Attach volume to the instance
            self.attach_volume(self.volume_id, self.vm_id)
            LOG.debug("Volume attached")

            # Create workload with CLI command
            workload_create = command_argument_string.workload_create + \
                " --instance instance-id=" + str(self.vm_id)
            rc = cli_parser.cli_returncode(workload_create)
            if rc != 0:
                reporting.add_test_step(
                    "Execute workload-create command", tvaultconf.FAIL)
                raise Exception("Command did not execute correctly")
            else:
                reporting.add_test_step(
                    "Execute workload-create command", tvaultconf.PASS)
                LOG.debug("Command executed correctly")

            time.sleep(10)
            self.wid = query_data.get_workload_id(tvaultconf.workload_name)
            LOG.debug("Workload ID: " + str(self.wid))
            if(self.wid is not None):
                self.wait_for_workload_tobe_available(self.wid)
                if(self.getWorkloadStatus(self.wid) == "available"):
                    reporting.add_test_step("Create workload", tvaultconf.PASS)
                else:
                    reporting.add_test_step("Create workload", tvaultconf.FAIL)
                    reporting.set_test_script_status(tvaultconf.FAIL)
            else:
                reporting.add_test_step("Create workload", tvaultconf.FAIL)
                reporting.set_test_script_status(tvaultconf.FAIL)

            # Cleanup
            # Delete workload
            self.workload_delete(self.wid)
            LOG.debug("Workload deleted successfully")
            reporting.test_case_to_write()

        except Exception as e:
            LOG.error("Exception: " + str(e))
            reporting.set_test_script_status(tvaultconf.FAIL)
            reporting.test_case_to_write()
Example #2
    def test_1_volume_volume(self):
        try:
            ### VM and Workload ###

            reporting.add_test_script(str(__name__))

            deleted = 0
            global volumes
            mount_points = ["mount_data_a", "mount_data_b"]
            md5sums_dir_before = {}

            #Create Keypair
            kp = self.create_key_pair(tvaultconf.key_pair_name,
                                      keypair_cleanup=True)
            LOG.debug("Key_pair : " + str(kp))

            #Create bootable volume
            boot_volume_id = self.create_volume(
                image_id=CONF.compute.image_ref, volume_cleanup=False)
            self.set_volume_as_bootable(boot_volume_id)
            LOG.debug("Bootable Volume ID : " + str(boot_volume_id))

            self.block_mapping_details = [{
                "source_type": "volume",
                "delete_on_termination": "false",
                "boot_index": 0,
                "uuid": boot_volume_id,
                "destination_type": "volume"
            }]
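            # Block device mapping note: boot_index 0 with source/destination
            # type "volume" boots the instance from the bootable Cinder volume
            # created above, and delete_on_termination "false" keeps that
            # volume when the server is deleted.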

            #Create instance
            vm_id = self.create_vm(
                key_pair=kp,
                image_id="",
                block_mapping_data=self.block_mapping_details,
                vm_cleanup=False)
            LOG.debug("VM ID : " + str(vm_id))
            time.sleep(30)

            #Create and attach volume
            volume_id = self.create_volume(
                volume_type_id=CONF.volume.volume_type_id,
                volume_cleanup=False)
            LOG.debug("Volume ID: " + str(volume_id))
            volumes = tvaultconf.volumes_parts

            self.attach_volume(volume_id, vm_id, attach_cleanup=False)
            LOG.debug("Volume attached")

            #Assign floating IP
            floating_ip_1 = self.assign_floating_ips(vm_id, False)
            LOG.debug("Assigned floating IP : " + str(floating_ip_1))
            LOG.debug("Sleeping for 40 sec")
            time.sleep(40)

            #Adding data and calculating md5sums
            self.data_ops(floating_ip_1, mount_points[0], 3)
            LOG.debug("Created disk and mounted the attached volume")

            md5sums_dir_before = self.calcmd5sum(floating_ip_1,
                                                 mount_points[0])
            LOG.debug("MD5sums for directory on original vm : " +
                      str(md5sums_dir_before))

            workload_create = command_argument_string.workload_create + \
                " --instance instance-id=" + str(vm_id)
            rc = cli_parser.cli_returncode(workload_create)
            if rc != 0:
                reporting.add_test_step("Execute workload-create command",
                                        tvaultconf.FAIL)
                raise Exception(
                    "Workload-create command did not execute correctly")
            else:
                reporting.add_test_step("Execute workload-create command",
                                        tvaultconf.PASS)
                LOG.debug("Workload-create command executed correctly")

            time.sleep(10)
            workload_id = query_data.get_workload_id(tvaultconf.workload_name)
            LOG.debug("Workload ID: " + str(workload_id))
            if (workload_id is not None):
                self.wait_for_workload_tobe_available(workload_id)
                if (self.getWorkloadStatus(workload_id) == "available"):
                    reporting.add_test_step("Create workload", tvaultconf.PASS)
                else:
                    reporting.add_test_step("Create workload", tvaultconf.FAIL)
                    reporting.set_test_script_status(tvaultconf.FAIL)
            else:
                reporting.add_test_step("Create workload", tvaultconf.FAIL)
                reporting.set_test_script_status(tvaultconf.FAIL)

            if (tvaultconf.cleanup):
                self.addCleanup(self.workload_delete, workload_id)

            ### Full Snapshot ###

            self.created = False

            #Create snapshot with CLI command
            create_snapshot = command_argument_string.snapshot_create + workload_id
            LOG.debug("Create snapshot command: " + str(create_snapshot))
            rc = cli_parser.cli_returncode(create_snapshot)
            if rc != 0:
                reporting.add_test_step(
                    "Execute workload-snapshot command with --full",
                    tvaultconf.FAIL)
                raise Exception(
                    "Command did not execute correctly for full snapshot")
            else:
                reporting.add_test_step(
                    "Execute workload-snapshot command with --full",
                    tvaultconf.PASS)
                LOG.debug("Command executed correctly for full snapshot")

            snapshot_id = query_data.get_inprogress_snapshot_id(workload_id)
            LOG.debug("Snapshot ID: " + str(snapshot_id))
            wc = self.wait_for_snapshot_tobe_available(workload_id,
                                                       snapshot_id)
            if (str(wc) == "available"):
                reporting.add_test_step("Full snapshot", tvaultconf.PASS)
                self.created = True
            else:
                reporting.add_test_step("Full snapshot", tvaultconf.FAIL)
                raise Exception("Workload snapshot did not get created")

            if (tvaultconf.cleanup):
                self.addCleanup(self.snapshot_delete, workload_id, snapshot_id)

            LOG.debug("Sleeping for 40s")
            time.sleep(40)

            #Add some more data to files on VM
            ssh = self.SshRemoteMachineConnectionWithRSAKey(str(floating_ip_1))
            self.addCustomfilesOnLinuxVM(ssh, mount_points[0], 2)
            ssh.close()

            ### Incremental snapshot ###

            self.created = False

            #Create incremental snapshot using CLI command
            create_snapshot = command_argument_string.incr_snapshot_create + workload_id
            LOG.debug("Create snapshot command: " + str(create_snapshot))
            rc = cli_parser.cli_returncode(create_snapshot)
            if rc != 0:
                reporting.add_test_step("Execute workload-snapshot command",
                                        tvaultconf.FAIL)
                raise Exception("Command did not execute correctly")
            else:
                reporting.add_test_step("Execute workload-snapshot command",
                                        tvaultconf.PASS)
                LOG.debug("Command executed correctly")

            incr_snapshot_id = query_data.get_inprogress_snapshot_id(
                workload_id)
            LOG.debug("Incremental Snapshot ID: " + str(incr_snapshot_id))
            #Wait for incremental snapshot to complete
            wc = self.wait_for_snapshot_tobe_available(workload_id,
                                                       incr_snapshot_id)
            if (str(wc) == "available"):
                reporting.add_test_step("Incremental snapshot",
                                        tvaultconf.PASS)
                LOG.debug(
                    "Workload incremental snapshot successfully completed")
                self.created = True
            else:
                reporting.add_test_step("Incremental snapshot",
                                        tvaultconf.FAIL)
                raise Exception(
                    "Workload incremental snapshot did not get created")

            if (tvaultconf.cleanup):
                self.addCleanup(self.snapshot_delete, workload_id,
                                incr_snapshot_id)

            ### Selective restore ###

            instance_details = []
            network_details = []
            restored_vm_details = []
            vms_details_after_restore = []
            temp_vdisks_data = []

            int_net_1_name = self.get_net_name(
                CONF.network.internal_network_id)
            LOG.debug("int_net_1_name: " + str(int_net_1_name))
            int_net_1_subnets = self.get_subnet_id(
                CONF.network.internal_network_id)
            LOG.debug("int_net_1_subnets: " + str(int_net_1_subnets))

            temp_vdisks_data.append([{
                'id': volume_id,
                'availability_zone': CONF.volume.volume_availability_zone,
                'new_volume_type': CONF.volume.volume_type
            }])

            LOG.debug("Vdisks details for restore: " + str(temp_vdisks_data))

            #Create instance details for restore.json
            vm_name = "tempest_test_vm_" + vm_id + "_selectively_restored"
            temp_instance_data = {
                'id': vm_id,
                'availability_zone': CONF.compute.vm_availability_zone,
                'include': True,
                'restore_boot_disk': True,
                'name': vm_name,
                'vdisks': temp_vdisks_data[0]
            }
            instance_details.append(temp_instance_data)
            LOG.debug("Instance details for restore: " + str(instance_details))

            #Create network details for restore.json
            snapshot_network = {
                'id': CONF.network.internal_network_id,
                'subnet': {
                    'id': int_net_1_subnets
                }
            }
            target_network = {
                'name': int_net_1_name,
                'id': CONF.network.internal_network_id,
                'subnet': {
                    'id': int_net_1_subnets
                }
            }
            network_details = [{
                'snapshot_network': snapshot_network,
                'target_network': target_network
            }]
            LOG.debug("Network details for restore: " + str(network_details))
            LOG.debug("Snapshot id : " + str(snapshot_id))

            #Trigger selective restore
            restore_id_1 = self.snapshot_selective_restore(
                workload_id,
                snapshot_id,
                restore_name=tvaultconf.restore_name,
                restore_cleanup=True,
                instance_details=instance_details,
                network_details=network_details)
            self.wait_for_snapshot_tobe_available(workload_id, snapshot_id)
            if (self.getRestoreStatus(workload_id, snapshot_id,
                                      restore_id_1) == "available"):
                reporting.add_test_step("Selective restore", tvaultconf.PASS)
            else:
                reporting.add_test_step("Selective restore", tvaultconf.FAIL)
                raise Exception("Selective restore failed")

            #Fetch instance details after restore
            vm_list = self.get_restored_vm_list(restore_id_1)
            LOG.debug("Restored vm(selective) ID : " + str(vm_list))
            time.sleep(60)
            floating_ip_2 = self.assign_floating_ips(vm_list[0], True)
            LOG.debug("Floating ip assigned to selective restore vm -> " +
                      str(floating_ip_2))
            md5sums_dir_after = {}
            ssh = self.SshRemoteMachineConnectionWithRSAKey(str(floating_ip_2))
            self.execute_command_disk_mount(ssh, str(floating_ip_2),
                                            [volumes[0]], [mount_points[0]])
            time.sleep(5)
            md5sums_dir_after = self.calcmd5sum(floating_ip_2, mount_points[0])
            ssh.close()

            LOG.debug("MD5SUMS before restore")
            LOG.debug(md5sums_dir_before[str(floating_ip_1)])
            LOG.debug("MD5SUMS after restore")
            LOG.debug(md5sums_dir_after[str(floating_ip_2)])

            if md5sums_dir_before[str(floating_ip_1)] == md5sums_dir_after[str(
                    floating_ip_2)]:
                LOG.debug("***MD5SUMS MATCH***")
                reporting.add_test_step("Md5 Verification for volume",
                                        tvaultconf.PASS)
            else:
                LOG.debug("***MD5SUMS DON'T MATCH***")
                reporting.add_test_step("Md5 Verification for volume",
                                        tvaultconf.FAIL)
                reporting.set_test_script_status(tvaultconf.FAIL)

            for id in range(len(vm_list)):
                restored_vm_details.append(self.get_vm_details(vm_list[id]))
            LOG.debug("Restored vm details list: " + str(restored_vm_details))

            vms_details_after_restore = self.get_vms_details_list(
                restored_vm_details)
            LOG.debug("VM details after restore: " +
                      str(vms_details_after_restore))
            #Compare the data before and after restore
            for i in range(len(vms_details_after_restore)):
                if (vms_details_after_restore[i]['network_name'] ==
                        int_net_1_name):
                    reporting.add_test_step(
                        "Network verification for instance-" + str(i + 1),
                        tvaultconf.PASS)
                else:
                    LOG.error("Expected network: " + str(int_net_1_name))
                    LOG.error(
                        "Restored network: " +
                        str(vms_details_after_restore[i]['network_name']))
                    reporting.add_test_step(
                        "Network verification for instance-" + str(i + 1),
                        tvaultconf.FAIL)
                    reporting.set_test_script_status(tvaultconf.FAIL)

            ### In-place restore ###

            #Create in-place restore with CLI command
            restore_command = command_argument_string.inplace_restore + str(
                tvaultconf.restore_filename) + " " + str(incr_snapshot_id)

            LOG.debug("inplace restore cli command: " + str(restore_command))
            #Restore.json with only volume 2 excluded
            restore_json = json.dumps({
                'openstack': {
                    'instances': [{
                        'restore_boot_disk': True,
                        'include': True,
                        'id': vm_id,
                        'vdisks': [{
                            'restore_cinder_volume': True,
                            'id': volume_id,
                            'new_volume_type': CONF.volume.volume_type
                        }],
                    }],
                    'networks_mapping': {
                        'networks': []
                    }
                },
                'restore_type': 'inplace',
                'type': 'openstack'
            })
            LOG.debug("restore.json for inplace restore: " + str(restore_json))
            #Create Restore.json
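            # Note: str(json.loads(...)) writes the Python dict repr (single
            # quotes) rather than strict JSON; this is kept as-is on the
            # assumption that the in-place restore CLI accepts that format.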
            with open(tvaultconf.restore_filename, 'w') as f:
                f.write(str(json.loads(restore_json)))
            rc = cli_parser.cli_returncode(restore_command)
            if rc != 0:
                reporting.add_test_step("Triggering In-Place restore via CLI",
                                        tvaultconf.FAIL)
                raise Exception("Command did not execute correctly")
            else:
                reporting.add_test_step("Triggering In-Place restore via CLI",
                                        tvaultconf.PASS)
                LOG.debug("Command executed correctly")

            #get restore id from database
            restore_id_2 = query_data.get_snapshot_restore_id(incr_snapshot_id)
            self.wait_for_snapshot_tobe_available(workload_id,
                                                  incr_snapshot_id)

            #get in-place restore status
            if (self.getRestoreStatus(workload_id, incr_snapshot_id,
                                      restore_id_2) == "available"):
                reporting.add_test_step("In-place restore", tvaultconf.PASS)
            else:
                reporting.add_test_step("In-place restore", tvaultconf.FAIL)
                raise Exception("In-place restore failed")

            #Fetch instance details after restore
            vm_list = []
            vm_list = self.get_restored_vm_list(restore_id_2)
            LOG.debug("Restored vm(In-place) ID : " + str(vm_list))

            time.sleep(40)
            md5sums_dir_after = {}
            ssh = self.SshRemoteMachineConnectionWithRSAKey(str(floating_ip_1))
            self.execute_command_disk_mount(ssh, str(floating_ip_1),
                                            [volumes[0]], [mount_points[0]])
            time.sleep(5)
            md5sums_dir_after = self.calcmd5sum(floating_ip_1, mount_points[0])
            ssh.close()

            LOG.debug("<----md5sums_dir_before---->")
            LOG.debug(md5sums_dir_before[str(floating_ip_1)])
            LOG.debug("<----md5sums_dir_after---->")
            LOG.debug(md5sums_dir_after[str(floating_ip_1)])

            if md5sums_dir_before[str(floating_ip_1)] == md5sums_dir_after[str(
                    floating_ip_1)]:
                LOG.debug("***MD5SUMS MATCH***")
                reporting.add_test_step("Md5 Verification for volume",
                                        tvaultconf.PASS)
            else:
                LOG.debug("***MD5SUMS DON'T MATCH***")
                reporting.add_test_step("Md5 Verification for volume",
                                        tvaultconf.FAIL)
                reporting.set_test_script_status(tvaultconf.FAIL)

            #Delete restore for snapshot
            if (tvaultconf.cleanup):
                self.addCleanup(self.restore_delete, workload_id,
                                incr_snapshot_id, restore_id_2)

            ### One-click restore ###

            mdb = self.calcmd5sum(floating_ip_1, mount_points[0])
            LOG.debug(
                "MD5SUMS before deleting the instance for one click restore : "
                + str(mdb))

            self.disassociate_floating_ip_from_server(floating_ip_1, vm_id)
            self.detach_volume(vm_id, volume_id)

            #Delete the original instance
            self.delete_vm(vm_id)
            LOG.debug(
                "Instance deleted successfully for one click restore : " +
                str(vm_id))
            time.sleep(10)

            #Delete bootable volume of original instance
            self.delete_volume(boot_volume_id)
            LOG.debug("Bootable volume of original instance deleted")

            #Delete volume attached to original instance
            self.delete_volume(volume_id)
            LOG.debug("Volumes deleted successfully for one click restore : " +
                      str(volume_id))

            deleted = 1

            #Create one-click restore using CLI command
            restore_command = command_argument_string.oneclick_restore + " " + snapshot_id
            rc = cli_parser.cli_returncode(restore_command)
            if rc != 0:
                reporting.add_test_step(
                    "Execute snapshot-oneclick-restore command",
                    tvaultconf.FAIL)
                raise Exception("Command did not execute correctly")
            else:
                reporting.add_test_step(
                    "Execute snapshot-oneclick-restore command",
                    tvaultconf.PASS)
                LOG.debug("Command executed correctly")

            restore_id_3 = query_data.get_snapshot_restore_id(snapshot_id)
            LOG.debug("Restore ID: " + str(restore_id_3))

            self.wait_for_snapshot_tobe_available(workload_id, snapshot_id)
            if (self.getRestoreStatus(workload_id, snapshot_id,
                                      restore_id_3) == "available"):
                reporting.add_test_step("One-click restore", tvaultconf.PASS)
                LOG.debug("One-click restore passed")
            else:
                reporting.add_test_step("One-click restore", tvaultconf.FAIL)
                LOG.debug("One-click restore failed")
                raise Exception("One-click restore failed")
            LOG.debug("One-click restore complete")

            #Fetch instance details after restore
            vm_list = []
            vm_list = self.get_restored_vm_list(restore_id_3)
            LOG.debug("Restored vms : " + str(vm_list))

            mda = {}
            ssh = self.SshRemoteMachineConnectionWithRSAKey(str(floating_ip_1))
            self.execute_command_disk_mount(ssh, str(floating_ip_1),
                                            [volumes[0]], [mount_points[0]])
            time.sleep(5)
            mda = self.calcmd5sum(floating_ip_1, mount_points[0])
            LOG.debug(
                "MD5SUMS after deleting the instance for one click restore : "
                + str(mda))
            ssh.close()

            if mdb[str(floating_ip_1)] == mda[str(floating_ip_1)]:
                LOG.debug("***MD5SUMS MATCH***")
                reporting.add_test_step("Md5 Verification for volume",
                                        tvaultconf.PASS)
            else:
                LOG.debug("***MD5SUMS DON'T MATCH***")
                reporting.add_test_step("Md5 Verification for volume",
                                        tvaultconf.FAIL)
                reporting.set_test_script_status(tvaultconf.FAIL)

            restored_volumes = []
            restored_volumes = self.get_restored_volume_list(restore_id_3)
            LOG.debug("Restored volumes : ")
            LOG.debug(restored_volumes)

            if (tvaultconf.cleanup):
                self.addCleanup(self.restore_delete, workload_id, snapshot_id,
                                restore_id_3)
                time.sleep(30)
                self.addCleanup(self.delete_restored_vms, vm_list,
                                restored_volumes)

            reporting.test_case_to_write()

        except Exception as e:
            LOG.error("Exception: " + str(e))
            if (deleted == 0):
                self.disassociate_floating_ip_from_server(floating_ip_1, vm_id)
                self.detach_volume(vm_id, volume_id)
                self.delete_vm(vm_id)
                time.sleep(10)
                self.delete_volume(volume_id)
                self.delete_volume(boot_volume_id)
            reporting.set_test_script_status(tvaultconf.FAIL)
            reporting.test_case_to_write()
Example #3
    def test_1_volume_volume(self):
        try:
            ### VM and Workload ###
            tests = [['tempest.api.workloadmgr.restore.test_volume_vol_Selective-restore',
                      0],
                     ['tempest.api.workloadmgr.restore.test_volume_vol_Inplace-restore',
                      0],
                     ['tempest.api.workloadmgr.restore.test_volume_vol_Oneclick-restore',
                      0]]
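            # Each entry is [test script name, completion flag]; the flag is
            # set to 1 once that sub-test has been reported, so the except
            # block below can mark any unfinished sub-tests as FAIL.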
            reporting.add_test_script(tests[0][0])
            deleted = 0
            global volumes
            mount_points = ["mount_data_a", "mount_data_b"]
            md5sums_before_full = {}

            # Create Keypair
            kp = self.create_key_pair(
                tvaultconf.key_pair_name, keypair_cleanup=True)
            LOG.debug("Key_pair : " + str(kp))

            # Create bootable volume
            boot_volume_id = self.create_volume(
                size=tvaultconf.bootfromvol_vol_size,
                image_id=CONF.compute.image_ref,
                volume_cleanup=False)
            self.set_volume_as_bootable(boot_volume_id)
            LOG.debug("Bootable Volume ID : " + str(boot_volume_id))

            self.block_mapping_details = [{"source_type": "volume",
                                           "delete_on_termination": "false",
                                           "boot_index": 0,
                                           "uuid": boot_volume_id,
                                           "destination_type": "volume"}]

            # Create instance
            vm_id = self.create_vm(
                key_pair=kp,
                image_id="",
                block_mapping_data=self.block_mapping_details,
                vm_cleanup=False)
            LOG.debug("VM ID : " + str(vm_id))
            time.sleep(30)

            # Create and attach volume
            volume_id = self.create_volume(
                volume_type_id=CONF.volume.volume_type_id,
                volume_cleanup=False)
            LOG.debug("Volume ID: " + str(volume_id))
            volumes = tvaultconf.volumes_parts

            self.attach_volume(volume_id, vm_id, attach_cleanup=False)
            LOG.debug("Volume attached")

            # Assign floating IP
            floating_ip_1 = self.assign_floating_ips(vm_id, False)
            LOG.debug("Assigned floating IP : " + str(floating_ip_1))
            LOG.debug("Sleeping for 40 sec")
            time.sleep(40)

            if CONF.validation.ssh_user == 'ubuntu':
                self.install_qemu_ga(floating_ip_1)

            # Adding data and calculating md5sums
            self.data_ops(floating_ip_1, mount_points[0], 3)
            LOG.debug("Created disk and mounted the attached volume")

            md5sums_before_full = self.calcmd5sum(
                floating_ip_1, mount_points[0])
            LOG.debug("\nMD5SUM of the data before full snapshot : {}\n".format(
                md5sums_before_full))

            workload_create = command_argument_string.workload_create + \
                " --instance instance-id=" + str(vm_id)
            rc = cli_parser.cli_returncode(workload_create)
            if rc != 0:
                reporting.add_test_step(
                    "Execute workload-create command", tvaultconf.FAIL)
                raise Exception(
                    "Workload-create command did not execute correctly")
            else:
                reporting.add_test_step(
                    "Execute workload-create command", tvaultconf.PASS)
                LOG.debug("Workload-create command executed correctly")

            time.sleep(10)
            workload_id = query_data.get_workload_id(tvaultconf.workload_name)
            LOG.debug("Workload ID: " + str(workload_id))
            if(workload_id is not None):
                self.wait_for_workload_tobe_available(workload_id)
                if(self.getWorkloadStatus(workload_id) == "available"):
                    reporting.add_test_step("Create workload", tvaultconf.PASS)
                else:
                    reporting.add_test_step("Create workload", tvaultconf.FAIL)
            else:
                reporting.add_test_step("Create workload", tvaultconf.FAIL)

            if (tvaultconf.cleanup):
                self.addCleanup(self.workload_delete, workload_id)

            ### Full Snapshot ###

            snapshot_id = self.create_snapshot(workload_id, is_full=True)
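            # create_snapshot is assumed to be a base-class helper that
            # triggers the snapshot and waits for it to become available,
            # wrapping the manual CLI + wait flow used in the previous example.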

            # Add some more data to files on VM
            ssh = self.SshRemoteMachineConnectionWithRSAKey(str(floating_ip_1))
            self.addCustomfilesOnLinuxVM(ssh, mount_points[0], 2)
            ssh.close()
            md5sums_before_incremental = {}
            md5sums_before_incremental = self.calcmd5sum(
                floating_ip_1, mount_points[0])
            LOG.debug("\nMD5SUM after adding additional data before incremental snapshot : {}\n".format(
                md5sums_before_incremental))

            ### Incremental snapshot ###

            incr_snapshot_id = self.create_snapshot(workload_id, is_full=False)

            ### Selective restore ###

            rest_details = {}
            rest_details['rest_type'] = 'selective'
            rest_details['network_id'] = CONF.network.internal_network_id
            rest_details['subnet_id'] = self.get_subnet_id(
                CONF.network.internal_network_id)
            volumeslist = [boot_volume_id, volume_id]
            rest_details['instances'] = {vm_id: volumeslist}

            payload = self.create_restore_json(rest_details)
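            # create_restore_json builds the selective-restore payload from
            # rest_details; its 'instance_details' and 'network_details'
            # sections are passed to snapshot_selective_restore below.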
            # Trigger selective restore
            restore_id_1 = self.snapshot_selective_restore(
                workload_id,
                snapshot_id,
                restore_name=tvaultconf.restore_name,
                restore_cleanup=True,
                instance_details=payload['instance_details'],
                network_details=payload['network_details'])
            self.wait_for_snapshot_tobe_available(workload_id, snapshot_id)
            if(self.getRestoreStatus(workload_id, snapshot_id, restore_id_1) == "available"):
                reporting.add_test_step("Selective restore", tvaultconf.PASS)
            else:
                reporting.add_test_step("Selective restore", tvaultconf.FAIL)
                raise Exception("Selective restore failed")

            # Fetch instance details after restore
            vm_list = self.get_restored_vm_list(restore_id_1)
            LOG.debug("Restored vm(selective) ID : " + str(vm_list))
            time.sleep(60)
            floating_ip_2 = self.assign_floating_ips(vm_list[0], True)
            LOG.debug(
                "Floating ip assigned to selective restore vm -> " +
                str(floating_ip_2))
            md5sums_after_selective = {}
            ssh = self.SshRemoteMachineConnectionWithRSAKey(str(floating_ip_2))
            self.execute_command_disk_mount(ssh, str(floating_ip_2), [
                                            volumes[0]], [mount_points[0]])
            time.sleep(5)
            md5sums_after_selective = self.calcmd5sum(
                floating_ip_2, mount_points[0])
            ssh.close()

            LOG.debug("MD5SUMS before restore")
            LOG.debug(md5sums_before_full[str(floating_ip_1)])
            LOG.debug("MD5SUMS after selective restore")
            LOG.debug(md5sums_after_selective[str(floating_ip_2)])

            if md5sums_before_full[str(
                floating_ip_1)] == md5sums_after_selective[str(floating_ip_2)]:
                LOG.debug("***MD5SUMS MATCH***")
                reporting.add_test_step(
                    "Md5 Verification for volume", tvaultconf.PASS)
                reporting.set_test_script_status(tvaultconf.PASS)
            else:
                LOG.debug("***MD5SUMS DON'T MATCH***")
                reporting.add_test_step(
                    "Md5 Verification for volume", tvaultconf.FAIL)
                reporting.set_test_script_status(tvaultconf.FAIL)

            restored_vm_details = []
            for id in range(len(vm_list)):
                restored_vm_details.append(self.get_vm_details(vm_list[id]))
            LOG.debug("Restored vm details list: " + str(restored_vm_details))

            vms_details_after_restore = self.get_vms_details_list(
                restored_vm_details)
            LOG.debug("VM details after restore: " +
                      str(vms_details_after_restore))
            # Compare the data before and after restore
            int_net_1_name = self.get_net_name(
                CONF.network.internal_network_id)
            for i in range(len(vms_details_after_restore)):
                if(vms_details_after_restore[i]['network_name'] == int_net_1_name):
                    reporting.add_test_step(
                        "Network verification for instance-" + str(i + 1), tvaultconf.PASS)
                    tests[0][1] = 1
                    reporting.test_case_to_write()
                else:
                    LOG.error("Expected network: " + str(int_net_1_name))
                    LOG.error("Restored network: " +
                              str(vms_details_after_restore[i]['network_name']))
                    reporting.add_test_step(
                        "Network verification for instance-" + str(i + 1), tvaultconf.FAIL)
                    reporting.set_test_script_status(tvaultconf.FAIL)
                    reporting.test_case_to_write()

            ### In-place restore ###

            rest_details = {}
            rest_details['rest_type'] = 'inplace'
            rest_details['instances'] = {vm_id: volumeslist}

            reporting.add_test_script(tests[1][0])
            # Create in-place restore with CLI command
            restore_command = command_argument_string.inplace_restore + \
                str(tvaultconf.restore_filename) + " " + str(snapshot_id)
            payload = self.create_restore_json(rest_details)
            restore_json = json.dumps(payload)
            LOG.debug("restore.json for inplace restore: " + str(restore_json))
            # Create Restore.json
            with open(tvaultconf.restore_filename, 'w') as f:
                f.write(str(yaml.safe_load(restore_json)))
            rc = cli_parser.cli_returncode(restore_command)
            if rc != 0:
                reporting.add_test_step(
                    "Triggering In-Place restore via CLI", tvaultconf.FAIL)
                raise Exception("Command did not execute correctly")
            else:
                reporting.add_test_step(
                    "Triggering In-Place restore via CLI", tvaultconf.PASS)
                LOG.debug("Command executed correctly")

            # get restore id from database
            restore_id_2 = query_data.get_snapshot_restore_id(snapshot_id)
            self.wait_for_snapshot_tobe_available(workload_id, snapshot_id)

            # get in-place restore status
            if(self.getRestoreStatus(workload_id, snapshot_id, restore_id_2) == "available"):
                reporting.add_test_step("In-place restore", tvaultconf.PASS)
            else:
                reporting.add_test_step("In-place restore", tvaultconf.FAIL)
                raise Exception("In-place restore failed")

            # Fetch instance details after restore
            vm_list = []
            vm_list = self.get_restored_vm_list(restore_id_2)
            LOG.debug("Restored vm(In-place) ID : " + str(vm_list))

            time.sleep(60)
            md5sums_after_inplace = {}
            ssh = self.SshRemoteMachineConnectionWithRSAKey(str(floating_ip_1))
            self.execute_command_disk_mount(ssh, str(floating_ip_1), [
                                            volumes[0]], [mount_points[0]])
            time.sleep(5)
            md5sums_after_inplace = self.calcmd5sum(
                floating_ip_1, mount_points[0])
            ssh.close()

            LOG.debug("<----md5sums_before_full---->")
            LOG.debug(md5sums_before_full[str(floating_ip_1)])
            LOG.debug("<----md5sums_after_inplace---->")
            LOG.debug(md5sums_after_inplace[str(floating_ip_1)])

            if md5sums_before_full[str(
                floating_ip_1)] == md5sums_after_inplace[str(floating_ip_1)]:
                LOG.debug("***MD5SUMS MATCH***")
                reporting.add_test_step(
                    "Md5 Verification for volume", tvaultconf.PASS)
                tests[1][1] = 1
                reporting.set_test_script_status(tvaultconf.PASS)
                reporting.test_case_to_write()
            else:
                LOG.debug("***MD5SUMS DON'T MATCH***")
                reporting.add_test_step(
                    "Md5 Verification for volume", tvaultconf.FAIL)
                reporting.set_test_script_status(tvaultconf.FAIL)
                reporting.test_case_to_write()

            # Delete restore for snapshot
            if (tvaultconf.cleanup):
                self.addCleanup(self.restore_delete, workload_id,
                                snapshot_id, restore_id_2)

            ### One-click restore ###

            reporting.add_test_script(tests[2][0])

            self.detach_volume(vm_id, volume_id)

            # Delete the original instance
            self.delete_vm(vm_id)
            LOG.debug(
                "Instance deleted successfully for one click restore : " +
                str(vm_id))
            time.sleep(10)

            # Delete bootable volume of original instance
            self.delete_volume(boot_volume_id)
            LOG.debug("Bootable volume of original instance deleted")

            # Delete volume attached to original instance
            self.delete_volume(volume_id)
            LOG.debug(
                "Volumes deleted successfully for one click restore : " +
                str(volume_id))

            deleted = 1

            # Create one-click restore using CLI command
            restore_command = command_argument_string.oneclick_restore + " " + incr_snapshot_id
            rc = cli_parser.cli_returncode(restore_command)
            if rc != 0:
                reporting.add_test_step(
                    "Execute snapshot-oneclick-restore command",
                    tvaultconf.FAIL)
                raise Exception("Command did not execute correctly")
            else:
                reporting.add_test_step(
                    "Execute snapshot-oneclick-restore command",
                    tvaultconf.PASS)
                LOG.debug("Command executed correctly")

            restore_id_3 = query_data.get_snapshot_restore_id(incr_snapshot_id)
            LOG.debug("Restore ID: " + str(restore_id_3))

            self.wait_for_snapshot_tobe_available(
                workload_id, incr_snapshot_id)
            if(self.getRestoreStatus(workload_id, incr_snapshot_id, restore_id_3) == "available"):
                reporting.add_test_step("One-click restore", tvaultconf.PASS)
                LOG.debug("One-click restore passed")
            else:
                reporting.add_test_step("One-click restore", tvaultconf.FAIL)
                LOG.debug("One-click restore failed")
                raise Exception("One-click restore failed")
            LOG.debug("One-click restore complete")

            # Fetch instance details after restore
            vm_list = []
            vm_list = self.get_restored_vm_list(restore_id_3)
            LOG.debug("Restored vms : " + str(vm_list))

            md5sums_after_1clickrestore = {}
            ssh = self.SshRemoteMachineConnectionWithRSAKey(str(floating_ip_1))
            self.execute_command_disk_mount(ssh, str(floating_ip_1), [
                                            volumes[0]], [mount_points[0]])
            time.sleep(5)
            md5sums_after_1clickrestore = self.calcmd5sum(
                floating_ip_1, mount_points[0])
            LOG.debug("MD5SUMS after one click restore : {}".format(
                md5sums_after_1clickrestore))
            ssh.close()

            if md5sums_before_incremental[str(
                floating_ip_1)] == md5sums_after_1clickrestore[str(floating_ip_1)]:
                LOG.debug("***MD5SUMS MATCH***")
                reporting.add_test_step(
                    "Md5 Verification for volume", tvaultconf.PASS)
                tests[2][1] = 1
                reporting.set_test_script_status(tvaultconf.PASS)
                reporting.test_case_to_write()
            else:
                LOG.debug("***MD5SUMS DON'T MATCH***")
                reporting.add_test_step(
                    "Md5 Verification for volume", tvaultconf.FAIL)
                reporting.set_test_script_status(tvaultconf.FAIL)
                reporting.test_case_to_write()

            restored_volumes = []
            restored_volumes = self.get_restored_volume_list(restore_id_3)
            LOG.debug("Restored volumes : ")
            LOG.debug(restored_volumes)

            if (tvaultconf.cleanup):
                self.addCleanup(self.restore_delete, workload_id,
                                incr_snapshot_id, restore_id_3)
                time.sleep(30)
                self.addCleanup(self.delete_restored_vms,
                                vm_list, restored_volumes)

        except Exception as e:
            LOG.error("Exception: " + str(e))
            if (deleted == 0):
                try:
                    self.delete_vm(vm_id)
                except BaseException:
                    pass
                time.sleep(10)
                try:
                    self.delete_volume(volume_id)
                    self.delete_volume(boot_volume_id)
                except BaseException:
                    pass
            for test in tests:
                if test[1] != 1:
                    reporting.add_test_script(test[0])
                    reporting.set_test_script_status(tvaultconf.FAIL)
                    reporting.test_case_to_write()
    def test_tvault1034_create_scheduled_workload(self):
        try:
            #Prerequisites
            self.created = False
            #Launch instance
            self.vm_id = self.create_vm()
            LOG.debug("VM ID: " + str(self.vm_id))

            #Create volume
            self.volume_id = self.create_volume()
            LOG.debug("Volume ID: " + str(self.volume_id))

            #Attach volume to the instance
            self.attach_volume(self.volume_id, self.vm_id)
            LOG.debug("Volume attached")

            #Create workload with CLI command
            self.start_date = time.strftime("%x")
            self.start_time = time.strftime("%I:%M %p")
            interval = tvaultconf.interval
            retention_policy_type = tvaultconf.retention_policy_type
            retention_policy_value = tvaultconf.retention_policy_value
            workload_create = command_argument_string.workload_create + \
                " --instance instance-id=" + str(self.vm_id) + \
                " --jobschedule start_date=" + str(self.start_date) + \
                " --jobschedule start_time='" + str(self.start_time) + "'" + \
                " --jobschedule interval='" + str(interval) + "'" + \
                " --jobschedule retention_policy_type='" + str(retention_policy_type) + "'" + \
                " --jobschedule retention_policy_value=" + str(retention_policy_value) + \
                " --jobschedule enabled=True"
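            # The assembled command looks roughly like the following (actual
            # values come from tvaultconf; the "workloadmgr workload-create"
            # prefix is an assumption about command_argument_string.workload_create):
            #   workloadmgr workload-create --instance instance-id=<vm_id>
            #       --jobschedule start_date=<MM/DD/YY>
            #       --jobschedule start_time='<HH:MM AM/PM>'
            #       --jobschedule interval='<interval>'
            #       --jobschedule retention_policy_type='<policy type>'
            #       --jobschedule retention_policy_value=<value>
            #       --jobschedule enabled=True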
            rc = cli_parser.cli_returncode(workload_create)
            if rc != 0:
                reporting.add_test_step(
                    "Execute workload-create command with scheduler enabled",
                    tvaultconf.FAIL)
                raise Exception("Command did not execute correctly")
            else:
                reporting.add_test_step(
                    "Execute workload-create command with scheduler enabled",
                    tvaultconf.PASS)
                LOG.debug("Command executed correctly")
            time.sleep(10)
            self.wid = query_data.get_workload_id(tvaultconf.workload_name)
            LOG.debug("Workload ID: " + str(self.wid))
            self.wait_for_workload_tobe_available(self.wid)
            if (self.getWorkloadStatus(self.wid) == "available"):
                reporting.add_test_step("Create scheduled workload",
                                        tvaultconf.PASS)
            else:
                reporting.add_test_step("Create scheduled workload",
                                        tvaultconf.FAIL)
                reporting.set_test_script_status(tvaultconf.FAIL)

            self.schedule = self.getSchedulerStatus(self.wid)
            LOG.debug("Workload schedule: " + str(self.schedule))
            if (self.schedule):
                reporting.add_test_step("Verification", tvaultconf.PASS)
                LOG.debug("Workload schedule enabled")
            else:
                reporting.add_test_step("Verification", tvaultconf.FAIL)
                LOG.error("Workload schedule not enabled")

            #Cleanup
            #Delete workload
            self.workload_delete(self.wid)
            LOG.debug("Workload deleted successfully")
            reporting.test_case_to_write()

        except Exception as e:
            LOG.error("Exception: " + str(e))
            reporting.set_test_script_status(tvaultconf.FAIL)
            reporting.test_case_to_write()
Example #5
    def test_tvault_rbac_backuprole_touser_policyjson(self):
        try:
            workload_create_error_str = "Policy doesn't allow workload:workload_create to be performed."
            snapshot_create_error_str = "Policy doesn't allow workload:workload_snapshot to be performed."
            restore_create_error_str = "Policy doesn't allow snapshot:snapshot_restore to be performed."
            workload_delete_error_str = "Policy doesn't allow workload:workload_delete to be performed."
            snapshot_delete_error_str = "Policy doesn't allow snapshot:snapshot_delete to be performed."
            restore_delete_error_str = "Policy doesn't allow restore:restore_delete to be performed."

            # Change policy.json file on tvault to change role and rule
            self.change_policyjson_file("backup", "backup_api")
            self.instances_id = []

            # Create volume, Launch an Instance
            self.volumes_id = self.create_volume(volume_cleanup=False)
            LOG.debug("Volume-1 ID: " + str(self.volumes_id))
            self.instances_id.append(self.create_vm(vm_cleanup=False))
            LOG.debug("VM-1 ID: " + str(self.instances_id[0]))
            self.attach_volume(self.volumes_id, self.instances_id[0])
            LOG.debug("Volume attached")

            # Use backupuser credentials
            os.environ['OS_USERNAME'] = CONF.identity.backupuser
            os.environ['OS_PASSWORD'] = CONF.identity.backupuser_password
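            # The CLI commands issued below pick up these environment
            # variables, so they run with the backup role user's credentials.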

            # Create workload with CLI by backup role
            workload_create = command_argument_string.workload_create + \
                " --instance instance-id=" + str(self.instances_id[0])
            error = cli_parser.cli_error(workload_create)
            if error and (str(error.strip('\n')).find('ERROR') != -1):
                LOG.debug("workload creation unsuccessful by backup role")
                raise Exception(
                    "RBAC policy fails for workload creation by backup role")
            else:
                LOG.debug("Workload created successfully by backup role")
                reporting.add_test_step(
                    "Execute workload_create command by backup role",
                    tvaultconf.PASS)
                time.sleep(10)
                self.wid1 = query_data.get_workload_id(
                    tvaultconf.workload_name)
                workload_available = self.wait_for_workload_tobe_available(
                    self.wid1)

            # Run snapshot_create CLI by backup role
            snapshot_create = command_argument_string.snapshot_create + \
                str(self.wid1)
            LOG.debug("snapshot_create command: " + str(snapshot_create))
            error = cli_parser.cli_error(snapshot_create)
            if error and (str(error.strip('\n')).find('ERROR') != -1):
                reporting.add_test_step(
                    "Execute snapshot_create command by backup role",
                    tvaultconf.FAIL)
                raise Exception(
                    "Command snapshot_create did not execute correctly by backup role")
            else:
                reporting.add_test_step(
                    "Execute snapshot_create command by backup role",
                    tvaultconf.PASS)
                LOG.debug(
                    "Command snapshot_create executed correctly by backup role")
                self.snapshot_id1 = query_data.get_inprogress_snapshot_id(
                    self.wid1)
                wc = self.wait_for_snapshot_tobe_available(
                    self.wid1, self.snapshot_id1)

            # Delete the original instance
            self.delete_vm(self.instances_id[0])
            LOG.debug("Instance deleted successfully for restore")

            # Delete corresponding volume
            self.delete_volume(self.volumes_id)
            LOG.debug("Volume deleted successfully for restore")

            # Create one-click restore using CLI command by backup role
            restore_command = command_argument_string.oneclick_restore + \
                " " + str(self.snapshot_id1)
            error = cli_parser.cli_error(restore_command)
            if error and (str(error.strip('\n')).find('ERROR') != -1):
                reporting.add_test_step(
                    "Execute snapshot-oneclick-restore command by backup role",
                    tvaultconf.FAIL)
                raise Exception(
                    "Command one-click restore did not execute correctly by backup role")
            else:
                reporting.add_test_step(
                    "Execute snapshot-oneclick-restore command by backup role",
                    tvaultconf.PASS)
                LOG.debug(
                    "Command one-click restore executed correctly by backup role")
                wc = self.wait_for_snapshot_tobe_available(
                    self.wid1, self.snapshot_id1)
                self.restore_id1 = query_data.get_snapshot_restore_id(
                    self.snapshot_id1)
                LOG.debug("Restore ID: " + str(self.restore_id1))
                self.restore_vm_id1 = self.get_restored_vm_list(
                    self.restore_id1)
                LOG.debug("Restore VM ID: " + str(self.restore_vm_id1))
                self.restore_volume_id1 = self.get_restored_volume_list(
                    self.restore_id1)
                LOG.debug("Restore Volume ID: " + str(self.restore_volume_id1))


            # Use admin credentials
            os.environ['OS_USERNAME'] = CONF.identity.username
            os.environ['OS_PASSWORD'] = CONF.identity.password

            # Create workload with CLI by admin role
            workload_create = command_argument_string.workload_create + \
                " --instance instance-id=" + str(self.restore_vm_id1[0])
            error = cli_parser.cli_error(workload_create)
            if error and (str(error.strip('\n')).find(workload_create_error_str) != -1):
                LOG.debug(
                    "Command workload_create did not execute correctly by admin role")
                reporting.add_test_step(
                    "Can not execute workload_create command by admin role",
                    tvaultconf.PASS)
            else:
                reporting.add_test_step(
                    "Can not execute workload_create command by admin role",
                    tvaultconf.FAIL)
                raise Exception(
                    "Command workload_create executed correctly by admin role")

            # Run snapshot_create CLI by admin role
            snapshot_create = command_argument_string.snapshot_create + \
                str(self.wid1)
            LOG.debug("snapshot_create command: " + str(snapshot_create))
            error = cli_parser.cli_error(snapshot_create)
            if error and (str(error.strip('\n')).find(snapshot_create_error_str) != -1):
                reporting.add_test_step(
                    "Can not execute snapshot_create command by admin role",
                    tvaultconf.PASS)
                LOG.debug(
                    "Command snapshot_create did not execute correctly by admin role")
            else:
                reporting.add_test_step(
                    "Can not execute snapshot_create command by admin role",
                    tvaultconf.FAIL)
                raise Exception(
                    "Command snapshot_create executed correctly by admin role")

            # Create one-click restore using CLI command by admin role
            restore_command = command_argument_string.oneclick_restore + \
                " " + str(self.snapshot_id1)
            error = cli_parser.cli_error(restore_command)
            if error and (str(error.strip('\n')).find(restore_create_error_str) != -1):
                reporting.add_test_step(
                    "Can not execute restore_create command by admin role",
                    tvaultconf.PASS)
                LOG.debug(
                    "Command restore_create did not execute correctly by admin role")
            else:
                reporting.add_test_step(
                    "Can not execute restore_create command by admin role",
                    tvaultconf.FAIL)
                raise Exception(
                    "Command restore_create executed correctly by admin role")

            # Run restore_delete CLI by admin role
            restore_delete = command_argument_string.restore_delete + \
                str(self.restore_id1)
            error = cli_parser.cli_error(restore_delete)
            if error and (str(error.strip('\n')).find(restore_delete_error_str) != -1):
                reporting.add_test_step(
                    "Can not execute restore_delete command by admin role",
                    tvaultconf.PASS)
                LOG.debug(
                    "Command restore_delete did not execute correctly by admin role")
            else:
                reporting.add_test_step(
                    "Can not execute restore_delete command by admin role",
                    tvaultconf.FAIL)
                raise Exception(
                    "Command restore_delete executed correctly by admin role")

            # Run snapshot_delete CLI by admin role
            snapshot_delete = command_argument_string.snapshot_delete + \
                str(self.snapshot_id1)
            error = cli_parser.cli_error(snapshot_delete)
            if error and (str(error.strip('\n')).find(snapshot_delete_error_str) != -1):
                reporting.add_test_step(
                    "Can not execute snapshot_delete command by admin role",
                    tvaultconf.PASS)
                LOG.debug(
                    "Command snapshot_delete did not execute correctly by admin role")
            else:
                reporting.add_test_step(
                    "Can not execute snapshot_delete command by admin role",
                    tvaultconf.FAIL)
                raise Exception(
                    "Command snapshot_delete executed correctly by admin role")

            # Delete workload with CLI by admin role
            workload_delete = command_argument_string.workload_delete + \
                str(self.wid1)
            error = cli_parser.cli_error(workload_delete)
            if error and (str(error.strip('\n')).find(workload_delete_error_str) != -1):
                reporting.add_test_step(
                    "Can not execute workload_delete command by admin role",
                    tvaultconf.PASS)
                LOG.debug(
                    "Command workload_delete did not execute correctly by admin role")
            else:
                reporting.add_test_step(
                    "Can not execute workload_delete command by admin role",
                    tvaultconf.FAIL)
                raise Exception(
                    "Command workload_delete executed correctly by admin role")

            # Use nonadmin credentials
            os.environ['OS_USERNAME'] = CONF.identity.nonadmin_user
            os.environ['OS_PASSWORD'] = CONF.identity.nonadmin_password
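            # The workloadmgr CLI calls below inherit these environment variables,
            # so they run under the default role, which the RBAC policy is
            # expected to reject for all workload operations.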

            # Create workload with CLI by default role
            workload_create = command_argument_string.workload_create + \
                " --instance instance-id=" + str(self.restore_vm_id1)
            error = cli_parser.cli_error(workload_create)
            if error and (str(error.strip('\n')).find(workload_create_error_str) != -1):
                LOG.debug(
                    "Command workload_create did not execute correctly by default role")
                reporting.add_test_step(
                    "Can not execute workload_create command by default role",
                    tvaultconf.PASS)
            else:
                reporting.add_test_step(
                    "Can not execute workload_create command by default role",
                    tvaultconf.FAIL)
                raise Exception(
                    "Command workload_create executed correctly by default role")

            # Run snapshot_create CLI by default role
            snapshot_create = command_argument_string.snapshot_create + \
                str(self.wid1)
            error = cli_parser.cli_error(snapshot_create)
            if error and (str(error.strip('\n')).find(snapshot_create_error_str) != -1):
                reporting.add_test_step(
                    "Can not execute snapshot_create command by default role",
                    tvaultconf.PASS)
                LOG.debug(
                    "Command snapshot_create did not execute correctly by default role")
            else:
                reporting.add_test_step(
                    "Can not execute snapshot_create command by default role",
                    tvaultconf.FAIL)
                raise Exception(
                    "Command snapshot_create executed correctly by default role")

            # Create one-click restore using CLI by default role
            restore_command = command_argument_string.oneclick_restore + \
                " " + str(self.snapshot_id1)
            error = cli_parser.cli_error(restore_command)
            if error and (str(error.strip('\n')).find(restore_create_error_str) != -1):
                reporting.add_test_step(
                    "Can not execute restore_create command by default role",
                    tvaultconf.PASS)
                LOG.debug(
                    "Command restore_create did not execute correctly by default role")
            else:
                reporting.add_test_step(
                    "Can not execute restore_create command by default role",
                    tvaultconf.FAIL)
                raise Exception(
                    "Command restore_create executed correctly by default role")

            # Run restore_delete CLI by default role
            restore_delete = command_argument_string.restore_delete + \
                str(self.restore_id1)
            error = cli_parser.cli_error(restore_delete)
            if error and (str(error.strip('\n')).find(restore_delete_error_str) != -1):
                reporting.add_test_step(
                    "Can not execute restore_delete command by default role",
                    tvaultconf.PASS)
                LOG.debug(
                    "Command restore_delete did not execute correctly by default role")
            else:
                reporting.add_test_step(
                    "Can not execute restore_delete command by default role",
                    tvaultconf.FAIL)
                raise Exception(
                    "Command restore_delete executed correctly by default role")

            # Run snapshot_delete CLI by default role
            snapshot_delete = command_argument_string.snapshot_delete + \
                str(self.snapshot_id1)
            LOG.debug("snapshot_delete command: " + str(snapshot_delete))
            error = cli_parser.cli_error(snapshot_delete)
            if error and (str(error.strip('\n')).find(snapshot_delete_error_str) != -1):
                reporting.add_test_step(
                    "Can not execute snapshot_delete command by default role",
                    tvaultconf.PASS)
                LOG.debug(
                    "Command snapshot_delete did not execute correctly by default role")
            else:
                reporting.add_test_step(
                    "Can not execute snapshot_delete command by default role",
                    tvaultconf.FAIL)
                raise Exception(
                    "Command snapshot_delete executed correctly by default role")

            # Delete workload with CLI by default role
            workload_delete = command_argument_string.workload_delete + \
                str(self.wid1)
            error = cli_parser.cli_error(workload_delete)
            if error and (str(error.strip('\n')).find(workload_delete_error_str) != -1):
                reporting.add_test_step(
                    "Can not execute workload_delete command by default role",
                    tvaultconf.PASS)
                LOG.debug(
                    "Command workload_delete did not execute correctly by default role")
            else:
                reporting.add_test_step(
                    "Can not execute workload_delete command by default role",
                    tvaultconf.FAIL)
                raise Exception(
                    "Command workload_delete executed correctly by default role")

            # Use backupuser credentials
            os.environ['OS_USERNAME'] = CONF.identity.backupuser
            os.environ['OS_PASSWORD'] = CONF.identity.backupuser_password
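            # The backup role is authorized for these operations, so the delete
            # commands below are expected to succeed (no CLI error expected).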

            # Run restore_delete CLI by backup role
            restore_delete = command_argument_string.restore_delete + \
                str(self.restore_id1)
            error = cli_parser.cli_error(restore_delete)
            if error and (str(error.strip('\n')).find(restore_delete_error_str) != -1):
                reporting.add_test_step(
                    "Execute  restore_delete command by backup role",
                    tvaultconf.FAIL)
                raise Exception(
                    "Command  restore_delete did not execute correctly by backup role")
            else:
                reporting.add_test_step(
                    "Execute restore_delete command by backup role",
                    tvaultconf.PASS)
                LOG.debug(
                    "Command restore_delete executed correctly by backup role")
                wc = self.wait_for_snapshot_tobe_available(
                    self.wid1, self.snapshot_id1)
                # Delete restored VM instance and volume
                self.delete_restored_vms(
                    self.restore_vm_id1, self.restore_volume_id1)
                LOG.debug("Restored VMs deleted successfully by backup role")

            # Run snapshot_delete CLI by backup role
            snapshot_delete = command_argument_string.snapshot_delete + \
                str(self.snapshot_id1)
            LOG.debug("snapshot_delete command: " + str(snapshot_delete))
            error = cli_parser.cli_error(snapshot_delete)
            if error and (str(error.strip('\n')).find(snapshot_delete_error_str) != -1):
                reporting.add_test_step(
                    "Execute snapshot_delete command by backup role",
                    tvaultconf.FAIL)
                raise Exception(
                    "Command snapshot_delete did not execute correctly by backup role")
            else:
                reporting.add_test_step(
                    "Execute snapshot_delete command by backup role",
                    tvaultconf.PASS)
                LOG.debug(
                    "Command snapshot_delete executed correctly by backup role")
                workload_available = self.wait_for_workload_tobe_available(
                    self.wid1)

            # Delete workload with CLI by backup role
            workload_delete = command_argument_string.workload_delete + \
                str(self.wid1)
            error = cli_parser.cli_error(workload_delete)
            if error and (str(error.strip('\n')).find(workload_delete_error_str) != -1):
                reporting.add_test_step(
                    "Execute workload_delete command by backup role",
                    tvaultconf.FAIL)
                raise Exception(
                    "RBAC policy fails for workload deletion by backup role")
            else:
                LOG.debug("Workload deleted successfully by backup role")
                reporting.add_test_step(
                    "Execute workload_delete command by backup role",
                    tvaultconf.PASS)

            reporting.test_case_to_write()

        except Exception as e:
            LOG.error("Exception: " + str(e))
            reporting.set_test_script_status(tvaultconf.FAIL)
            reporting.test_case_to_write()
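
    # Illustrative helper (not part of the suite): the three credential switches in
    # the test above could be factored out. The helper name is hypothetical; it uses
    # the same os module and CONF.identity options already referenced in the test,
    # and relies on cli_parser subprocesses inheriting the process environment.
    def _switch_cli_credentials(self, username, password):
        """Point subsequent workloadmgr CLI calls at a different OpenStack user."""
        os.environ['OS_USERNAME'] = username
        os.environ['OS_PASSWORD'] = password

    # Hypothetical usage:
    #     self._switch_cli_credentials(CONF.identity.nonadmin_user,
    #                                  CONF.identity.nonadmin_password)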
Example #6
    def test_1_image_booted(self):
        try:
            deleted = 0
            ### VM and Workload ###
            tests = [
                ['tempest.api.workloadmgr.restore.test_image_booted_Selective-restore', 0],
                ['tempest.api.workloadmgr.restore.test_image_booted_Inplace-restore', 0],
                ['tempest.api.workloadmgr.restore.test_image_booted_Oneclick-restore', 0]]
            reporting.add_test_script(tests[0][0])
            data_dir_path = "/root"
            md5sums_before_full = {}
            LOG.debug("******************")            
            kp = self.create_key_pair(tvaultconf.key_pair_name, keypair_cleanup=True)
            LOG.debug("Key_pair : "+str(kp))            

            vm_id = self.create_vm(key_pair=kp, vm_cleanup=False)
            LOG.debug("VM ID : "+str(vm_id))
            time.sleep(30)

            floating_ip_1 = self.assign_floating_ips(vm_id, False)
            LOG.debug("Assigned floating IP : "+str(floating_ip_1))

            LOG.debug("Sleeping for 20 sec")
            time.sleep(20)
            
            self.data_ops(floating_ip_1, data_dir_path, 3)
            LOG.debug("Created data")            

            md5sums_before_full = self.calcmd5sum(floating_ip_1, data_dir_path)
            LOG.debug("MD5sums for directory on original vm : "+str(md5sums_before_full))

            
            workload_create = command_argument_string.workload_create + " --instance instance-id=" +str(vm_id)
            rc = cli_parser.cli_returncode(workload_create)
            if rc != 0:
                reporting.add_test_step("Execute workload-create command", tvaultconf.FAIL)
                raise Exception("Workload-create command did not execute correctly")
            else:
                reporting.add_test_step("Execute workload-create command", tvaultconf.PASS)
                LOG.debug("Workload-create command executed correctly")

            time.sleep(10)
            workload_id = query_data.get_workload_id(tvaultconf.workload_name)
            LOG.debug("Workload ID: " + str(workload_id))
            if(workload_id is not None):
                self.wait_for_workload_tobe_available(workload_id)
                if(self.getWorkloadStatus(workload_id) == "available"):
                    reporting.add_test_step("Create workload", tvaultconf.PASS)
                else:
                    reporting.add_test_step("Create workload", tvaultconf.FAIL)
                    raise Exception("Workload creation failed")
            else:
                reporting.add_test_step("Create workload", tvaultconf.FAIL)
                raise Exception("Workload creation failed")

            if (tvaultconf.cleanup == True):
                self.addCleanup(self.workload_delete, workload_id)

            ### Full snapshot ###

            snapshot_id = self.create_snapshot(workload_id, is_full=True)

            #Add some more data to files on VM
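            # (written after the full snapshot so the incremental snapshot below
            # captures a changed state of the VM)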
            ssh = self.SshRemoteMachineConnectionWithRSAKey(str(floating_ip_1))
            self.addCustomfilesOnLinuxVM(ssh, data_dir_path, 2)
            ssh.close()
            md5sums_before_incremental = {}
            md5sums_before_incremental = self.calcmd5sum(floating_ip_1, data_dir_path)
            LOG.debug("\nMD5SUM after adding additional data before incremental snapshot : {}\n".format(md5sums_before_incremental))

            ### Incremental snapshot ###

            incr_snapshot_id = self.create_snapshot(workload_id, is_full=False)

            ### Selective restore ###

            rest_details = {}
            rest_details['rest_type'] = 'selective'
            rest_details['network_id'] = CONF.network.internal_network_id
            rest_details['subnet_id'] = self.get_subnet_id(CONF.network.internal_network_id)
            volumeslist =  []
            rest_details['instances'] = {vm_id:volumeslist}

            payload = self.create_restore_json(rest_details)
            #Trigger selective restore
            restore_id_1=self.snapshot_selective_restore(workload_id, snapshot_id,restore_name=tvaultconf.restore_name, restore_cleanup=True,
                                                            instance_details=payload['instance_details'], network_details=payload['network_details'])
            self.wait_for_snapshot_tobe_available(workload_id, snapshot_id)
            if(self.getRestoreStatus(workload_id, snapshot_id, restore_id_1) == "available"):
                reporting.add_test_step("Selective restore", tvaultconf.PASS)
            else:
                reporting.add_test_step("Selective restore", tvaultconf.FAIL)
                raise Exception("Selective restore failed")

            #Fetch instance details after restore
            vm_list  =  self.get_restored_vm_list(restore_id_1)
            LOG.debug("Restored vm(selective) ID : " + str(vm_list))
            time.sleep(60)
            floating_ip_2 = self.assign_floating_ips(vm_list[0], True)
            LOG.debug("Floating ip assigned to selective restore vm -> "+str(floating_ip_2))
            md5sums_after_selective = {}
            ssh = self.SshRemoteMachineConnectionWithRSAKey(str(floating_ip_2))
            md5sums_after_selective = self.calcmd5sum(floating_ip_2, data_dir_path)
            ssh.close()

            LOG.debug("MD5SUMS before restore")
            LOG.debug(md5sums_before_full[str(floating_ip_1)])
            LOG.debug("MD5SUMS after selective restore")
            LOG.debug(md5sums_after_selective[str(floating_ip_2)])

            if md5sums_before_full[str(floating_ip_1)] == md5sums_after_selective[str(floating_ip_2)]:
                LOG.debug("***MDSUMS MATCH***")
                reporting.add_test_step("Md5 Verification for volume", tvaultconf.PASS)
                reporting.set_test_script_status(tvaultconf.PASS)
            else:
                LOG.debug("***MDSUMS DON'T MATCH***")
                reporting.add_test_step("Md5 Verification for volume", tvaultconf.FAIL)
                reporting.set_test_script_status(tvaultconf.FAIL)

            restored_vm_details = []
            for id in range(len(vm_list)):
                restored_vm_details.append(self.get_vm_details(vm_list[id]))
            LOG.debug("Restored vm details list: " + str(restored_vm_details))

            vms_details_after_restore = self.get_vms_details_list(restored_vm_details)
            LOG.debug("VM details after restore: " + str(vms_details_after_restore))
            #Compare the network details before and after restore
            int_net_1_name = self.get_net_name(CONF.network.internal_network_id) 
            for i in range(len(vms_details_after_restore)):
                if(vms_details_after_restore[i]['network_name'] == int_net_1_name):
                    reporting.add_test_step("Network verification for instance-" + str(i+1), tvaultconf.PASS)
                    tests[0][1] = 1
                    reporting.test_case_to_write()
                else:
                    LOG.error("Expected network: " + str(int_net_1_name))
                    LOG.error("Restored network: " + str(vms_details_after_restore[i]['network_name']))
                    reporting.add_test_step("Network verification for instance-" + str(i+1), tvaultconf.FAIL)
                    reporting.set_test_script_status(tvaultconf.FAIL)
                    reporting.test_case_to_write()


            ### In-place Restore ###

            rest_details = {}
            rest_details['rest_type'] = 'inplace'
            rest_details['instances'] = {vm_id:volumeslist}

            reporting.add_test_script(tests[1][0]) 
            #Create in-place restore with CLI command
            restore_command  = command_argument_string.inplace_restore + str(tvaultconf.restore_filename) + " "  + str(snapshot_id)
            payload = self.create_restore_json(rest_details)
            restore_json = json.dumps(payload)
            LOG.debug("restore.json for inplace restore: " + str(restore_json))
            #Create Restore.json
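            # Note: the JSON payload is parsed back into a dict and its repr() is
            # what gets written; this test relies on the workloadmgr CLI accepting
            # the restore file in that dict-literal form.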
            with open(tvaultconf.restore_filename, 'w') as f:
                f.write(str(yaml.safe_load(restore_json)))
            rc = cli_parser.cli_returncode(restore_command)
            if rc != 0:
                reporting.add_test_step("Triggering In-Place restore via CLI", tvaultconf.FAIL)
                raise Exception("Command did not execute correctly")
            else:
                reporting.add_test_step("Triggering In-Place restore via CLI", tvaultconf.PASS)
                LOG.debug("Command executed correctly")

            #get restore id from database
            restore_id_2 = query_data.get_snapshot_restore_id(snapshot_id)
            self.wait_for_snapshot_tobe_available(workload_id, snapshot_id)

            #get in-place restore status
            if(self.getRestoreStatus(workload_id, snapshot_id, restore_id_2) == "available"):
                reporting.add_test_step("In-place restore", tvaultconf.PASS)
            else:
                reporting.add_test_step("In-place restore", tvaultconf.FAIL)
                raise Exception("In-place restore failed")

            #Fetch instance details after restore
            vm_list = []
            vm_list  =  self.get_restored_vm_list(restore_id_2)
            LOG.debug("Restored vm(In-place) ID : " + str(vm_list))

            time.sleep(60)
            md5sums_after_inplace = {}
            ssh = self.SshRemoteMachineConnectionWithRSAKey(str(floating_ip_1))
            md5sums_after_inplace = self.calcmd5sum(floating_ip_1, data_dir_path)
            ssh.close()

            LOG.debug("<----md5sums_before_full---->")
            LOG.debug(md5sums_before_full[str(floating_ip_1)])
            LOG.debug("<----md5sums_after_inplace---->")
            LOG.debug(md5sums_after_inplace[str(floating_ip_1)])

            if md5sums_before_full[str(floating_ip_1)] == md5sums_after_inplace[str(floating_ip_1)]:
                LOG.debug("***MDSUMS MATCH***")
                reporting.add_test_step("Md5 Verification for volume", tvaultconf.PASS)
                tests[1][1] = 1
                reporting.set_test_script_status(tvaultconf.PASS)
                reporting.test_case_to_write()
            else:
                LOG.debug("***MDSUMS DON'T MATCH***")
                reporting.add_test_step("Md5 Verification for volume", tvaultconf.FAIL)
                reporting.set_test_script_status(tvaultconf.FAIL)
                reporting.test_case_to_write()

            #Delete restore for snapshot
            if (tvaultconf.cleanup == True):
                self.addCleanup(self.restore_delete, workload_id, snapshot_id, restore_id_2)


            ### One-click restore ###

            reporting.add_test_script(tests[2][0])

            #Delete the original instance
            self.delete_vm(vm_id)
            LOG.debug("Instance deleted successfully for one click restore : "+str(vm_id))
            time.sleep(10)

            deleted = 1
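            # (the flag keeps the except-block cleanup from deleting the VM twice)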

            #Create one-click restore using CLI command
            restore_command = command_argument_string.oneclick_restore + " " + incr_snapshot_id
            rc = cli_parser.cli_returncode(restore_command)
            if rc != 0:
                reporting.add_test_step("Execute snapshot-oneclick-restore command", tvaultconf.FAIL)
                raise Exception("Command did not execute correctly")
            else:
                reporting.add_test_step("Execute snapshot-oneclick-restore command", tvaultconf.PASS)
                LOG.debug("Command executed correctly")

            restore_id_3 = query_data.get_snapshot_restore_id(incr_snapshot_id)
            LOG.debug("Restore ID: " + str(restore_id_3))

            self.wait_for_snapshot_tobe_available(workload_id, incr_snapshot_id)
            if(self.getRestoreStatus(workload_id, incr_snapshot_id, restore_id_3) == "available"):
                reporting.add_test_step("One-click restore", tvaultconf.PASS)
                LOG.debug("One-click restore passed")
            else:
                reporting.add_test_step("One-click restore", tvaultconf.FAIL)
                LOG.debug("One-click restore failed")
                raise Exception("One-click restore failed")
            LOG.debug("One-click restore complete")

            #Fetch instance details after restore
            vm_list = []
            vm_list  =  self.get_restored_vm_list(restore_id_3)
            LOG.debug("Restored vms : " + str(vm_list))

            md5sums_after_1clickrestore = {}
            ssh = self.SshRemoteMachineConnectionWithRSAKey(str(floating_ip_1))
            md5sums_after_1clickrestore = self.calcmd5sum(floating_ip_1, data_dir_path)
            LOG.debug("MD5SUMS after one click restore : {}".format(md5sums_after_1clickrestore))
            ssh.close()

            if md5sums_before_incremental[str(floating_ip_1)] == md5sums_after_1clickrestore[str(floating_ip_1)]:
                LOG.debug("***MD5SUMS MATCH***")
                reporting.add_test_step("Md5 Verification for volume", tvaultconf.PASS)
                tests[2][1] = 1
                reporting.set_test_script_status(tvaultconf.PASS)
                reporting.test_case_to_write()
            else:
                LOG.debug("***MD5SUMS DON'T MATCH***")
                reporting.add_test_step("Md5 Verification for volume", tvaultconf.FAIL)
                reporting.set_test_script_status(tvaultconf.FAIL)
                reporting.test_case_to_write()

            restored_volumes = []
            restored_volumes = self.get_restored_volume_list(restore_id_3)
            LOG.debug("Restored volumes : ")
            LOG.debug(restored_volumes)

            if (tvaultconf.cleanup == True):
                self.addCleanup(self.restore_delete, workload_id, incr_snapshot_id, restore_id_3)
                time.sleep(30)
                self.addCleanup(self.delete_restored_vms, vm_list, restored_volumes)

        except Exception as e:
            LOG.error("Exception: " + str(e))
            if (deleted == 0):
                try:
                    self.delete_vm(vm_id)
                except:
                    pass
            for test in tests:
                if test[1] != 1:
                    reporting.add_test_script(test[0])
                    reporting.set_test_script_status(tvaultconf.FAIL)
                    reporting.test_case_to_write()
Example #7
    def test_1_image_booted(self):
        try:
            ### Create vm and workload ###
            deleted = 0
            reporting.add_test_script(str(__name__))

            self.created = False
            vm_id = self.create_vm(vm_cleanup=False)
            LOG.debug("\nVm id : {}\n".format(str(vm_id)))

            workload_create = command_argument_string.workload_create + " --instance instance-id=" + str(
                vm_id)
            rc = cli_parser.cli_returncode(workload_create)
            if rc != 0:
                reporting.add_test_step("Execute workload-create command",
                                        tvaultconf.FAIL)
                raise Exception(
                    "Workload-create command did not execute correctly")
            else:
                reporting.add_test_step("Execute workload-create command",
                                        tvaultconf.PASS)
                LOG.debug("Workload-create command executed correctly")

            time.sleep(10)
            workload_id = query_data.get_workload_id(tvaultconf.workload_name)
            LOG.debug("Workload ID: " + str(workload_id))
            if (workload_id is not None):
                self.wait_for_workload_tobe_available(workload_id)
                if (self.getWorkloadStatus(workload_id) == "available"):
                    reporting.add_test_step("Create workload", tvaultconf.PASS)
                else:
                    reporting.add_test_step("Create workload", tvaultconf.FAIL)
                    reporting.set_test_script_status(tvaultconf.FAIL)
            else:
                reporting.add_test_step("Create workload", tvaultconf.FAIL)
                reporting.set_test_script_status(tvaultconf.FAIL)

            LOG.debug("\nworkload id : {}\n".format(str(workload_id)))
            LOG.debug("\nvm id : {}\n".format(str(vm_id)))
            time.sleep(40)
            if (tvaultconf.cleanup == True):
                self.addCleanup(self.workload_delete, workload_id)

            ### Full snapshot ###

            self.created = False

            #Create snapshot with CLI command
            create_snapshot = command_argument_string.snapshot_create + workload_id
            LOG.debug("Create snapshot command: " + str(create_snapshot))
            rc = cli_parser.cli_returncode(create_snapshot)
            if rc != 0:
                reporting.add_test_step(
                    "Execute workload-snapshot command with --full",
                    tvaultconf.FAIL)
                raise Exception("Command did not execute correctly")
            else:
                reporting.add_test_step(
                    "Execute workload-snapshot command with --full",
                    tvaultconf.PASS)
                LOG.debug("Command executed correctly")

            snapshot_id = query_data.get_inprogress_snapshot_id(workload_id)
            LOG.debug("\nFull-snapshot ID: {}".format(str(snapshot_id)))
            wc = self.wait_for_snapshot_tobe_available(workload_id,
                                                       snapshot_id)
            if (str(wc) == "available"):
                reporting.add_test_step("Full snapshot", tvaultconf.PASS)
                self.created = True
            else:
                if (str(wc) == "error"):
                    pass
            if (self.created == False):
                reporting.add_test_step("Full snapshot", tvaultconf.FAIL)
                raise Exception("Workload snapshot did not get created")
            if (tvaultconf.cleanup == True):
                self.addCleanup(self.snapshot_delete, workload_id, snapshot_id)

            ### Incremental snapshot ###

            self.created = False
            LOG.debug("workload is:" + str(workload_id))

            #Create incremental snapshot using CLI command
            create_snapshot = command_argument_string.incr_snapshot_create + workload_id
            LOG.debug("Create snapshot command: " + str(create_snapshot))
            rc = cli_parser.cli_returncode(create_snapshot)
            if rc != 0:
                reporting.add_test_step("Execute workload-snapshot command",
                                        tvaultconf.FAIL)
                raise Exception("Command did not execute correctly")
            else:
                reporting.add_test_step("Execute workload-snapshot command",
                                        tvaultconf.PASS)
                LOG.debug("Command executed correctly")

            incr_snapshot_id = query_data.get_inprogress_snapshot_id(
                workload_id)
            LOG.debug("\nIncremental-snapshot ID: {}".format(
                str(incr_snapshot_id)))
            #Wait for incremental snapshot to complete
            wc = self.wait_for_snapshot_tobe_available(workload_id,
                                                       incr_snapshot_id)
            if (str(wc) == "available"):
                reporting.add_test_step("Incremental snapshot",
                                        tvaultconf.PASS)
                LOG.debug(
                    "Workload incremental snapshot successfully completed")
                self.created = True
            if (self.created == False):
                reporting.add_test_step("Incremental snapshot",
                                        tvaultconf.FAIL)
                raise Exception(
                    "Workload incremental snapshot did not get created")
            if (tvaultconf.cleanup == True):
                self.addCleanup(self.snapshot_delete, workload_id,
                                incr_snapshot_id)

            ### Selective restore ###

            instance_details = []
            network_details = []
            restored_vm_details_list = []
            vms_details_after_restore = []
            int_net_1_name = self.get_net_name(
                CONF.network.internal_network_id)
            LOG.debug("int_net_1_name" + str(int_net_1_name))
            int_net_1_subnets = self.get_subnet_id(
                CONF.network.internal_network_id)
            LOG.debug("int_net_1_subnet" + str(int_net_1_subnets))

            #Create instance details for restore.json
            vm_name = "tempest_test_vm_" + vm_id + "_restored"
            temp_instance_data = {
                'id': vm_id,
                'availability_zone': CONF.compute.vm_availability_zone,
                'include': True,
                'restore_boot_disk': True,
                'name': vm_name
            }
            instance_details.append(temp_instance_data)
            LOG.debug("Instance details for restore: " + str(instance_details))

            #Create network details for restore.json
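            # The snapshot network is mapped onto the same internal network, so the
            # selectively restored instance comes up on the original network.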
            snapshot_network = {
                'id': CONF.network.internal_network_id,
                'subnet': {
                    'id': int_net_1_subnets
                }
            }
            target_network = {
                'name': int_net_1_name,
                'id': CONF.network.internal_network_id,
                'subnet': {
                    'id': int_net_1_subnets
                }
            }
            network_details = [{
                'snapshot_network': snapshot_network,
                'target_network': target_network
            }]
            LOG.debug("Network details for restore: " + str(network_details))
            LOG.debug("Snapshot id : " + str(snapshot_id))

            #Trigger selective restore

            restore_id_1 = self.snapshot_selective_restore(
                workload_id,
                snapshot_id,
                restore_cleanup=True,
                restore_name=tvaultconf.restore_name,
                instance_details=instance_details,
                network_details=network_details)
            LOG.debug("\nselective-restore id : {}\n".format(
                str(restore_id_1)))
            self.wait_for_snapshot_tobe_available(workload_id, snapshot_id)
            if (self.getRestoreStatus(workload_id, snapshot_id,
                                      restore_id_1) == "available"):
                reporting.add_test_step("Selective restore", tvaultconf.PASS)
                LOG.debug("Selective restore passed")
            else:
                reporting.add_test_step("Selective restore", tvaultconf.FAIL)
                LOG.debug("Selective restore failed")
                raise Exception("Selective restore failed")
            LOG.debug("selective restore complete")

            #Fetch instance details after restore
            restored_vm_details_list = []
            vm_list = self.get_restored_vm_list(restore_id_1)
            LOG.debug("Restored vm(selective) ID : " + str(vm_list))

            for id in range(len(vm_list)):
                restored_vm_details_list.append(
                    self.get_vm_details(vm_list[id]))
            LOG.debug("Restored vm details list: " +
                      str(restored_vm_details_list))

            vms_details_after_restore = self.get_vms_details_list(
                restored_vm_details_list)
            LOG.debug("VM details after restore: " +
                      str(vms_details_after_restore))

            #Compare the network details before and after restore
            for i in range(len(vms_details_after_restore)):
                if (vms_details_after_restore[i]['network_name'] ==
                        int_net_1_name):
                    reporting.add_test_step(
                        "Network verification for instance-" + str(i + 1),
                        tvaultconf.PASS)
                else:
                    LOG.error("Expected network: " + str(int_net_1_name))
                    LOG.error(
                        "Restored network: " +
                        str(vms_details_after_restore[i]['network_name']))
                    reporting.add_test_step(
                        "Network verification for instance-" + str(i + 1),
                        tvaultconf.FAIL)
                    reporting.set_test_script_status(tvaultconf.FAIL)

            ### In-place restore ###

            #Create in-place restore with CLI command
            restore_command = command_argument_string.inplace_restore + str(
                tvaultconf.restore_filename) + " " + str(incr_snapshot_id)

            LOG.debug("inplace restore cli command: " + str(restore_command))
            #Restore.json restoring only the boot disk of the instance (no attached volumes, no network remapping)
            restore_json = json.dumps({
                'openstack': {
                    'instances': [{
                        'restore_boot_disk': True,
                        'include': True,
                        'id': vm_id
                    }],
                    'networks_mapping': {
                        'networks': []
                    }
                },
                'restore_type': 'inplace',
                'type': 'openstack'
            })
            LOG.debug("restore.json for inplace restore: " + str(restore_json))
            #Create Restore.json
            with open(tvaultconf.restore_filename, 'w') as f:
                f.write(str(json.loads(restore_json)))
            rc = cli_parser.cli_returncode(restore_command)
            if rc != 0:
                reporting.add_test_step("Triggering In-Place restore via CLI",
                                        tvaultconf.FAIL)
                raise Exception("Command did not execute correctly")
            else:
                reporting.add_test_step("Triggering In-Place restore via CLI",
                                        tvaultconf.PASS)
                LOG.debug("Command executed correctly")

            #get restore id from database
            restore_id_2 = query_data.get_snapshot_restore_id(incr_snapshot_id)
            LOG.debug("\ninplace-restore id : {}\n".format(str(restore_id_2)))

            self.wait_for_snapshot_tobe_available(workload_id,
                                                  incr_snapshot_id)

            #get in-place restore status
            if (self.getRestoreStatus(workload_id, incr_snapshot_id,
                                      restore_id_2) == "available"):
                reporting.add_test_step("In-place restore", tvaultconf.PASS)
            else:
                reporting.add_test_step("In-place restore", tvaultconf.FAIL)
                raise Exception("In-place restore failed")

            #Fetch instance details after restore
            restored_vm_details_list = []
            vm_list = self.get_restored_vm_list(restore_id_2)
            LOG.debug("Restored vm(In-place) ID : " + str(vm_list))

            #Delete restore for snapshot
            if (tvaultconf.cleanup == True):
                self.addCleanup(self.restore_delete, workload_id,
                                incr_snapshot_id, restore_id_2)
            LOG.debug("Snapshot Restore(in-place) deleted successfully")

            ### One-click Restore ###

            #Delete the original instance
            self.delete_vm(vm_id)
            LOG.debug(
                "Instance deleted successfully for one click restore : " +
                str(vm_id))
            deleted = 1

            #Create one-click restore using CLI command
            restore_command = command_argument_string.oneclick_restore + " " + snapshot_id
            rc = cli_parser.cli_returncode(restore_command)
            if rc != 0:
                reporting.add_test_step(
                    "Execute snapshot-oneclick-restore command",
                    tvaultconf.FAIL)
                raise Exception("Command did not execute correctly")
            else:
                reporting.add_test_step(
                    "Execute snapshot-oneclick-restore command",
                    tvaultconf.PASS)
                LOG.debug("Command executed correctly")

            restore_id_3 = query_data.get_snapshot_restore_id(snapshot_id)
            LOG.debug("\nRestore ID: {}\n".format(str(restore_id_3)))

            self.wait_for_snapshot_tobe_available(workload_id, snapshot_id)
            if (self.getRestoreStatus(workload_id, snapshot_id,
                                      restore_id_3) == "available"):
                reporting.add_test_step("One-click restore", tvaultconf.PASS)
                LOG.debug("One-click restore passed")
            else:
                reporting.add_test_step("One-click restore", tvaultconf.FAIL)
                LOG.debug("One-click restore failed")
                raise Exception("One-click restore failed")
            LOG.debug("One-click restore complete")

            restored_volumes = self.get_restored_volume_list(restore_id_3)
            vm_list = self.get_restored_vm_list(restore_id_3)

            LOG.debug("Restored vms : " + str(vm_list))

            if (tvaultconf.cleanup == True):
                self.addCleanup(self.restore_delete, workload_id, snapshot_id,
                                restore_id_3)
                self.addCleanup(self.delete_restored_vms, vm_list,
                                restored_volumes)
            reporting.test_case_to_write()

        except Exception as e:
            LOG.error("Exception: " + str(e))
            if (deleted == 0):
                self.delete_vm(vm_id)
            reporting.set_test_script_status(tvaultconf.FAIL)
            reporting.test_case_to_write()
    def test_3_modify_workload_scheduler_enable(self):
        reporting.add_test_script(str(__name__) + "_scheduler_enable")
        try:
            # Prerequisites
            self.created = False
            self.workload_instances = []

            # Launch instance
            self.vm_id = self.create_vm()
            LOG.debug("VM ID-3: " + str(self.vm_id))

            # Create volume
            self.volume_id = self.create_volume()
            LOG.debug("Volume ID-3: " + str(self.volume_id))

            # Attach volume to the instance
            self.attach_volume(self.volume_id, self.vm_id)
            LOG.debug("Volume attached-3")

            # Create workload with scheduler disabled using CLI
            workload_create = command_argument_string.workload_create + \
                " --instance instance-id=" + \
                str(self.vm_id) + " --jobschedule enabled=False"
            rc = cli_parser.cli_returncode(workload_create)
            if rc != 0:
                reporting.add_test_step(
                    "Execute workload-create command with scheduler disable",
                    tvaultconf.FAIL)
                raise Exception(
                    "Command workload create did not execute correctly")
            else:
                reporting.add_test_step(
                    "Execute workload-create command with scheduler disable",
                    tvaultconf.PASS)
                LOG.debug("Command workload create executed correctly")

            time.sleep(10)
            self.wid = query_data.get_workload_id(tvaultconf.workload_name)
            LOG.debug("Workload ID-3: " + str(self.wid))
            if (self.wid is not None):
                self.wait_for_workload_tobe_available(self.wid)
                if (self.getWorkloadStatus(self.wid) == "available"):
                    reporting.add_test_step(
                        "Create workload with scheduler disable",
                        tvaultconf.PASS)
                else:
                    reporting.add_test_step(
                        "Create workload with scheduler disable",
                        tvaultconf.FAIL)
                    reporting.set_test_script_status(tvaultconf.FAIL)
            else:
                reporting.add_test_step(
                    "Create workload with scheduler disable", tvaultconf.FAIL)
                reporting.set_test_script_status(tvaultconf.FAIL)
            LOG.debug("Workload ID: " + str(self.wid))

            # Verify workload created scheduler disable
            status = self.getSchedulerStatus(self.wid)
            if status:
                reporting.add_test_step(
                    "Verify workload created with scheduler disable",
                    tvaultconf.FAIL)
                raise Exception(
                    "Workload has not been created with scheduler disabled")
            else:
                reporting.add_test_step(
                    "Verify workload created with scheduler disable",
                    tvaultconf.PASS)
                LOG.debug(
                    "Workload created with scheduler disabled successfully")

            # Get workload scheduler details
            schedule_details = self.getSchedulerDetails(self.wid)
            scheduled_start_time = schedule_details['start_time']
            interval = schedule_details['interval']

            # Change global job scheduler to disable
            LOG.debug("Change Global job scheduler to disable")
            status = self.disable_global_job_scheduler()
            if not status:
                reporting.add_test_step("Global job scheduler disable",
                                        tvaultconf.PASS)
                LOG.debug("Global job scheduler disabled successfully")
            else:
                reporting.add_test_step("Global job scheduler disable",
                                        tvaultconf.FAIL)
                raise Exception("Global job scheduler not disabled")

            # Modify workload scheduler to enable
            workload_modify_command = command_argument_string.workload_modify + \
                str(self.wid) + " --jobschedule enabled=True"
            error = cli_parser.cli_error(workload_modify_command)
            if error and (str(
                    error.strip('\n')
            ).find("Cannot update scheduler related fields when global jobscheduler is disabled."
                   ) != -1):
                reporting.add_test_step(
                    "Does not execute workload-modify scheduler enable",
                    tvaultconf.PASS)
                LOG.debug("Command executed correctly")
                reporting.add_test_step("Throws proper message",
                                        tvaultconf.PASS)
                LOG.debug("Error message :" + str(error))
            else:
                reporting.add_test_step(
                    "Does not execute workload-modify scheduler enable",
                    tvaultconf.FAIL)
                reporting.add_test_step("Throws proper message",
                                        tvaultconf.FAIL)
                raise Exception("Command did not execute correctly")

            # Change global job scheduler to enable
            LOG.debug("Change Global job scheduler to enable")
            status = self.enable_global_job_scheduler()
            if status:
                reporting.add_test_step("Global job scheduler enable",
                                        tvaultconf.PASS)
                LOG.debug("Global job scheduler enabled successfully")
            else:
                reporting.add_test_step("Global job scheduler enable",
                                        tvaultconf.FAIL)
                raise Exception("Global job scheduler not enabled")

            # Modify workload scheduler to enable and set the start date, time
            # and timezone
            now = datetime.datetime.utcnow()
            now_date = datetime.datetime.strftime(now, "%m/%d/%Y")
            now_time = datetime.datetime.strftime(now, "%I:%M %p")
            now_time_plus_15 = now + datetime.timedelta(minutes=15)
            now_time_plus_15 = datetime.datetime.strftime(
                now_time_plus_15, "%I:%M %p")
            workload_modify_command = command_argument_string.workload_modify + str(
                self.wid
            ) + " --jobschedule enabled=True" + " --jobschedule start_date=" + str(
                now_date) + " --jobschedule start_time=" + "'" + str(
                    now_time_plus_15).strip(
                    ) + "'" + " --jobschedule timezone=UTC"
            rc = cli_parser.cli_returncode(workload_modify_command)
            if rc != 0:
                reporting.add_test_step(
                    "Execute workload-modify scheduler enable",
                    tvaultconf.FAIL)
                raise Exception("Command did not execute correctly")
            else:
                reporting.add_test_step(
                    "Execute workload-modify scheduler enable",
                    tvaultconf.PASS)
                LOG.debug("Command executed correctly")

            # Verify workload scheduler changed to enable
            self.wait_for_workload_tobe_available(self.wid)
            status = self.getSchedulerStatus(self.wid)
            if status:
                reporting.add_test_step("Verify workload scheduler enabled",
                                        tvaultconf.PASS)
                LOG.debug("workload scheduler enabled successfully")
            else:
                reporting.add_test_step("Verify workload scheduler enabled",
                                        tvaultconf.FAIL)
                LOG.debug("workload scheduler enabled unsuccessfully")

            # Verify interval value and next_snapshot_run values
            schedule_details = self.getSchedulerDetails(self.wid)
            interval_after_enable = schedule_details['interval']
            next_run_time_after_enable = schedule_details['nextrun']
            next_run_time_after_enable = int(next_run_time_after_enable)
            LOG.debug("interval_after_enable " + str(interval_after_enable))
            LOG.debug("next_run_time_after_enable" +
                      str(next_run_time_after_enable))
            start_date = schedule_details['start_date']
            start_time = schedule_details['start_time']
            date_time = start_date + " " + start_time
            start_date_time = datetime.datetime.strptime(
                date_time, "%m/%d/%Y %H:%M %p")
            LOG.debug("Scheduled start and date time is: " +
                      str(start_date_time))
            utc_24hr = datetime.datetime.utcnow()
            utc_12hr = datetime.datetime.strftime(utc_24hr,
                                                  "%m/%d/%Y %I:%M %p")
            utc_12hr = datetime.datetime.strptime(utc_12hr,
                                                  "%m/%d/%Y %H:%M %p")
            time_diff = (start_date_time - utc_12hr).total_seconds()
            time_diff = int(time_diff)
            LOG.debug(
                "Time difference between UTC time and scheduled start time: " +
                str(time_diff))
            delta = abs(time_diff - next_run_time_after_enable)
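            # delta is how far the scheduler's reported seconds-until-next-run
            # deviates from the offset of the requested start time from now.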

            # Condition for Interval value and time difference should not be
            # more than two minutes
            if delta < 120 and interval == interval_after_enable:
                reporting.add_test_step(
                    "Verify Interval and Next snapshot run time values are correct",
                    tvaultconf.PASS)
                LOG.debug(
                    "Interval and Next snapshot run time values are correct")
            else:
                reporting.add_test_step(
                    "Verify Interval and Next snapshot run time values are correct",
                    tvaultconf.FAIL)
                raise Exception(
                    "Interval and Next snapshot run time values are incorrect")

            reporting.test_case_to_write()

        except Exception as e:
            LOG.error("Exception: " + str(e))
            reporting.set_test_script_status(tvaultconf.FAIL)
            reporting.test_case_to_write()

        finally:
            # Delete workload
            status = self.workload_delete(self.wid)
            time.sleep(10)
    def test_3_modify_workload_scheduler_enable(self):
        reporting.add_test_script(str(__name__) + "_scheduler_enable")
        try:
            #Prerequisites
            self.created = False
            self.workload_instances = []

            #Launch instance
            self.vm_id = self.create_vm()
            LOG.debug("VM ID-3: " + str(self.vm_id))

            #Create volume
            self.volume_id = self.create_volume()
            LOG.debug("Volume ID-3: " + str(self.volume_id))

            #Attach volume to the instance
            self.attach_volume(self.volume_id, self.vm_id)
            LOG.debug("Volume attached-3")

            #Create workload with scheduler disabled using CLI
            workload_create = command_argument_string.workload_create + " --instance instance-id=" + str(
                self.vm_id) + " --jobschedule enabled=False"
            rc = cli_parser.cli_returncode(workload_create)
            if rc != 0:
                reporting.add_test_step(
                    "Execute workload-create command with scheduler disable",
                    tvaultconf.FAIL)
                raise Exception(
                    "Command workload create did not execute correctly")
            else:
                reporting.add_test_step(
                    "Execute workload-create command with scheduler disable",
                    tvaultconf.PASS)
                LOG.debug("Command workload create executed correctly")

            time.sleep(10)
            self.wid = query_data.get_workload_id(tvaultconf.workload_name)
            LOG.debug("Workload ID-3: " + str(self.wid))
            if (self.wid is not None):
                self.wait_for_workload_tobe_available(self.wid)
                if (self.getWorkloadStatus(self.wid) == "available"):
                    reporting.add_test_step(
                        "Create workload with scheduler disable",
                        tvaultconf.PASS)
                else:
                    reporting.add_test_step(
                        "Create workload with scheduler disable",
                        tvaultconf.FAIL)
                    reporting.set_test_script_status(tvaultconf.FAIL)
            else:
                reporting.add_test_step(
                    "Create workload with scheduler disable", tvaultconf.FAIL)
                reporting.set_test_script_status(tvaultconf.FAIL)
            LOG.debug("Workload ID: " + str(self.wid))

            #Verify workload created scheduler disable
            status = self.getSchedulerStatus(self.wid)
            if status:
                reporting.add_test_step(
                    "Verify workload created with scheduler disable",
                    tvaultconf.FAIL)
                raise Exception(
                    "Workload has not been created with scheduler disabled")
            else:
                reporting.add_test_step(
                    "Verify workload created with scheduler disable",
                    tvaultconf.PASS)
                LOG.debug(
                    "Workload created with scheduler disabled successfully")

            #Get workload scheduler details
            schedule_details = self.getSchedulerDetails(self.wid)
            scheduled_start_time = schedule_details['start_time']
            interval = schedule_details['interval']

            #Change global job scheduler to disable
            LOG.debug("Change Global job scheduler to disable")
            status = self.disable_global_job_scheduler()
            if not status:
                reporting.add_test_step("Global job scheduler disable",
                                        tvaultconf.PASS)
                LOG.debug("Global job scheduler disabled successfully")
            else:
                reporting.add_test_step("Global job scheduler disable",
                                        tvaultconf.FAIL)
                raise Exception("Global job scheduler not disabled")

            #Modify workload scheduler to enable
            workload_modify_command = command_argument_string.workload_modify + str(
                self.wid) + " --jobschedule enabled=True"
            error = cli_parser.cli_error(workload_modify_command)
            if error and str(
                    error.strip('\n')
            ) == "ERROR: Cannot update scheduler related fields when global jobscheduler is disabled.":
                reporting.add_test_step(
                    "Does not execute workload-modify scheduler enable",
                    tvaultconf.PASS)
                LOG.debug("Command executed correctly")
                reporting.add_test_step("Throws proper message",
                                        tvaultconf.PASS)
                LOG.debug("Error message :" + str(error))
            else:
                reporting.add_test_step(
                    "Does not execute workload-modify scheduler enable",
                    tvaultconf.FAIL)
                reporting.add_test_step("Throws proper message",
                                        tvaultconf.FAIL)
                raise Exception("Command did not execute correctly")

            #Change global job scheduler to enable
            LOG.debug("Change Global job scheduler to enable")
            status = self.enable_global_job_scheduler()
            if status:
                reporting.add_test_step("Global job scheduler enable",
                                        tvaultconf.PASS)
                LOG.debug("Global job scheduler enabled successfully")
            else:
                reporting.add_test_step("Global job scheduler enable",
                                        tvaultconf.FAIL)
                raise Exception("Global job scheduler not enabled")

            #Modify workload scheduler to enable
            workload_modify_command = command_argument_string.workload_modify + str(
                self.wid) + " --jobschedule enabled=True"
            rc = cli_parser.cli_returncode(workload_modify_command)
            if rc != 0:
                reporting.add_test_step(
                    "Execute workload-modify scheduler enable",
                    tvaultconf.FAIL)
                raise Exception("Command did not execute correctly")
            else:
                reporting.add_test_step(
                    "Execute workload-modify scheduler enable",
                    tvaultconf.PASS)
                LOG.debug("Command executed correctly")

            #Verify workload scheduler changed to enable
            self.wait_for_workload_tobe_available(self.wid)
            status = self.getSchedulerStatus(self.wid)
            if status:
                reporting.add_test_step("Verify workload scheduler enabled",
                                        tvaultconf.PASS)
                LOG.debug("workload scheduler enabled successfully")
            else:
                reporting.add_test_step("Verify workload scheduler enabled",
                                        tvaultconf.FAIL)
                LOG.debug("workload scheduler enabled unsuccessfully")

            #Verify interval value and next_snapshot_run values
            schedule_details = self.getSchedulerDetails(self.wid)
            interval_after_enable = schedule_details['interval']
            next_run_time_after_enable = schedule_details['nextrun']
            LOG.debug("interval_after_enable " + str(interval_after_enable))
            LOG.debug("next_run_time_after_enable" +
                      str(next_run_time_after_enable))
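            # Split the "HH:MM AM|PM" strings into their clock part (digits) and
            # AM/PM period (letters) so the two can be compared separately below.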
            scheduled_start_time_periods = ''.join(
                [i for i in scheduled_start_time if not i.isdigit()])
            scheduled_start_time = ''.join(
                [i for i in scheduled_start_time if not i.isalpha()])
            current_time = int(time.time())
            LOG.debug("current_time " + str(current_time))
            start_time = current_time + next_run_time_after_enable
            LOG.debug("start_time " + str(start_time))
            time3hours = datetime.datetime.utcfromtimestamp(start_time)
            start_time_in_hours = time3hours.strftime('%I:%M %p')
            start_time_in_periods = ''.join(
                [i for i in start_time_in_hours if not i.isdigit()])
            start_time_in_hours = ''.join(
                [i for i in start_time_in_hours if not i.isalpha()])
            LOG.debug("start_time_in_hours " + str(start_time_in_hours))

            #Calculate difference between times in minutes
            timeA = datetime.datetime.strptime(scheduled_start_time.strip(),
                                               "%H:%M")
            timeB = datetime.datetime.strptime(start_time_in_hours.strip(),
                                               "%H:%M")
            newTime = timeA - timeB
            timedelta = abs(newTime.total_seconds()) / 60

            #Condition for Interval value and time difference should not be more than two minutes and time periods AM/PM
            if timedelta < 2 and scheduled_start_time_periods == start_time_in_periods and interval == interval_after_enable:
                reporting.add_test_step(
                    "Verify Interval and Next snapshot run time values are correct",
                    tvaultconf.PASS)
                LOG.debug(
                    "Interval and Next snapshot run time values are correct")
            else:
                reporting.add_test_step(
                    "Verify Interval and Next snapshot run time values are correct",
                    tvaultconf.FAIL)
                raise Exception(
                    "Interval and Next snapshot run time values are incorrect")

            reporting.test_case_to_write()

        except Exception as e:
            LOG.error("Exception: " + str(e))
            reporting.set_test_script_status(tvaultconf.FAIL)
            reporting.test_case_to_write()

        finally:
            #Delete workload
            status = self.workload_delete(self.wid)
            time.sleep(10)
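
    # Illustrative sketch (not part of the suite): the next-run verification above
    # reduces to comparing the scheduler's reported seconds-until-next-run against
    # the offset of the requested start time from "now", within a tolerance. The
    # helper name and its arguments are hypothetical; it uses the datetime module
    # already imported by these tests.
    @staticmethod
    def _next_run_matches(expected_start_utc, reported_nextrun_seconds,
                          tolerance_seconds=120):
        """Return True if the scheduler's next run falls within tolerance_seconds
        of the requested UTC start time."""
        offset = (expected_start_utc -
                  datetime.datetime.utcnow()).total_seconds()
        return abs(offset - reported_nextrun_seconds) <= tolerance_seconds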