def test_01_expired_license(self):
    reporting.add_test_script(str(__name__) + "_expired_license")
    try:
        # Create license using CLI command
        self.cmd = command_argument_string.license_create + \
            tvaultconf.expired_license_filename
        LOG.debug("License create command: " + str(self.cmd))
        rc = cli_parser.cli_returncode(self.cmd)
        LOG.debug("rc value: " + str(rc))
        if rc != 0:
            reporting.add_test_step(
                "Execute license_create command with expired license",
                tvaultconf.FAIL)
            raise Exception("Command not executed correctly")
        else:
            reporting.add_test_step(
                "Execute license_create command with expired license",
                tvaultconf.PASS)
            LOG.debug("Command executed correctly")

        out = self.get_license_check()
        LOG.debug("license-check API output: " + str(out))
        if str(out).find('License expired') != -1:
            reporting.add_test_step("Verify license expiration message",
                                    tvaultconf.PASS)
        else:
            reporting.add_test_step("Verify license expiration message",
                                    tvaultconf.FAIL)
            raise Exception("Incorrect license expiration message displayed")
        reporting.test_case_to_write()
    except Exception as e:
        LOG.error("Exception: " + str(e))
        reporting.set_test_script_status(tvaultconf.FAIL)
        reporting.test_case_to_write()
def create_snapshot(self, workload_id, is_full=True):
    if is_full:
        substitution = 'Full'
    else:
        substitution = 'Incremental'

    snapshot_id, command_execution, snapshot_execution = \
        self.workload_snapshot_cli(workload_id, is_full=is_full)
    if command_execution == 'pass':
        reporting.add_test_step(
            "{} snapshot command execution".format(substitution),
            tvaultconf.PASS)
        LOG.debug("Command executed correctly for {} snapshot".format(
            substitution.lower()))
    else:
        reporting.add_test_step(
            "{} snapshot command execution".format(substitution),
            tvaultconf.FAIL)
        raise Exception(
            "Command did not execute correctly for {} snapshot".format(
                substitution.lower()))

    if snapshot_execution == 'pass':
        reporting.add_test_step("{} snapshot".format(substitution),
                                tvaultconf.PASS)
    else:
        reporting.add_test_step("{} snapshot".format(substitution),
                                tvaultconf.FAIL)
        raise Exception("{} snapshot failed".format(substitution))
    return snapshot_id
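# Illustrative usage sketch, not part of the original suite: shows how a test
# could chain the create_snapshot() helper above for a full and an incremental
# snapshot of the same workload. Assumes self.wid was set by a prior
# workload_create() call; the method name is hypothetical.
def _example_snapshot_flow(self):
    # Full snapshot first, then an incremental on top of it
    full_snapshot_id = self.create_snapshot(self.wid, is_full=True)
    incr_snapshot_id = self.create_snapshot(self.wid, is_full=False)
    return full_snapshot_id, incr_snapshot_id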
def test_create_license(self):
    # Create license using CLI command
    self.cmd = command_argument_string.license_create + \
        tvaultconf.compute_license_filename
    LOG.debug("License create command: " + str(self.cmd))
    rc = cli_parser.cli_returncode(self.cmd)
    if rc != 0:
        reporting.add_test_step("Execute license_create command",
                                tvaultconf.FAIL)
        reporting.set_test_script_status(tvaultconf.FAIL)
        raise Exception("Command did not execute correctly")
    else:
        reporting.add_test_step("Execute license_create command",
                                tvaultconf.PASS)
        LOG.debug("Command executed correctly")

    # Verification
    self.license_data = self.get_license_list()
    LOG.debug("License data returned: " + str(self.license_data))
    if len(self.license_data.keys()) != 0:
        reporting.add_test_step("License verification", tvaultconf.PASS)
    else:
        reporting.add_test_step("License verification", tvaultconf.FAIL)
        reporting.set_test_script_status(tvaultconf.FAIL)
        raise Exception("License not added")
    reporting.test_case_to_write()
def test_4_filesearch_latesttwosnapshots(self):
    reporting.add_test_script(str(__name__) + "_latesttwosnapshots")
    try:
        global instances_ids
        global snapshot_ids

        # Run filesearch on vm-1 against the latest two snapshots
        vmid_to_search = instances_ids[0]
        filepath_to_search = "/File_1"
        snapshot_ids_tosearch = []
        start_snapshot = 0
        end_snapshot = 2
        filecount_in_snapshots = {
            snapshot_ids[0]: 0,
            snapshot_ids[1]: 0,
            snapshot_ids[2]: 0,
            snapshot_ids[3]: 1}
        filesearch_id = self.filepath_search(
            vmid_to_search, filepath_to_search, snapshot_ids_tosearch,
            start_snapshot, end_snapshot)
        snapshot_wise_filecount = self.verifyFilepath_Search(
            filesearch_id, filepath_to_search)

        for snapshot_id in snapshot_wise_filecount.keys():
            if (filecount_in_snapshots[snapshot_id] ==
                    snapshot_wise_filecount[snapshot_id] and
                    snapshot_id in snapshot_ids[2:]):
                filesearch_status = True
            else:
                filesearch_status = False
                LOG.debug("Filepath search for latesttwosnapshots unsuccessful")
                reporting.add_test_step(
                    "Verification of filepath search for latesttwosnapshots",
                    tvaultconf.FAIL)
                raise Exception(
                    "Filesearch for latesttwosnapshots did not execute correctly")
        if filesearch_status:
            LOG.debug("Filepath search for latesttwosnapshots successful")
            reporting.add_test_step(
                "Verification of filepath search for latesttwosnapshots",
                tvaultconf.PASS)
        reporting.test_case_to_write()
    except Exception as e:
        LOG.error("Exception: " + str(e))
        reporting.set_test_script_status(tvaultconf.FAIL)
        reporting.test_case_to_write()
def test_2_umount_snapshot(self):
    reporting.add_test_script(str(__name__) + "_umount_snapshot")
    try:
        global instances_ids
        global snapshot_ids
        global wid
        global security_group_id
        global volumes_ids
        global fvm_id
        global floating_ips_list
        unmount_snapshot_id = snapshot_ids[0]

        LOG.debug("unmount snapshot")
        is_unmounted = self.unmount_snapshot(wid, unmount_snapshot_id)
        LOG.debug("VALUE OF is_unmounted: " + str(is_unmounted))
        if is_unmounted:
            LOG.debug("unmount snapshot with full snapshot is successful")
            reporting.add_test_step(
                "Verification of unmount snapshot with full snapshot",
                tvaultconf.PASS)
        else:
            LOG.debug("unmount snapshot with full snapshot is unsuccessful")
            reporting.add_test_step(
                "Verification of unmount snapshot with full snapshot",
                tvaultconf.FAIL)
            raise Exception(
                "Snapshot unmount with full snapshot did not execute correctly")

        LOG.debug("validate that snapshot is unmounted from FVM")
        ssh = self.SshRemoteMachineConnectionWithRSAKey(
            str(floating_ips_list[1]), CONF.compute.fvm_ssh_user)
        output_list = self.validate_snapshot_mount(ssh)
        ssh.close()

        if output_list == '':
            LOG.debug("Unmounting successful")
            reporting.add_test_step("Unmounting of a full snapshot",
                                    tvaultconf.PASS)
        else:
            LOG.debug("Unmounting unsuccessful")
            reporting.add_test_step("Unmounting of a full snapshot",
                                    tvaultconf.FAIL)
            raise Exception("Unmounting of a snapshot failed")
        reporting.test_case_to_write()
    except Exception as e:
        LOG.error("Exception: " + str(e))
        reporting.set_test_script_status(tvaultconf.FAIL)
        reporting.test_case_to_write()
def test_tvault1035_list_workload(self):
    try:
        # Prerequisites
        self.created = False
        self.workload_instances = []

        # Launch instance
        self.vm_id = self.create_vm()
        LOG.debug("VM ID: " + str(self.vm_id))

        # Create volume
        self.volume_id = self.create_volume()
        LOG.debug("Volume ID: " + str(self.volume_id))

        # Attach volume to the instance
        self.attach_volume(self.volume_id, self.vm_id)
        LOG.debug("Volume attached")

        # Create workload
        self.workload_instances.append(self.vm_id)
        self.wid = self.workload_create(
            self.workload_instances, tvaultconf.parallel,
            workload_name=tvaultconf.workload_name)
        LOG.debug("Workload ID: " + str(self.wid))

        # List available workloads using CLI command
        rc = cli_parser.cli_returncode(command_argument_string.workload_list)
        if rc != 0:
            reporting.add_test_step("Execute workload-list command",
                                    tvaultconf.FAIL)
            raise Exception("Command did not execute correctly")
        else:
            reporting.add_test_step("Execute workload-list command",
                                    tvaultconf.PASS)
            LOG.debug("Command executed correctly")

        wc = query_data.get_available_workloads()
        out = cli_parser.cli_output(command_argument_string.workload_list)
        if int(wc) == int(out):
            reporting.add_test_step("Verification with DB", tvaultconf.PASS)
            LOG.debug("Workload list command listed available workloads correctly")
        else:
            reporting.add_test_step("Verification with DB", tvaultconf.FAIL)
            raise Exception(
                "Workload list command did not list available workloads "
                "correctly, from db: " + str(wc) + " , from cmd: " + str(out))
        reporting.test_case_to_write()
    except Exception as e:
        LOG.error("Exception: " + str(e))
        reporting.set_test_script_status(tvaultconf.FAIL)
        reporting.test_case_to_write()
def test_chargeback_api(self):
    try:
        if self.exception != "":
            LOG.debug("pre req failed")
            reporting.add_test_step(str(self.exception), tvaultconf.FAIL)
            raise Exception(str(self.exception))
        LOG.debug("pre req completed")

        # Run getVMProtected API
        vm_protected = self.getVMProtected()
        if not vm_protected:
            reporting.add_test_step("Verified getVMProtected API",
                                    tvaultconf.FAIL)
            LOG.debug("getVMProtected API failed")
            raise Exception("getVMProtected API Failed")
        else:
            reporting.add_test_step("Verified getVMProtected API",
                                    tvaultconf.PASS)

        # Verify instance ID
        vm_id = self.vm_id
        instance_found = False
        for vm in vm_protected['protected_vms']:
            openstack_vm_id = vm['id']
            LOG.debug("Openstack VM ID : " + openstack_vm_id)
            if vm_id == openstack_vm_id:
                LOG.debug("VM ID : " + vm_id)
                instance_found = True
                break
        if instance_found:
            reporting.add_test_step("Verified Instance ID", tvaultconf.PASS)
        else:
            reporting.add_test_step("Verified Instance ID", tvaultconf.FAIL)
            raise Exception("Verification for instance id failed")
        reporting.test_case_to_write()
    except Exception as e:
        LOG.error("Exception: " + str(e))
        reporting.set_test_script_status(tvaultconf.FAIL)
        reporting.test_case_to_write()
def test_1_create_full_snapshot(self):
    try:
        reporting.add_test_script(str(__name__) + "_create_full_snapshot")
        global vm_id
        global volume_id
        global workload_id
        global snapshot_id
        workload_id = self.wid
        vm_id = self.vm_id
        volume_id = self.volume_id
        LOG.debug("workload is: " + str(workload_id))
        LOG.debug("vm id: " + str(vm_id))
        LOG.debug("volume id: " + str(volume_id))
        self.created = False

        # Create snapshot with CLI command
        create_snapshot = command_argument_string.snapshot_create + \
            workload_id
        LOG.debug("Create snapshot command: " + str(create_snapshot))
        rc = cli_parser.cli_returncode(create_snapshot)
        if rc != 0:
            reporting.add_test_step(
                "Execute workload-snapshot command with --full",
                tvaultconf.FAIL)
            raise Exception("Command did not execute correctly")
        else:
            reporting.add_test_step(
                "Execute workload-snapshot command with --full",
                tvaultconf.PASS)
            LOG.debug("Command executed correctly")

        snapshot_id = query_data.get_inprogress_snapshot_id(workload_id)
        LOG.debug("Snapshot ID: " + str(snapshot_id))
        wc = self.wait_for_snapshot_tobe_available(workload_id, snapshot_id)
        if str(wc) == "available":
            reporting.add_test_step("Full snapshot", tvaultconf.PASS)
            LOG.debug("Workload snapshot successfully completed")
            self.created = True
        if self.created == False:
            reporting.add_test_step("Full snapshot", tvaultconf.FAIL)
            raise Exception("Workload snapshot did not get created")
        reporting.test_case_to_write()
    except Exception as e:
        LOG.error("Exception: " + str(e))
        reporting.set_test_script_status(tvaultconf.FAIL)
        reporting.test_case_to_write()
def test_6_filesearch_wildcard_star(self):
    reporting.add_test_script(str(__name__) + "_wildcard_star")
    try:
        global instances_ids
        global snapshot_ids

        # Run filesearch on vm-2
        vmid_to_search = instances_ids[1]
        filepath_to_search = "/File*"
        filecount_in_snapshots = {
            snapshot_ids[0]: 0,
            snapshot_ids[1]: 0,
            snapshot_ids[2]: 2,
            snapshot_ids[3]: 2}
        filesearch_id = self.filepath_search(vmid_to_search,
                                             filepath_to_search)
        snapshot_wise_filecount = self.verifyFilepath_Search(
            filesearch_id, filepath_to_search)

        for snapshot_id in filecount_in_snapshots.keys():
            if (snapshot_wise_filecount[snapshot_id] ==
                    filecount_in_snapshots[snapshot_id]):
                filesearch_status = True
            else:
                filesearch_status = False
                LOG.debug("Filepath search with wildcard_star unsuccessful")
                reporting.add_test_step(
                    "Verification of filepath search with wildcard_star",
                    tvaultconf.FAIL)
                raise Exception(
                    "Filesearch with wildcard_star did not execute correctly")
        if filesearch_status == True:
            LOG.debug("Filepath search with wildcard_star successful")
            reporting.add_test_step(
                "Verification of filepath search with wildcard_star",
                tvaultconf.PASS)
        reporting.test_case_to_write()
    except Exception as e:
        LOG.error("Exception: " + str(e))
        reporting.set_test_script_status(tvaultconf.FAIL)
        reporting.test_case_to_write()
def test_2_create_incremental_snapshot(self):
    try:
        reporting.add_test_script(
            str(__name__) + "_create_incremental_snapshot")
        global workload_id
        self.created = False
        LOG.debug("workload is: " + str(workload_id))

        # Create incremental snapshot using CLI command
        create_snapshot = command_argument_string.incr_snapshot_create + \
            workload_id
        LOG.debug("Create snapshot command: " + str(create_snapshot))
        rc = cli_parser.cli_returncode(create_snapshot)
        if rc != 0:
            reporting.add_test_step("Execute workload-snapshot command",
                                    tvaultconf.FAIL)
            raise Exception("Command did not execute correctly")
        else:
            reporting.add_test_step("Execute workload-snapshot command",
                                    tvaultconf.PASS)
            LOG.debug("Command executed correctly")

        self.incr_snapshot_id = query_data.get_inprogress_snapshot_id(
            workload_id)
        LOG.debug("Incremental Snapshot ID: " + str(self.incr_snapshot_id))

        # Wait for incremental snapshot to complete
        wc = self.wait_for_snapshot_tobe_available(workload_id,
                                                   self.incr_snapshot_id)
        if str(wc) == "available":
            reporting.add_test_step("Incremental snapshot", tvaultconf.PASS)
            LOG.debug("Workload incremental snapshot successfully completed")
            self.created = True
        if self.created == False:
            reporting.add_test_step("Incremental snapshot", tvaultconf.FAIL)
            raise Exception(
                "Workload incremental snapshot did not get created")

        # Cleanup
        # Delete snapshot
        self.snapshot_delete(workload_id, self.incr_snapshot_id)
        LOG.debug("Incremental snapshot deleted successfully")
        reporting.test_case_to_write()
    except Exception as e:
        LOG.error("Exception: " + str(e))
        reporting.set_test_script_status(tvaultconf.FAIL)
        reporting.test_case_to_write()
def test_4_delete_snapshot(self):
    try:
        global workload_id
        global snapshot_id
        global volume_id
        global vm_id
        reporting.add_test_script(str(__name__) + "_delete_snapshot")

        # Delete snapshot using CLI command
        rc = cli_parser.cli_returncode(
            command_argument_string.snapshot_delete + snapshot_id)
        if rc != 0:
            reporting.add_test_step("Execute snapshot-delete command",
                                    tvaultconf.FAIL)
            raise Exception("Command did not execute correctly")
        else:
            reporting.add_test_step("Execute snapshot-delete command",
                                    tvaultconf.PASS)
            LOG.debug("Command executed correctly")

        time.sleep(5)
        wc = query_data.get_workload_snapshot_delete_status(
            tvaultconf.snapshot_name, tvaultconf.snapshot_type_full,
            snapshot_id)
        LOG.debug("Snapshot Delete status: " + str(wc))
        if str(wc) == "1":
            reporting.add_test_step("Verification", tvaultconf.PASS)
            LOG.debug("Workload snapshot successfully deleted")
        else:
            reporting.add_test_step("Verification", tvaultconf.FAIL)
            raise Exception("Snapshot did not get deleted")

        # Cleanup
        # Delete volume snapshots
        self.volume_snapshots = self.get_available_volume_snapshots()
        self.delete_volume_snapshots(self.volume_snapshots)
        # Delete workload
        self.workload_delete(workload_id)
        # Delete vm
        self.delete_vm(vm_id)
        # Delete volume
        self.delete_volume(volume_id)
        reporting.test_case_to_write()
    except Exception as e:
        LOG.error("Exception: " + str(e))
        reporting.set_test_script_status(tvaultconf.FAIL)
        reporting.test_case_to_write()
def test_3_get_tenant_details(self):
    try:
        reporting.add_test_script(str(__name__) + "_get_tenant_details")
        tenant_usage = self.getTenantUsage()
        LOG.debug("Tenant details are : " + str(tenant_usage))
        if len(tenant_usage) > 0:
            LOG.debug("Tenant details returned successfully")
            reporting.add_test_step("Tenant Details", tvaultconf.PASS)
        else:
            LOG.debug("Tenant details API did not return anything")
            reporting.add_test_step("Tenant Details", tvaultconf.FAIL)
        reporting.test_case_to_write()
    except Exception as e:
        LOG.error("Exception: " + str(e))
        reporting.set_test_script_status(tvaultconf.FAIL)
        reporting.test_case_to_write()
def test_bootfromvol_fullsnapshot(self):
    try:
        # Create full snapshot
        self.snapshot_id = self.workload_snapshot(self.workload_id, True,
                                                  snapshot_cleanup=False)
        self.wait_for_workload_tobe_available(self.workload_id)
        if (self.getSnapshotStatus(self.workload_id, self.snapshot_id) ==
                "available"):
            reporting.add_test_step(
                "Create full snapshot of boot from volume instance",
                tvaultconf.PASS)
        else:
            reporting.add_test_step(
                "Create full snapshot of boot from volume instance",
                tvaultconf.FAIL)
            raise Exception("Snapshot creation failed")

        # Cleanup
        # Delete snapshot
        self.snapshot_delete(self.workload_id, self.snapshot_id)
        # Delete volume snapshots
        self.volume_snapshots = self.get_available_volume_snapshots()
        self.delete_volume_snapshots(self.volume_snapshots)
        # Delete workload
        self.workload_delete(self.workload_id)
        # Delete vm
        self.delete_vm(self.vm_id)
        # Delete volume
        self.delete_volume(self.volume_id)
        reporting.test_case_to_write()
    except Exception as e:
        LOG.error("Exception: " + str(e))
        reporting.set_test_script_status(tvaultconf.FAIL)
        reporting.test_case_to_write()
def test_2_get_storage_details(self):
    try:
        reporting.add_test_script(str(__name__) + "_get_storage_details")
        storage_usage = self.getStorageUsage()
        LOG.debug("Storage details are : " + str(storage_usage))
        if len(storage_usage) > 0 and \
                storage_usage[0]['total_capacity_humanized'] is not None:
            LOG.debug("Storage details returned successfully")
            reporting.add_test_step("Storage Details", tvaultconf.PASS)
        else:
            LOG.debug("Storage details API did not return anything")
            reporting.add_test_step("Storage Details", tvaultconf.FAIL)
        reporting.test_case_to_write()
    except Exception as e:
        LOG.error("Exception: " + str(e))
        reporting.set_test_script_status(tvaultconf.FAIL)
        reporting.test_case_to_write()
def multiple_workloads(self, vms):
    wls = {}
    vmscopy = [*vms]
    LOG.debug("\nvms : {}\n".format(vms))
    # Split VMs into chunks of 3; each chunk becomes one workload
    l1 = [vmscopy[i:i + 3] for i in range(0, len(vmscopy), 3)]
    LOG.debug(l1)
    i = 0
    for each in l1:
        i += 1
        workload_id = self.workload_create(each, tvaultconf.parallel,
                                           workload_cleanup=True)
        LOG.debug("Workload ID: " + str(workload_id))
        if workload_id is not None:
            self.wait_for_workload_tobe_available(workload_id)
            if self.getWorkloadStatus(workload_id) == "available":
                reporting.add_test_step("Create workload-{}".format(i),
                                        tvaultconf.PASS)
            else:
                reporting.add_test_step("Create workload-{}".format(i),
                                        tvaultconf.FAIL)
                reporting.set_test_script_status(tvaultconf.FAIL)
        else:
            reporting.add_test_step("Create workload-{}".format(i),
                                    tvaultconf.FAIL)
            reporting.set_test_script_status(tvaultconf.FAIL)
            raise Exception("Workload creation failed")
        wls[workload_id] = []
        for vm in each:
            wls[workload_id].append([vm, vms[vm]])
    return wls
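# Illustrative only (assumption, not from the original suite): the vms
# argument to multiple_workloads() above is a mapping of vm_id -> per-VM
# data (e.g. attached volume ids), and the return value maps each created
# workload id to its [vm_id, vm_data] pairs, three VMs per workload. All
# ids below are hypothetical placeholders.
#
#     vms = {"vm-1": ["vol-1"], "vm-2": ["vol-2"],
#            "vm-3": ["vol-3"], "vm-4": ["vol-4"]}
#     wls = self.multiple_workloads(vms)
#     # wls -> {"wl-1": [["vm-1", ["vol-1"]], ["vm-2", ["vol-2"]],
#     #                  ["vm-3", ["vol-3"]]],
#     #         "wl-2": [["vm-4", ["vol-4"]]]}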
def bootfromvol_workload(self):
    try:
        self.exception = ""
        self.total_workloads = 1
        self.vms_per_workload = 1
        self.workload_instances = []
        self.workload_volumes = []
        for vm in range(0, self.vms_per_workload):
            # Create a bootable volume from the image and boot a VM from it
            self.volume_id = self.create_volume(
                size=tvaultconf.bootfromvol_vol_size,
                image_id=CONF.compute.image_ref,
                volume_cleanup=False)
            self.workload_volumes.append(self.volume_id)
            self.set_volume_as_bootable(self.volume_id)
            self.block_mapping_details = [{"source_type": "volume",
                                           "delete_on_termination": "false",
                                           "boot_index": 0,
                                           "uuid": self.volume_id,
                                           "destination_type": "volume"}]
            self.vm_id = self.create_vm(
                image_id="",
                block_mapping_data=self.block_mapping_details,
                vm_cleanup=False)
            self.workload_instances.append(self.vm_id)

        # Create workload
        self.workload_id = self.workload_create(self.workload_instances,
                                                tvaultconf.parallel,
                                                workload_cleanup=False)
        if self.wait_for_workload_tobe_available(self.workload_id) == False:
            reporting.add_test_step("Create_Workload", tvaultconf.FAIL)
            raise Exception("Workload creation failed")
        self.workload_status = self.getWorkloadStatus(self.workload_id)
    except Exception as err:
        self.exception = err
        LOG.error("Exception: " + str(self.exception))
def test_1_get_audit_log(self):
    try:
        reporting.add_test_script(str(__name__) + "_get_audit_log")
        global vm_id
        global volume_id
        global workload_id
        global snapshot_id
        workload_id = self.wid
        vm_id = self.vm_id
        volume_id = self.volume_id
        LOG.debug("workload is: " + str(workload_id))
        LOG.debug("vm id: " + str(vm_id))
        LOG.debug("volume id: " + str(volume_id))

        self.created = False
        snapshot_id = self.workload_snapshot(workload_id, True)
        wc = self.wait_for_snapshot_tobe_available(workload_id, snapshot_id)
        if str(wc) == "available":
            reporting.add_test_step("Full snapshot", tvaultconf.PASS)
            LOG.debug("Workload snapshot successfully completed")
            self.created = True
        if self.created == False:
            reporting.add_test_step("Full snapshot", tvaultconf.FAIL)
            raise Exception("Workload snapshot did not get created")

        audit_log = self.getAuditLog()
        LOG.debug("Audit logs are : " + str(audit_log))
        wkld_name = tvaultconf.workload_name
        if len(audit_log) > 0 and str(audit_log).find(wkld_name) != -1:
            LOG.debug("Audit log API returned logs successfully")
            reporting.add_test_step("Audit Log", tvaultconf.PASS)
        else:
            LOG.debug("Audit log API did not return anything")
            reporting.add_test_step("Audit Log", tvaultconf.FAIL)
        reporting.test_case_to_write()
    except Exception as e:
        LOG.error("Exception: " + str(e))
        reporting.set_test_script_status(tvaultconf.FAIL)
        reporting.test_case_to_write()
def test_5_config_workload_configure_invalid_db_pass(self):
    reporting.add_test_script(str(__name__) + "_invalid_db_pass")
    try:
        # Add sudo access for config user
        self.sudo_access_config_user(access=True)

        # Create yaml_file for config backup configuration with a random
        # (invalid) db_password
        self.create_config_backup_yaml(db_password=str(int(time.time())))

        # Config backup configuration with CLI command; expected to fail
        # because of the invalid db password
        config_workload_command = (
            command_argument_string.config_workload_configure +
            " --config-file yaml_file.yaml --authorized-key config_backup_pvk ")
        LOG.debug("config workload configure cli command: " +
                  str(config_workload_command))
        rc = cli_parser.cli_returncode(config_workload_command)
        if rc != 0:
            reporting.add_test_step(
                "Triggering config_workload_configure command via CLI",
                tvaultconf.PASS)
            LOG.debug("Command failed as expected with an invalid db password")
        else:
            reporting.add_test_step(
                "Triggering config_workload_configure command via CLI",
                tvaultconf.FAIL)
            LOG.debug("Command succeeded despite an invalid db password")

        # Delete config_user
        self.delete_config_user()
        reporting.test_case_to_write()
    except Exception as e:
        LOG.error("Exception: " + str(e))
        reporting.set_test_script_status(tvaultconf.FAIL)
        reporting.test_case_to_write()
def test_4_config_backup_delete(self):
    global config_backup_id
    reporting.add_test_script(str(__name__) + "_config_backup_delete: cli")
    try:
        # Delete config backup with CLI command
        config_backup_delete_command = (
            command_argument_string.config_backup_delete + " " +
            str(config_backup_id))
        LOG.debug("config backup delete cli command: " +
                  str(config_backup_delete_command))
        rc = cli_parser.cli_returncode(config_backup_delete_command)
        if rc != 0:
            reporting.add_test_step(
                "Triggering config_backup_delete command via CLI",
                tvaultconf.FAIL)
            raise Exception("Command did not execute correctly")
        else:
            reporting.add_test_step(
                "Triggering config_backup_delete command via CLI",
                tvaultconf.PASS)
            LOG.debug("Command executed correctly")

        config_backup_id_after_deletion = query_data.get_config_backup_id()
        LOG.debug("Config backup id after deletion: " +
                  str(config_backup_id_after_deletion))
        if config_backup_id_after_deletion == config_backup_id:
            reporting.add_test_step("Config Backup Deletion", tvaultconf.FAIL)
            reporting.set_test_script_status(tvaultconf.FAIL)
        else:
            reporting.add_test_step("Config Backup Deletion", tvaultconf.PASS)

        # Delete config_user
        self.delete_config_user()
        reporting.test_case_to_write()
    except Exception as e:
        LOG.error("Exception: " + str(e))
        reporting.set_test_script_status(tvaultconf.FAIL)
        reporting.test_case_to_write()
def test_02_invalid_license(self):
    reporting.add_test_script(str(__name__) + "_invalid_license")
    try:
        # Create license using CLI command
        self.cmd = command_argument_string.license_create + \
            tvaultconf.invalid_license_filename
        LOG.debug("License create command: " + str(self.cmd))
        rc = cli_parser.cli_returncode(self.cmd)
        if rc == 0:
            reporting.add_test_step(
                "Execute license_create command with invalid license",
                tvaultconf.PASS)
            LOG.debug("Command executed correctly")
        else:
            reporting.add_test_step(
                "Execute license_create command with invalid license",
                tvaultconf.FAIL)
            raise Exception("Command not executed correctly")

        # Get license key content
        self.license_txt = ""
        with open(tvaultconf.invalid_license_filename) as f:
            for line in f:
                self.license_txt += line
        LOG.debug("License text: " + str(self.license_txt))

        out = self.create_license(tvaultconf.invalid_license_filename,
                                  self.license_txt)
        LOG.debug("license-create API output: " + str(out))
        if str(out).find('Cannot verify the license signature') != -1:
            reporting.add_test_step("Verify error message", tvaultconf.PASS)
        else:
            reporting.add_test_step("Verify error message", tvaultconf.FAIL)
            raise Exception("Incorrect error message displayed")
        reporting.test_case_to_write()
    except Exception as e:
        LOG.error("Exception: " + str(e))
        reporting.set_test_script_status(tvaultconf.FAIL)
        reporting.test_case_to_write()
def test_2_config_backup_list(self):
    global config_backup_id
    reporting.add_test_script(str(__name__) + "_config_backup_list: api")
    try:
        # Test config_backup_list
        config_backup_list_output = self.get_config_backup_list()
        LOG.debug("config_backup list output: " +
                  str(config_backup_list_output))
        if config_backup_list_output != "":
            reporting.add_test_step("Config_backup_list", tvaultconf.PASS)
        else:
            reporting.add_test_step("Config_backup_list", tvaultconf.FAIL)

        config_backups_list = config_backup_list_output['backups']
        config_backup_found = False
        LOG.debug("Finding config backup id: " + str(config_backup_id))
        for backup in config_backups_list:
            if backup['id'] == str(config_backup_id):
                config_backup_found = True
                LOG.debug("config_backup_id found in config_backups_list")
                break
        if config_backup_found:
            reporting.add_test_step("config_backup_id in config_backups_list",
                                    tvaultconf.PASS)
        else:
            LOG.debug("config_backup_id not found in config_backups_list")
            reporting.add_test_step("config_backup_id in config_backups_list",
                                    tvaultconf.FAIL)
        reporting.test_case_to_write()
    except Exception as e:
        LOG.error("Exception: " + str(e))
        reporting.set_test_script_status(tvaultconf.FAIL)
        reporting.test_case_to_write()
def test_tvault1063_bootfromvol_restore(self):
    try:
        # Create full snapshot
        self.snapshot_id = self.workload_snapshot(self.workload_id, True)
        self.wait_for_workload_tobe_available(self.workload_id)
        if (self.getSnapshotStatus(self.workload_id, self.snapshot_id) ==
                "available"):
            reporting.add_test_step(
                "Create full snapshot of boot from volume instance",
                tvaultconf.PASS)
        else:
            reporting.add_test_step(
                "Create full snapshot of boot from volume instance",
                tvaultconf.FAIL)
            raise Exception("Snapshot creation failed")

        self.delete_vms(self.workload_instances)

        # Trigger oneclick restore
        self.restore_id = self.snapshot_restore(self.workload_id,
                                                self.snapshot_id)
        self.wait_for_workload_tobe_available(self.workload_id)
        if (self.getRestoreStatus(self.workload_id, self.snapshot_id,
                                  self.restore_id) == "available"):
            reporting.add_test_step(
                "Oneclick restore of boot from volume instance",
                tvaultconf.PASS)
        else:
            reporting.add_test_step(
                "Oneclick restore of boot from volume instance",
                tvaultconf.FAIL)
            raise Exception("Oneclick restore failed")
        reporting.test_case_to_write()
    except Exception as e:
        LOG.error("Exception: " + str(e))
        reporting.set_test_script_status(tvaultconf.FAIL)
        reporting.test_case_to_write()
def test_3_list_snapshot(self):
    try:
        reporting.add_test_script(str(__name__) + "_list_snapshot")

        # List snapshots using CLI command
        rc = cli_parser.cli_returncode(command_argument_string.snapshot_list)
        if rc != 0:
            reporting.add_test_step("Execute snapshot-list command",
                                    tvaultconf.FAIL)
            raise Exception("Command did not execute correctly")
        else:
            reporting.add_test_step("Execute snapshot-list command",
                                    tvaultconf.PASS)
            LOG.debug("Command executed correctly")

        wc = query_data.get_available_snapshots()
        out = cli_parser.cli_output(command_argument_string.snapshot_list)
        if int(wc) == int(out):
            reporting.add_test_step("Verification with DB", tvaultconf.PASS)
            LOG.debug("Snapshot list command listed available snapshots correctly")
        else:
            reporting.add_test_step("Verification with DB", tvaultconf.FAIL)
            raise Exception(
                "Snapshot list command did not list available snapshots correctly")
        reporting.test_case_to_write()
    except Exception as e:
        LOG.error("Exception: " + str(e))
        reporting.set_test_script_status(tvaultconf.FAIL)
        reporting.test_case_to_write()
def test_tvault1031_show_workloadtype(self):
    try:
        # Get workload type details using CLI command
        rc = cli_parser.cli_returncode(
            command_argument_string.workload_type_show)
        if rc != 0:
            reporting.add_test_step("Execute workload-type-show command",
                                    tvaultconf.FAIL)
            raise Exception("Command did not execute correctly")
        else:
            reporting.add_test_step("Execute workload-type-show command",
                                    tvaultconf.PASS)
            LOG.debug("Command executed correctly")

        db_resp = query_data.get_workload_type_data(
            tvaultconf.workload_type_id)
        LOG.debug("Response from DB: " + str(db_resp))
        cmd_resp = cli_parser.cli_output(
            command_argument_string.workload_type_show)
        LOG.debug("Response from CLI: " + str(cmd_resp))
        if db_resp[5] == tvaultconf.workload_type_id:
            reporting.add_test_step("Verification with DB", tvaultconf.PASS)
            LOG.debug("Workload type response from CLI and DB match")
        else:
            reporting.add_test_step("Verification with DB", tvaultconf.FAIL)
            raise Exception(
                "Workload type response from CLI and DB do not match")
        reporting.test_case_to_write()
    except Exception as e:
        LOG.error("Exception: " + str(e))
        reporting.set_test_script_status(tvaultconf.FAIL)
        reporting.test_case_to_write()
def _compare_trust(self):
    self.trust_check_flag = True
    self.trust_after_upgrade = self.get_trust_list()
    LOG.debug("Trust imported on upgrade: " + str(self.trust_after_upgrade))
    LOG.debug("Trust before upgrade: " + str(upgrade_data_conf.trust_details))
    for i in range(0, len(upgrade_data_conf.trust_details)):
        for key in upgrade_data_conf.trust_details[i].keys():
            if self.trust_after_upgrade:
                if key == "metadata":
                    for j in range(
                            0, len(upgrade_data_conf.trust_details[i][key])):
                        for k in upgrade_data_conf.trust_details[i][key][j].keys():
                            if (upgrade_data_conf.trust_details[i][key][j][k] ==
                                    self.trust_after_upgrade[i][key][j][k]):
                                LOG.debug("Trust metadata '" + str(k) +
                                          "' imported correctly")
                            else:
                                self.trust_check_flag = False
                                reporting.add_test_step(
                                    "Trust metadata '" + str(k) +
                                    "' not imported correctly",
                                    tvaultconf.FAIL)
                                reporting.set_test_script_status(
                                    tvaultconf.FAIL)
                elif (upgrade_data_conf.trust_details[i][key] ==
                        self.trust_after_upgrade[i][key]):
                    LOG.debug("Trust data '" + str(key) +
                              "' imported correctly")
                else:
                    self.trust_check_flag = False
                    reporting.add_test_step(
                        "Trust data '" + str(key) + "' not imported correctly",
                        tvaultconf.FAIL)
                    reporting.set_test_script_status(tvaultconf.FAIL)
            else:
                self.trust_check_flag = False
                reporting.add_test_step("Trust not imported correctly",
                                        tvaultconf.FAIL)
                reporting.set_test_script_status(tvaultconf.FAIL)
    return self.trust_check_flag
def _compare_license(self):
    self.license_check_flag = True
    self.license_after_upgrade = self.get_license_list()
    LOG.debug("License imported on upgrade: " +
              str(self.license_after_upgrade))
    LOG.debug("License before upgrade: " +
              str(upgrade_data_conf.license_details))
    for key in upgrade_data_conf.license_details.keys():
        if self.license_after_upgrade:
            if key == "metadata":
                for k in upgrade_data_conf.license_details[key][0].keys():
                    if (upgrade_data_conf.license_details[key][0][k] ==
                            self.license_after_upgrade[key][0][k]):
                        LOG.debug("License metadata '" + str(k) +
                                  "' imported correctly")
                    else:
                        self.license_check_flag = False
                        reporting.add_test_step(
                            "License metadata '" + str(k) +
                            "' not imported correctly", tvaultconf.FAIL)
                        reporting.set_test_script_status(tvaultconf.FAIL)
            elif (self.license_after_upgrade[key] ==
                    upgrade_data_conf.license_details[key]):
                LOG.debug("License data '" + str(key) +
                          "' imported correctly")
            else:
                self.license_check_flag = False
                reporting.add_test_step(
                    "License data '" + str(key) + "' not imported correctly",
                    tvaultconf.FAIL)
                reporting.set_test_script_status(tvaultconf.FAIL)
        else:
            self.license_check_flag = False
            reporting.add_test_step("License not imported correctly",
                                    tvaultconf.FAIL)
            reporting.set_test_script_status(tvaultconf.FAIL)
    return self.license_check_flag
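# Illustrative only (assumption, not from the original suite): the shape of
# upgrade_data_conf.license_details consumed by _compare_license() above,
# inferred from the accesses in the loop: a dict whose "metadata" key holds
# a list of dicts, with every other key compared value-for-value against the
# post-upgrade license list. Keys and values below are hypothetical.
#
#     license_details = {
#         "metadata": [{"key_name": "key_value"}],
#         "description": "license description",
#     }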
def test_network_restore(self):
    try:
        reporting.add_test_script(str(__name__))
        self.delete_network_topology()
        vms = []
        ntwrks = self.create_network()
        for network in ntwrks:
            if network['name'] in ['Private-1', 'Private-2', 'Private-5']:
                vm_name = "instance-{}".format(network['name'])
                vmid = self.create_vm(vm_name=vm_name,
                                      networkid=[{'uuid': network['id']}],
                                      vm_cleanup=True)
                vms.append((vm_name, vmid))
        LOG.debug("Launched vms : {}".format(vms))

        nt_bf, sbnt_bf, rt_bf, intf_bf = self.get_topology_details()

        vms_ids = [x[1] for x in vms]
        workload_id = self.workload_create(vms_ids, tvaultconf.parallel,
                                           workload_cleanup=True)
        LOG.debug("Workload ID: " + str(workload_id))
        if workload_id is not None:
            self.wait_for_workload_tobe_available(workload_id)
            if self.getWorkloadStatus(workload_id) == "available":
                reporting.add_test_step("Create workload", tvaultconf.PASS)
            else:
                reporting.add_test_step("Create workload", tvaultconf.FAIL)
                reporting.set_test_script_status(tvaultconf.FAIL)
        else:
            reporting.add_test_step("Create workload", tvaultconf.FAIL)
            reporting.set_test_script_status(tvaultconf.FAIL)
            raise Exception("Workload creation failed")

        snapshot_id = self.workload_snapshot(workload_id, True,
                                             snapshot_cleanup=True)
        time.sleep(5)
        self.wait_for_workload_tobe_available(workload_id)
        if self.getSnapshotStatus(workload_id, snapshot_id) == "available":
            reporting.add_test_step("Create full snapshot", tvaultconf.PASS)
            LOG.debug("Full snapshot available!!")
        else:
            reporting.add_test_step("Create full snapshot", tvaultconf.FAIL)
            raise Exception("Snapshot creation failed")

        instance_details = []
        for vm in vms:
            temp_instance_data = {'id': vm[1],
                                  'include': True,
                                  'restore_boot_disk': True,
                                  'name': vm[0] + "restored_instance",
                                  'vdisks': []}
            instance_details.append(temp_instance_data)
        LOG.debug("Instance details for restore: " + str(instance_details))

        # Capture VM details, then delete the original VMs and topology
        vm_details_bf = {}
        for vm in vms:
            vm_details_bf[vm[0]] = self.get_vm_details(vm[1])['server']
            self.delete_vm(vm[1])
        self.delete_network_topology()

        restore_id = self.snapshot_selective_restore(
            workload_id, snapshot_id,
            restore_name=tvaultconf.restore_name,
            instance_details=instance_details,
            network_restore_flag=True,
            restore_cleanup=True)
        self.wait_for_snapshot_tobe_available(workload_id, snapshot_id)
        if (self.getRestoreStatus(workload_id, snapshot_id, restore_id) ==
                "available"):
            reporting.add_test_step("Selective restore with network restore",
                                    tvaultconf.PASS)
        else:
            reporting.add_test_step("Selective restore with network restore",
                                    tvaultconf.FAIL)
            raise Exception("Selective restore with network restore failed")

        nt_af, sbnt_af, rt_af, intf_af = self.get_topology_details()
        if nt_bf == nt_af:
            reporting.add_test_step(
                "Verify network details after network restore",
                tvaultconf.PASS)
        else:
            reporting.add_test_step(
                "Verify network details after network restore",
                tvaultconf.FAIL)
            LOG.error(
                "Network details before and after restore: {0}, {1}".format(
                    nt_bf, nt_af))
        if sbnt_bf == sbnt_af:
            reporting.add_test_step(
                "Verify subnet details after network restore",
                tvaultconf.PASS)
        else:
            reporting.add_test_step(
                "Verify subnet details after network restore",
                tvaultconf.FAIL)
            LOG.error(
                "Subnet details before and after restore: {0}, {1}".format(
                    sbnt_bf, sbnt_af))
        if rt_bf == rt_af:
            reporting.add_test_step(
                "Verify router details after network restore",
                tvaultconf.PASS)
        else:
            reporting.add_test_step(
                "Verify router details after network restore",
                tvaultconf.FAIL)
            LOG.error(
                "Router details before and after restore: {0}, {1}".format(
                    rt_bf, rt_af))
        if intf_bf == intf_af:
            reporting.add_test_step(
                "Verify interface details after network restore",
                tvaultconf.PASS)
        else:
            reporting.add_test_step(
                "Verify interface details after network restore",
                tvaultconf.FAIL)
            LOG.error(
                "Interface details before and after restore: {0}, {1}".format(
                    intf_bf, intf_af))

        vm_details_af = {}
        restored_vms = self.get_restored_vm_list(restore_id)
        for vm in restored_vms:
            vm_details = self.get_vm_details(vm)['server']
            vm_details_af[vm_details['name'].replace(
                'restored_instance', '')] = vm_details

        # Blank out fields expected to differ between original and restored VMs
        klist = sorted([*vm_details_bf])
        for vm in klist:
            netname = [*vm_details_bf[vm]['addresses']][0]
            vm_details_bf[vm]['addresses'][netname][0]['OS-EXT-IPS-MAC:mac_addr'] = ''
            vm_details_af[vm]['addresses'][netname][0]['OS-EXT-IPS-MAC:mac_addr'] = ''
            vm_details_bf[vm]['links'][1]['href'] = ''
            vm_details_af[vm]['links'][1]['href'] = ''
            vm_details_af[vm]['metadata']['config_drive'] = ''
            vm_details_af[vm]['metadata']['ordered_interfaces'] = ''
            vm_details_bf[vm]['links'] = ''
            vm_details_af[vm]['links'] = ''
            vm_details_bf[vm]['OS-EXT-SRV-ATTR:instance_name'] = ''
            vm_details_af[vm]['OS-EXT-SRV-ATTR:instance_name'] = ''
            vm_details_bf[vm]['updated'] = ''
            vm_details_af[vm]['updated'] = ''
            vm_details_bf[vm]['created'] = ''
            vm_details_af[vm]['created'] = ''
            vm_details_bf[vm]['id'] = ''
            vm_details_af[vm]['id'] = ''
            vm_details_bf[vm]['OS-SRV-USG:launched_at'] = ''
            vm_details_af[vm]['OS-SRV-USG:launched_at'] = ''
            vm_details_af[vm]['name'] = vm_details_af[vm]['name'].replace(
                'restored_instance', '')
        if vm_details_bf == vm_details_af:
            reporting.add_test_step("Verify instance details after restore",
                                    tvaultconf.PASS)
        else:
            reporting.add_test_step("Verify instance details after restore",
                                    tvaultconf.FAIL)
            LOG.error(
                "Instance details before and after restore: {0}, {1}".format(
                    vm_details_bf, vm_details_af))

        for rvm in restored_vms:
            self.delete_vm(rvm)
        self.delete_network_topology()
        reporting.test_case_to_write()
    except Exception as e:
        LOG.error("Exception: " + str(e))
        reporting.set_test_script_status(tvaultconf.FAIL)
        reporting.test_case_to_write()
def test_tvault_rbac_nonadmin_ableto(self):
    try:
        # Use non-admin credentials
        os.environ['OS_USERNAME'] = CONF.identity.nonadmin_user
        os.environ['OS_PASSWORD'] = CONF.identity.nonadmin_password
        self.instances_id = []

        # Create volume, launch an instance
        self.volumes_id = self.create_volume(volume_cleanup=False)
        LOG.debug("Volume-1 ID: " + str(self.volumes_id))
        self.instances_id.append(self.create_vm(vm_cleanup=False))
        LOG.debug("VM-1 ID: " + str(self.instances_id[0]))
        self.attach_volume(self.volumes_id, self.instances_id[0])
        LOG.debug("Volume attached")

        # Create workload
        self.wid = self.workload_create(
            self.instances_id, tvaultconf.parallel,
            workload_name=tvaultconf.workload_name)
        LOG.debug("Workload ID: " + str(self.wid))
        workload_available = self.wait_for_workload_tobe_available(self.wid)
        if workload_available == True:
            LOG.debug("Workload created successfully")
            reporting.add_test_step("Verification of workload creation",
                                    tvaultconf.PASS)
            reporting.set_test_script_status(tvaultconf.PASS)
        else:
            LOG.debug("Workload creation unsuccessful")
            reporting.add_test_step("Verification of workload creation",
                                    tvaultconf.FAIL)
            raise Exception(
                "RBAC policy fails for workload creation by non-admin user")

        # Create full snapshot
        self.snapshot_id = self.workload_snapshot(self.wid, True)
        LOG.debug("Snapshot ID-1: " + str(self.snapshot_id))

        # Wait till snapshot is complete
        snapshot_status = self.wait_for_snapshot_tobe_available(
            self.wid, self.snapshot_id)
        if snapshot_status == "available":
            LOG.debug("Snapshot created successfully")
            reporting.add_test_step("Verification of snapshot creation",
                                    tvaultconf.PASS)
            reporting.set_test_script_status(tvaultconf.PASS)
        else:
            LOG.debug("Snapshot creation unsuccessful")
            reporting.add_test_step("Verification of snapshot creation",
                                    tvaultconf.FAIL)
            raise Exception(
                "RBAC policy fails for snapshot creation by non-admin user")

        # Delete the original instance
        self.delete_vm(self.instances_id[0])
        LOG.debug("Instance deleted successfully")

        # Delete corresponding volume
        self.delete_volume(self.volumes_id)
        LOG.debug("Volume deleted successfully")

        # Create one-click restore
        restore_status = ""
        restore_name = "restore_1"
        restore_id = self.snapshot_restore(self.wid, self.snapshot_id,
                                           restore_name=restore_name)
        restore_status = query_data.get_snapshot_restore_status(
            restore_name, self.snapshot_id)
        LOG.debug("Snapshot restore status initial: " + str(restore_status))
        while (str(restore_status) != "available" and
                str(restore_status) != "error"):
            time.sleep(10)
            restore_status = query_data.get_snapshot_restore_status(
                restore_name, self.snapshot_id)
            LOG.debug("Snapshot restore status: " + str(restore_status))
        if str(restore_status) == "available":
            LOG.debug("Snapshot restore successfully completed")
            reporting.add_test_step(
                "Snapshot one-click restore verification with DB",
                tvaultconf.PASS)
        else:
            LOG.debug("Snapshot restore unsuccessful")
            reporting.add_test_step(
                "Snapshot one-click restore verification with DB",
                tvaultconf.FAIL)

        # Launch recovery instance and mount snapshot
        self.recoveryinstances_id = self.create_vm(
            flavor_id=CONF.compute.flavor_ref_alt,
            image_id=CONF.compute.fvm_image_ref)
        LOG.debug("VM-2 ID: " + str(self.recoveryinstances_id))
        status = self.mount_snapshot(self.wid, self.snapshot_id,
                                     self.recoveryinstances_id)
        if status == True:
            LOG.debug("Snapshot mounted successfully")
            reporting.add_test_step("Verification of snapshot mount",
                                    tvaultconf.PASS)
        else:
            LOG.debug("Snapshot mount unsuccessful")
            reporting.add_test_step("Verification of snapshot mount",
                                    tvaultconf.FAIL)
            raise Exception("Snapshot not mounted by non-admin user")

        # Run filesearch
        vmid_to_search = self.instances_id[0]
        filepath_to_search = "/File_1.txt"
        filecount_in_snapshots = {self.snapshot_id: 0}
        filesearch_id = self.filepath_search(vmid_to_search,
                                             filepath_to_search)
        snapshot_wise_filecount = self.verifyFilepath_Search(
            filesearch_id, filepath_to_search)
        for snapshot_id in filecount_in_snapshots.keys():
            if (snapshot_wise_filecount[snapshot_id] ==
                    filecount_in_snapshots[snapshot_id]):
                filesearch_status = True
            else:
                filesearch_status = False
                LOG.debug("Filepath search unsuccessful")
                reporting.add_test_step("Verification of filepath search",
                                        tvaultconf.FAIL)
                raise Exception(
                    "Filesearch did not execute correctly for non-admin user")
        if filesearch_status == True:
            LOG.debug("Filepath search successful")
            reporting.add_test_step("Verification of filepath search",
                                    tvaultconf.PASS)
            reporting.set_test_script_status(tvaultconf.PASS)
        reporting.test_case_to_write()
    except Exception as e:
        LOG.error("Exception: " + str(e))
        reporting.set_test_script_status(tvaultconf.FAIL)
        reporting.test_case_to_write()
def test_chargeback_api(self):
    try:
        if self.exception != "":
            LOG.debug("pre req failed")
            reporting.add_test_step(str(self.exception), tvaultconf.FAIL)
            raise Exception(str(self.exception))
        LOG.debug("pre req completed")

        vm_id = self.vm_id
        wid = self.wid
        chargeback_info = self.getTenantChargeback()
        if not chargeback_info:
            reporting.add_test_step("Verified Chargeback API",
                                    tvaultconf.FAIL)
            LOG.debug("Verified Chargeback API failed")
            raise Exception("Verified Chargeback API Failed")
        else:
            reporting.add_test_step("Verified Chargeback API",
                                    tvaultconf.PASS)
        tenant_name_chargeback = chargeback_info[str(
            CONF.identity.tenant_id)]['tenant_name']
        LOG.debug("Env Tenant ID: " + CONF.identity.tenant_id)
        LOG.debug("Tenant name: " + str(tenant_name_chargeback))
        LOG.debug("Instance ID: " + vm_id)

        # Verify workload ID
        openstack_workload_ids = list(
            chargeback_info[CONF.identity.tenant_id]['workloads'].keys())
        LOG.debug("Workload IDs: " + str(openstack_workload_ids))
        workload_found = False
        for workload_id in openstack_workload_ids:
            if workload_id == wid:
                LOG.debug("Workload ID: " + wid)
                workload_found = True
        if workload_found == True:
            reporting.add_test_step("Verified workload id", tvaultconf.PASS)
        else:
            reporting.add_test_step("Verified workload id", tvaultconf.FAIL)
            raise Exception("Verification for workload id failed")

        # Verify instance ID
        openstack_instance_ids = list(
            chargeback_info[CONF.identity.tenant_id]['workloads'][wid]
            ['protected_vms'].keys())
        LOG.debug("Protected VM IDs: " + str(openstack_instance_ids))
        instance_found = False
        for instance_id in openstack_instance_ids:
            if instance_id == vm_id:
                LOG.debug("VM ID: " + instance_id)
                instance_found = True
        if instance_found == True:
            reporting.add_test_step("Verified instance id", tvaultconf.PASS)
        else:
            reporting.add_test_step("Verified instance id", tvaultconf.FAIL)
            raise Exception("Verification for instance id failed")
        reporting.test_case_to_write()
    except Exception as e:
        LOG.error("Exception: " + str(e))
        reporting.set_test_script_status(tvaultconf.FAIL)
        reporting.test_case_to_write()
def test_1_snapshot_mount_full(self):
    reporting.add_test_script(str(__name__) + "_full_snapshot")
    try:
        if self.exception != "":
            LOG.debug("pre req failed")
            reporting.add_test_step(str(self.exception), tvaultconf.FAIL)
            raise Exception(str(self.exception))
        LOG.debug("pre req completed")

        global instances_ids
        global snapshot_ids
        global wid
        global security_group_id
        global volumes_ids
        global fvm_id
        global floating_ips_list
        instances_ids = self.instances_ids
        snapshot_ids = self.snapshot_ids
        wid = self.wid
        volumes_ids = self.volumes_ids
        security_group_id = self.security_group_id
        fvm_id = self.fvm_id
        full_snapshot_id = snapshot_ids[0]
        floating_ips_list = self.floating_ips_list

        LOG.debug("mount snapshot of a full snapshot")
        is_mounted = self.mount_snapshot(wid, full_snapshot_id, fvm_id,
                                         mount_cleanup=False)
        LOG.debug("VALUE OF is_mounted: " + str(is_mounted))
        if is_mounted:
            LOG.debug("mount snapshot with full snapshot is successful")
            reporting.add_test_step(
                "Verification of mount snapshot with full snapshot",
                tvaultconf.PASS)
        else:
            LOG.debug("mount snapshot with full snapshot is unsuccessful")
            reporting.add_test_step(
                "Verification of mount snapshot with full snapshot",
                tvaultconf.FAIL)
            raise Exception(
                "Snapshot mount with full snapshot did not execute correctly")

        LOG.debug("validate that snapshot is mounted on FVM")
        ssh = self.SshRemoteMachineConnectionWithRSAKey(
            str(floating_ips_list[1]), CONF.validation.fvm_ssh_user)
        output_list = self.validate_snapshot_mount(
            ssh).decode('UTF-8').split('\n')
        ssh.close()

        flag = 0
        for i in output_list:
            if 'vdbb' in i:
                LOG.debug("mountpoint is mounted on FVM instance")
                reporting.add_test_step(
                    "Verify that mountpoint mounted is shown on FVM instance",
                    tvaultconf.PASS)
                flag = 1
                if 'File_1' in i:
                    LOG.debug("file exists on mounted snapshot")
                    reporting.add_test_step(
                        "Verification of file's existence on mounted snapshot",
                        tvaultconf.PASS)
                else:
                    LOG.debug("file not found on FVM instance")
                    reporting.add_test_step(
                        "Verification of file's existence on mounted snapshot",
                        tvaultconf.FAIL)
                    raise Exception("file not found on FVM instance")
        if flag == 0:
            LOG.debug("mount snapshot with full snapshot is unsuccessful on FVM")
            reporting.add_test_step(
                "Verify that mountpoint mounted is shown on FVM instance",
                tvaultconf.FAIL)
            LOG.debug("file not found on FVM instance")
            reporting.add_test_step(
                "Verification of file's existence on mounted snapshot",
                tvaultconf.FAIL)
            raise Exception("mountpoint is not showing on FVM instance")
        reporting.test_case_to_write()
    except Exception as e:
        LOG.error("Exception: " + str(e))
        reporting.set_test_script_status(tvaultconf.FAIL)
        reporting.test_case_to_write()