def test_tvault_rbac_backuprole_touser_policyjson(self):
    try:
        workload_create_error_str = "Policy doesn't allow workload:workload_create to be performed."
        snapshot_create_error_str = "Policy doesn't allow workload:workload_snapshot to be performed."
        restore_create_error_str = "Policy doesn't allow snapshot:snapshot_restore to be performed."
        workload_delete_error_str = "Policy doesn't allow workload:workload_delete to be performed."
        snapshot_delete_error_str = "Policy doesn't allow snapshot:snapshot_delete to be performed."
        restore_delete_error_str = "Policy doesn't allow restore:restore_delete to be performed."

        # Change policy.json file on tvault to change role and rule
        self.change_policyjson_file("backup", "backup_api")
        self.instances_id = []

        # Create volume, launch an instance and attach the volume to it
        self.volumes_id = self.create_volume(volume_cleanup=False)
        LOG.debug("Volume-1 ID: " + str(self.volumes_id))
        self.instances_id.append(self.create_vm(vm_cleanup=False))
        LOG.debug("VM-1 ID: " + str(self.instances_id[0]))
        self.attach_volume(self.volumes_id, self.instances_id[0])
        LOG.debug("Volume attached")

        # Use backupuser credentials
        os.environ['OS_USERNAME'] = CONF.identity.backupuser
        os.environ['OS_PASSWORD'] = CONF.identity.backupuser_password

        # Create workload with CLI by backup role
        workload_create = command_argument_string.workload_create + \
            " --instance instance-id=" + str(self.instances_id[0])
        error = cli_parser.cli_error(workload_create)
        if error and (str(error.strip('\n')).find('ERROR') != -1):
            LOG.debug("Workload creation unsuccessful by backup role")
            raise Exception(
                "RBAC policy fails for workload creation by backup role")
        else:
            LOG.debug("Workload created successfully by backup role")
            reporting.add_test_step(
                "Execute workload_create command by backup role",
                tvaultconf.PASS)
            time.sleep(10)
            self.wid1 = query_data.get_workload_id(tvaultconf.workload_name)
            workload_available = self.wait_for_workload_tobe_available(
                self.wid1)

        # Run snapshot_create CLI by backup role
        snapshot_create = command_argument_string.snapshot_create + \
            str(self.wid1)
        LOG.debug("snapshot_create command: " + str(snapshot_create))
        error = cli_parser.cli_error(snapshot_create)
        if error and (str(error.strip('\n')).find('ERROR') != -1):
            reporting.add_test_step(
                "Execute snapshot_create command by backup role",
                tvaultconf.FAIL)
            raise Exception(
                "Command snapshot_create did not execute correctly by backup role")
        else:
            reporting.add_test_step(
                "Execute snapshot_create command by backup role",
                tvaultconf.PASS)
            LOG.debug(
                "Command snapshot_create executed correctly by backup role")
            self.snapshot_id1 = query_data.get_inprogress_snapshot_id(
                self.wid1)
            wc = self.wait_for_snapshot_tobe_available(
                self.wid1, self.snapshot_id1)

        # Delete the original instance
        self.delete_vm(self.instances_id[0])
        LOG.debug("Instance deleted successfully for restore")

        # Delete corresponding volume
        self.delete_volume(self.volumes_id)
        LOG.debug("Volume deleted successfully for restore")

        # Create one-click restore using CLI command by backup role
        restore_command = command_argument_string.oneclick_restore + \
            " " + str(self.snapshot_id1)
        error = cli_parser.cli_error(restore_command)
        if error and (str(error.strip('\n')).find('ERROR') != -1):
            reporting.add_test_step(
                "Execute snapshot-oneclick-restore command by backup role",
                tvaultconf.FAIL)
            raise Exception(
                "Command one-click restore did not execute correctly by backup role")
        else:
            reporting.add_test_step(
                "Execute snapshot-oneclick-restore command by backup role",
                tvaultconf.PASS)
            LOG.debug(
                "Command one-click restore executed correctly by backup role")
            wc = self.wait_for_snapshot_tobe_available(
                self.wid1, self.snapshot_id1)
            self.restore_id1 = query_data.get_snapshot_restore_id(
                self.snapshot_id1)
            LOG.debug("Restore ID: " + str(self.restore_id1))
            self.restore_vm_id1 = self.get_restored_vm_list(self.restore_id1)
            LOG.debug("Restore VM ID: " + str(self.restore_vm_id1))
            self.restore_volume_id1 = self.get_restored_volume_list(
                self.restore_id1)
            LOG.debug("Restore Volume ID: " + str(self.restore_volume_id1))

        # Use admin credentials
        os.environ['OS_USERNAME'] = CONF.identity.username
        os.environ['OS_PASSWORD'] = CONF.identity.password

        # Create workload with CLI by admin role; policy should deny it
        workload_create = command_argument_string.workload_create + \
            " --instance instance-id=" + str(self.restore_vm_id1[0])
        error = cli_parser.cli_error(workload_create)
        if error and (str(error.strip('\n')).find(
                workload_create_error_str) != -1):
            LOG.debug(
                "Command workload_create did not execute correctly by admin role")
            reporting.add_test_step(
                "Can not execute workload_create command by admin role",
                tvaultconf.PASS)
        else:
            reporting.add_test_step(
                "Can not execute workload_create command by admin role",
                tvaultconf.FAIL)
            raise Exception(
                "Command workload_create executed correctly by admin role")

        # Run snapshot_create CLI by admin role
        snapshot_create = command_argument_string.snapshot_create + \
            str(self.wid1)
        LOG.debug("snapshot_create command: " + str(snapshot_create))
        error = cli_parser.cli_error(snapshot_create)
        if error and (str(error.strip('\n')).find(
                snapshot_create_error_str) != -1):
            reporting.add_test_step(
                "Can not execute snapshot_create command by admin role",
                tvaultconf.PASS)
            LOG.debug(
                "Command snapshot_create did not execute correctly by admin role")
        else:
            reporting.add_test_step(
                "Can not execute snapshot_create command by admin role",
                tvaultconf.FAIL)
            raise Exception(
                "Command snapshot_create executed correctly by admin role")

        # Create one-click restore using CLI command by admin role
        restore_command = command_argument_string.oneclick_restore + \
            " " + str(self.snapshot_id1)
        error = cli_parser.cli_error(restore_command)
        if error and (str(error.strip('\n')).find(
                restore_create_error_str) != -1):
            reporting.add_test_step(
                "Can not execute restore_create command by admin role",
                tvaultconf.PASS)
            LOG.debug(
                "Command restore_create did not execute correctly by admin role")
        else:
            reporting.add_test_step(
                "Can not execute restore_create command by admin role",
                tvaultconf.FAIL)
            raise Exception(
                "Command restore_create executed correctly by admin role")

        # Run restore_delete CLI by admin role
        restore_delete = command_argument_string.restore_delete + \
            str(self.restore_id1)
        error = cli_parser.cli_error(restore_delete)
        if error and (str(error.strip('\n')).find(
                restore_delete_error_str) != -1):
            reporting.add_test_step(
                "Can not execute restore_delete command by admin role",
                tvaultconf.PASS)
            LOG.debug(
                "Command restore_delete did not execute correctly by admin role")
        else:
            reporting.add_test_step(
                "Can not execute restore_delete command by admin role",
                tvaultconf.FAIL)
            raise Exception(
                "Command restore_delete executed correctly by admin role")

        # Run snapshot_delete CLI by admin role
        snapshot_delete = command_argument_string.snapshot_delete + \
            str(self.snapshot_id1)
        error = cli_parser.cli_error(snapshot_delete)
        if error and (str(error.strip('\n')).find(
                snapshot_delete_error_str) != -1):
            reporting.add_test_step(
                "Can not execute snapshot_delete command by admin role",
                tvaultconf.PASS)
            LOG.debug(
                "Command snapshot_delete did not execute correctly by admin role")
        else:
            reporting.add_test_step(
                "Can not execute snapshot_delete command by admin role",
                tvaultconf.FAIL)
            raise Exception(
                "Command snapshot_delete executed correctly by admin role")

        # Delete workload with CLI by admin role
        workload_delete = command_argument_string.workload_delete + \
            str(self.wid1)
        error = cli_parser.cli_error(workload_delete)
        if error and (str(error.strip('\n')).find(
                workload_delete_error_str) != -1):
            reporting.add_test_step(
                "Can not execute workload_delete command by admin role",
                tvaultconf.PASS)
            LOG.debug(
                "Command workload_delete did not execute correctly by admin role")
        else:
            reporting.add_test_step(
                "Can not execute workload_delete command by admin role",
                tvaultconf.FAIL)
            raise Exception(
                "Command workload_delete executed correctly by admin role")

        # Use nonadmin credentials
        os.environ['OS_USERNAME'] = CONF.identity.nonadmin_user
        os.environ['OS_PASSWORD'] = CONF.identity.nonadmin_password

        # Create workload with CLI by default role
        workload_create = command_argument_string.workload_create + \
            " --instance instance-id=" + str(self.restore_vm_id1[0])
        error = cli_parser.cli_error(workload_create)
        if error and (str(error.strip('\n')).find(
                workload_create_error_str) != -1):
            LOG.debug(
                "Command workload_create did not execute correctly by default role")
            reporting.add_test_step(
                "Can not execute workload_create command by default role",
                tvaultconf.PASS)
        else:
            reporting.add_test_step(
                "Can not execute workload_create command by default role",
                tvaultconf.FAIL)
            raise Exception(
                "Command workload_create executed correctly by default role")

        # Run snapshot_create CLI by default role
        snapshot_create = command_argument_string.snapshot_create + \
            str(self.wid1)
        error = cli_parser.cli_error(snapshot_create)
        if error and (str(error.strip('\n')).find(
                snapshot_create_error_str) != -1):
            reporting.add_test_step(
                "Can not execute snapshot_create command by default role",
                tvaultconf.PASS)
            LOG.debug(
                "Command snapshot_create did not execute correctly by default role")
        else:
            reporting.add_test_step(
                "Can not execute snapshot_create command by default role",
                tvaultconf.FAIL)
            raise Exception(
                "Command snapshot_create executed correctly by default role")

        # Create one-click restore using CLI by default role
        restore_command = command_argument_string.oneclick_restore + \
            " " + str(self.snapshot_id1)
        error = cli_parser.cli_error(restore_command)
        if error and (str(error.strip('\n')).find(
                restore_create_error_str) != -1):
            reporting.add_test_step(
                "Can not execute restore_create command by default role",
                tvaultconf.PASS)
            LOG.debug(
                "Command restore_create did not execute correctly by default role")
        else:
            reporting.add_test_step(
                "Can not execute restore_create command by default role",
                tvaultconf.FAIL)
            raise Exception(
                "Command restore_create executed correctly by default role")

        # Run restore_delete CLI by default role
        restore_delete = command_argument_string.restore_delete + \
            str(self.restore_id1)
        error = cli_parser.cli_error(restore_delete)
        if error and (str(error.strip('\n')).find(
                restore_delete_error_str) != -1):
            reporting.add_test_step(
                "Can not execute restore_delete command by default role",
                tvaultconf.PASS)
            LOG.debug(
                "Command restore_delete did not execute correctly by default role")
        else:
            reporting.add_test_step(
                "Can not execute restore_delete command by default role",
                tvaultconf.FAIL)
            raise Exception(
                "Command restore_delete executed correctly by default role")

        # Run snapshot_delete CLI by default role
        snapshot_delete = command_argument_string.snapshot_delete + \
            str(self.snapshot_id1)
        LOG.debug("snapshot_delete command: " + str(snapshot_delete))
        error = cli_parser.cli_error(snapshot_delete)
        if error and (str(error.strip('\n')).find(
                snapshot_delete_error_str) != -1):
            reporting.add_test_step(
                "Can not execute snapshot_delete command by default role",
                tvaultconf.PASS)
            LOG.debug(
                "Command snapshot_delete did not execute correctly by default role")
        else:
            reporting.add_test_step(
                "Can not execute snapshot_delete command by default role",
                tvaultconf.FAIL)
            raise Exception(
                "Command snapshot_delete executed correctly by default role")

        # Delete workload with CLI by default role
        workload_delete = command_argument_string.workload_delete + \
            str(self.wid1)
        error = cli_parser.cli_error(workload_delete)
        if error and (str(error.strip('\n')).find(
                workload_delete_error_str) != -1):
            reporting.add_test_step(
                "Can not execute workload_delete command by default role",
                tvaultconf.PASS)
            LOG.debug(
                "Command workload_delete did not execute correctly by default role")
        else:
            reporting.add_test_step(
                "Can not execute workload_delete command by default role",
                tvaultconf.FAIL)
            raise Exception(
                "Command workload_delete executed correctly by default role")

        # Use backupuser credentials again for cleanup
        os.environ['OS_USERNAME'] = CONF.identity.backupuser
        os.environ['OS_PASSWORD'] = CONF.identity.backupuser_password

        # Run restore_delete CLI by backup role
        restore_delete = command_argument_string.restore_delete + \
            str(self.restore_id1)
        error = cli_parser.cli_error(restore_delete)
        if error and (str(error.strip('\n')).find(
                restore_delete_error_str) != -1):
            reporting.add_test_step(
                "Execute restore_delete command by backup role",
                tvaultconf.FAIL)
            raise Exception(
                "Command restore_delete did not execute correctly by backup role")
        else:
            reporting.add_test_step(
                "Execute restore_delete command by backup role",
                tvaultconf.PASS)
            LOG.debug(
                "Command restore_delete executed correctly by backup role")
            wc = self.wait_for_snapshot_tobe_available(
                self.wid1, self.snapshot_id1)

        # Delete restored VM instance and volume
        self.delete_restored_vms(self.restore_vm_id1, self.restore_volume_id1)
        LOG.debug("Restored VMs deleted successfully by backup role")

        # Run snapshot_delete CLI by backup role
        snapshot_delete = command_argument_string.snapshot_delete + \
            str(self.snapshot_id1)
        LOG.debug("snapshot_delete command: " + str(snapshot_delete))
        error = cli_parser.cli_error(snapshot_delete)
        if error and (str(error.strip('\n')).find(
                snapshot_delete_error_str) != -1):
            reporting.add_test_step(
                "Execute snapshot_delete command by backup role",
                tvaultconf.FAIL)
            raise Exception(
                "Command snapshot_delete did not execute correctly by backup role")
        else:
            reporting.add_test_step(
                "Execute snapshot_delete command by backup role",
                tvaultconf.PASS)
            LOG.debug(
                "Command snapshot_delete executed correctly by backup role")
            workload_available = self.wait_for_workload_tobe_available(
                self.wid1)

        # Delete workload with CLI by backup role
        workload_delete = command_argument_string.workload_delete + \
            str(self.wid1)
        error = cli_parser.cli_error(workload_delete)
        if error and (str(error.strip('\n')).find(
                workload_delete_error_str) != -1):
            reporting.add_test_step(
                "Execute workload_delete command by backup role",
                tvaultconf.FAIL)
            raise Exception(
                "RBAC policy fails for workload deletion by backup role")
        else:
            LOG.debug("Workload deleted successfully by backup role")
            reporting.add_test_step(
                "Execute workload_delete command by backup role",
                tvaultconf.PASS)

        reporting.test_case_to_write()

    except Exception as e:
        LOG.error("Exception: " + str(e))
        reporting.set_test_script_status(tvaultconf.FAIL)
        reporting.test_case_to_write()
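# Every role check above follows one pattern: run a CLI command, scan its
# stderr for a policy-denial string, and record a PASS/FAIL step. A helper
# like the following sketch could factor that out. The helper itself is
# hypothetical (not an existing method of this test class); it only uses
# cli_parser, reporting, tvaultconf and LOG, which the tests above already
# rely on.
def _check_cli_denied(self, command, denial_str, step_name):
    # Returns True when the command is rejected with the expected message.
    error = cli_parser.cli_error(command)
    if error and denial_str in str(error.strip('\n')):
        reporting.add_test_step(step_name, tvaultconf.PASS)
        LOG.debug("Command blocked as expected: " + str(command))
        return True
    reporting.add_test_step(step_name, tvaultconf.FAIL)
    LOG.debug("Command was not blocked: " + str(command))
    return False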
def test_tvault_rbac_newadminrole_in_policyjson(self):
    try:
        storage_usage_error_str = "Policy doesn't allow workload:get_storage_usage to be performed."
        get_nodes_error_str = "Policy doesn't allow workload:get_nodes to be performed."

        # Change policy.json file on tvault to change role and rule
        self.change_policyjson_file("newadmin", "newadmin_api")

        # Use newadmin credentials
        os.environ['OS_USERNAME'] = CONF.identity.newadmin_user
        os.environ['OS_PASSWORD'] = CONF.identity.newadmin_password

        # Run get_storage_usage CLI by newadmin role
        get_storage_usage = command_argument_string.get_storage_usage
        LOG.debug("get_storage_usage command: " + str(get_storage_usage))
        error = cli_parser.cli_error(get_storage_usage)
        if error and (str(error.strip('\n')).find(
                storage_usage_error_str) != -1):
            reporting.add_test_step(
                "Execute get_storage_usage command by newadmin role",
                tvaultconf.FAIL)
        else:
            reporting.add_test_step(
                "Execute get_storage_usage command by newadmin role",
                tvaultconf.PASS)
            LOG.debug(
                "Command get_storage_usage executed correctly by newadmin role")

        # Run get_nodes CLI by newadmin role
        get_nodes = command_argument_string.get_nodes
        LOG.debug("get_nodes command: " + str(get_nodes))
        error = cli_parser.cli_error(get_nodes)
        if error and (str(error.strip('\n')).find(get_nodes_error_str) != -1):
            reporting.add_test_step(
                "Execute get_nodes command by newadmin role",
                tvaultconf.FAIL)
        else:
            reporting.add_test_step(
                "Execute get_nodes command by newadmin role",
                tvaultconf.PASS)
            LOG.debug("Command get_nodes executed correctly by newadmin role")

        # Use admin credentials
        os.environ['OS_USERNAME'] = CONF.identity.username
        os.environ['OS_PASSWORD'] = CONF.identity.password

        # Run get_storage_usage CLI by admin role
        get_storage_usage = command_argument_string.get_storage_usage
        LOG.debug("get_storage_usage command: " + str(get_storage_usage))
        error = cli_parser.cli_error(get_storage_usage)
        if error and (str(error.strip('\n')).find(
                storage_usage_error_str) != -1):
            reporting.add_test_step(
                "Can not execute get_storage_usage command by admin role",
                tvaultconf.PASS)
            LOG.debug(
                "Command get_storage_usage did not execute by admin role")
        else:
            reporting.add_test_step(
                "Can not execute get_storage_usage command by admin role",
                tvaultconf.FAIL)

        # Run get_nodes CLI by admin role
        get_nodes = command_argument_string.get_nodes
        LOG.debug("get_nodes command: " + str(get_nodes))
        error = cli_parser.cli_error(get_nodes)
        if error and (str(error.strip('\n')).find(get_nodes_error_str) != -1):
            reporting.add_test_step(
                "Can not execute get_nodes command by admin role",
                tvaultconf.PASS)
            LOG.debug("Command get_nodes did not execute by admin role")
        else:
            reporting.add_test_step(
                "Can not execute get_nodes command by admin role",
                tvaultconf.FAIL)

        # Use non-admin credentials
        os.environ['OS_USERNAME'] = CONF.identity.nonadmin_user
        os.environ['OS_PASSWORD'] = CONF.identity.nonadmin_password

        # Run get_storage_usage CLI by non-admin role
        get_storage_usage = command_argument_string.get_storage_usage
        LOG.debug("get_storage_usage command: " + str(get_storage_usage))
        error = cli_parser.cli_error(get_storage_usage)
        if error and (str(error.strip('\n')).find(
                storage_usage_error_str) != -1):
            reporting.add_test_step(
                "Can not execute get_storage_usage command by non-admin",
                tvaultconf.PASS)
            LOG.debug(
                "Command get_storage_usage did not execute by non-admin")
        else:
            reporting.add_test_step(
                "Can not execute get_storage_usage command by non-admin",
                tvaultconf.FAIL)

        # Run get_nodes CLI by non-admin role
        get_nodes = command_argument_string.get_nodes
        LOG.debug("get_nodes command: " + str(get_nodes))
        error = cli_parser.cli_error(get_nodes)
        if error and (str(error.strip('\n')).find(get_nodes_error_str) != -1):
            reporting.add_test_step(
                "Can not execute get_nodes command by non-admin",
                tvaultconf.PASS)
            LOG.debug("Command get_nodes did not execute by non-admin")
        else:
            reporting.add_test_step(
                "Can not execute get_nodes command by non-admin",
                tvaultconf.FAIL)

        reporting.set_test_script_status(tvaultconf.PASS)
        reporting.test_case_to_write()

    except Exception as e:
        LOG.error("Exception: " + str(e))
        reporting.set_test_script_status(tvaultconf.FAIL)
        reporting.test_case_to_write()
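# The tests switch identities by mutating OS_USERNAME/OS_PASSWORD inline and
# must remember to switch back by hand. A scoped, self-restoring alternative
# is sketched below; it is an illustration only (assumes `import contextlib`
# at module level and that the CLI wrappers read credentials from os.environ
# at call time, as the tests above do).
@contextlib.contextmanager
def _cli_credentials(username, password):
    saved = {k: os.environ.get(k) for k in ('OS_USERNAME', 'OS_PASSWORD')}
    os.environ['OS_USERNAME'] = username
    os.environ['OS_PASSWORD'] = password
    try:
        yield
    finally:
        # Restore the previous identity even if a check raises.
        for key, value in saved.items():
            if value is None:
                os.environ.pop(key, None)
            else:
                os.environ[key] = value

# Example usage (hypothetical):
#     with _cli_credentials(CONF.identity.backupuser,
#                           CONF.identity.backupuser_password):
#         error = cli_parser.cli_error(workload_create)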
def test_3_modify_workload_scheduler_enable(self): reporting.add_test_script(str(__name__) + "_scheduler_enable") try: # Prerequisites self.created = False self.workload_instances = [] # Launch instance self.vm_id = self.create_vm() LOG.debug("VM ID-3: " + str(self.vm_id)) # Create volume self.volume_id = self.create_volume() LOG.debug("Volume ID-3: " + str(self.volume_id)) # Attach volume to the instance self.attach_volume(self.volume_id, self.vm_id) LOG.debug("Volume attached-3") # Create workload with scheduler disabled using CLI workload_create = command_argument_string.workload_create + \ " --instance instance-id=" + \ str(self.vm_id) + " --jobschedule enabled=False" rc = cli_parser.cli_returncode(workload_create) if rc != 0: reporting.add_test_step( "Execute workload-create command with scheduler disable", tvaultconf.FAIL) raise Exception( "Command workload create did not execute correctly") else: reporting.add_test_step( "Execute workload-create command with scheduler disable", tvaultconf.PASS) LOG.debug("Command workload create executed correctly") time.sleep(10) self.wid = query_data.get_workload_id(tvaultconf.workload_name) LOG.debug("Workload ID-3: " + str(self.wid)) if (self.wid is not None): self.wait_for_workload_tobe_available(self.wid) if (self.getWorkloadStatus(self.wid) == "available"): reporting.add_test_step( "Create workload with scheduler disable", tvaultconf.PASS) else: reporting.add_test_step( "Create workload with scheduler disable", tvaultconf.FAIL) reporting.set_test_script_status(tvaultconf.FAIL) else: reporting.add_test_step( "Create workload with scheduler disable", tvaultconf.FAIL) reporting.set_test_script_status(tvaultconf.FAIL) LOG.debug("Workload ID: " + str(self.wid)) # Verify workload created scheduler disable status = self.getSchedulerStatus(self.wid) if status: reporting.add_test_step( "Verify workload created with scheduler disable", tvaultconf.FAIL) raise Exception( "Workload has not been created with scheduler disabled") else: reporting.add_test_step( "Verify workload created with scheduler disable", tvaultconf.PASS) LOG.debug( "Workload created with scheduler disabled successfully") # Get workload scheduler details schedule_details = self.getSchedulerDetails(self.wid) scheduled_start_time = schedule_details['start_time'] interval = schedule_details['interval'] # Change global job scheduler to disable LOG.debug("Change Global job scheduler to disable") status = self.disable_global_job_scheduler() if not status: reporting.add_test_step("Global job scheduler disable", tvaultconf.PASS) LOG.debug("Global job scheduler disabled successfully") else: reporting.add_test_step("Global job scheduler disable", tvaultconf.FAIL) raise Exception("Global job scheduler not disabled") # Modify workload scheduler to enable workload_modify_command = command_argument_string.workload_modify + \ str(self.wid) + " --jobschedule enabled=True" error = cli_parser.cli_error(workload_modify_command) if error and (str( error.strip('\n') ).find("Cannot update scheduler related fields when global jobscheduler is disabled." 
) != -1): reporting.add_test_step( "Does not execute workload-modify scheduler enable", tvaultconf.PASS) LOG.debug("Command executed correctly") reporting.add_test_step("Throws proper message", tvaultconf.PASS) LOG.debug("Error message :" + str(error)) else: reporting.add_test_step( "Does not execute workload-modify scheduler enable", tvaultconf.FAIL) reporting.add_test_step("Throws proper message", tvaultconf.FAIL) raise Exception("Command did not execute correctly") # Change global job scheduler to enable LOG.debug("Change Global job scheduler to enable") status = self.enable_global_job_scheduler() if status: reporting.add_test_step("Global job scheduler enable", tvaultconf.PASS) LOG.debug("Global job scheduler enabled successfully") else: reporting.add_test_step("Global job scheduler enable", tvaultconf.FAIL) raise Exception("Global job scheduler not enabled") # Modify workload scheduler to enable and set the start date, time # and timezone now = datetime.datetime.utcnow() now_date = datetime.datetime.strftime(now, "%m/%d/%Y") now_time = datetime.datetime.strftime(now, "%I:%M %p") now_time_plus_15 = now + datetime.timedelta(minutes=15) now_time_plus_15 = datetime.datetime.strftime( now_time_plus_15, "%I:%M %p") workload_modify_command = command_argument_string.workload_modify + str( self.wid ) + " --jobschedule enabled=True" + " --jobschedule start_date=" + str( now_date) + " --jobschedule start_time=" + "'" + str( now_time_plus_15).strip( ) + "'" + " --jobschedule timezone=UTC" rc = cli_parser.cli_returncode(workload_modify_command) if rc != 0: reporting.add_test_step( "Execute workload-modify scheduler enable", tvaultconf.FAIL) raise Exception("Command did not execute correctly") else: reporting.add_test_step( "Execute workload-modify scheduler enable", tvaultconf.PASS) LOG.debug("Command executed correctly") # Verify workload scheduler changed to enable self.wait_for_workload_tobe_available(self.wid) status = self.getSchedulerStatus(self.wid) if status: reporting.add_test_step("Verify workload scheduler enabled", tvaultconf.PASS) LOG.debug("workload scheduler enabled successfully") else: reporting.add_test_step("Verify workload scheduler enabled", tvaultconf.FAIL) LOG.debug("workload scheduler enabled unsuccessfully") # Verify interval value and nest_snapshot_run values schedule_details = self.getSchedulerDetails(self.wid) interval_after_enable = schedule_details['interval'] next_run_time_after_enable = schedule_details['nextrun'] next_run_time_after_enable = int(next_run_time_after_enable) LOG.debug("interval_after_enable " + str(interval_after_enable)) LOG.debug("next_run_time_after_enable" + str(next_run_time_after_enable)) start_date = schedule_details['start_date'] start_time = schedule_details['start_time'] date_time = start_date + " " + start_time start_date_time = datetime.datetime.strptime( date_time, "%m/%d/%Y %H:%M %p") LOG.debug("Scheduled start and date time is: " + str(start_date_time)) utc_24hr = datetime.datetime.utcnow() utc_12hr = datetime.datetime.strftime(utc_24hr, "%m/%d/%Y %I:%M %p") utc_12hr = datetime.datetime.strptime(utc_12hr, "%m/%d/%Y %H:%M %p") time_diff = (start_date_time - utc_12hr).total_seconds() time_diff = int(time_diff) LOG.debug( "Time difference between UTC time and scheduled start time: " + str(time_diff)) delta = abs(time_diff - next_run_time_after_enable) # Condition for Interval value and time difference should not be # more than two minutes if delta < 120 and interval == interval_after_enable: reporting.add_test_step( "Verify Interval and 
Next snapshot run time values are correct", tvaultconf.PASS) LOG.debug( "Interval and Next snapshot run time values are correct") else: reporting.add_test_step( "Verify Interval and Next snapshot run time values are correct", tvaultconf.FAIL) raise Exception( "Interval and Next snapshot run time values are incorrect") reporting.test_case_to_write() except Exception as e: LOG.error("Exception: " + str(e)) reporting.set_test_script_status(tvaultconf.FAIL) reporting.test_case_to_write() finally: # Delete workload status = self.workload_delete(self.wid) time.sleep(10)
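# For reference, the two-minute tolerance above compares two independently
# derived values: the wall-clock gap until the scheduled start, and the
# scheduler's reported 'nextrun' countdown. With assumed sample values:
#
#     scheduled start: 05/20/2024 10:15 AM UTC
#     current UTC:     05/20/2024 10:00 AM   -> time_diff = 900 s
#     nextrun:         870 s                 -> delta = abs(900 - 870) = 30 s
#
# 30 < 120, so the step passes. A 12/24-hour parsing mix-up for a PM start
# time would shift time_diff by 12 h (43200 s) and fail the check, which is
# why the %I format above matters.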
def test_2_modify_workload_scheduler_disable(self):
    reporting.add_test_script(str(__name__) + "_scheduler_disable")
    try:
        # Prerequisites
        self.created = False
        self.workload_instances = []

        # Launch instance
        self.vm_id = self.create_vm()
        LOG.debug("VM ID-2: " + str(self.vm_id))

        # Create volume
        self.volume_id = self.create_volume()
        LOG.debug("Volume ID-2: " + str(self.volume_id))

        # Attach volume to the instance
        self.attach_volume(self.volume_id, self.vm_id)
        LOG.debug("Volume attached-2")

        # Create workload with scheduler enabled
        self.workload_instances.append(self.vm_id)
        self.wid = self.workload_create(
            self.workload_instances,
            tvaultconf.parallel,
            workload_name=tvaultconf.workload_name,
            workload_cleanup=True)
        LOG.debug("Workload ID-2: " + str(self.wid))

        # Verify workload created with scheduler enabled
        status = self.getSchedulerStatus(self.wid)
        if status:
            reporting.add_test_step(
                "Workload created with scheduler enabled", tvaultconf.PASS)
            LOG.debug("Workload created with scheduler enabled successfully")
        else:
            reporting.add_test_step(
                "Workload created with scheduler enabled", tvaultconf.FAIL)
            raise Exception(
                "Workload has not been created with scheduler enabled")

        # Get workload scheduler details
        schedule_details = self.getSchedulerDetails(self.wid)
        scheduled_start_time = schedule_details['start_time']
        interval = schedule_details['interval']

        # Change global job scheduler to disable
        LOG.debug("Change Global job scheduler to disable")
        status = self.disable_global_job_scheduler()
        if not status:
            reporting.add_test_step(
                "Global job scheduler disable", tvaultconf.PASS)
            LOG.debug("Global job scheduler disabled successfully")
        else:
            reporting.add_test_step(
                "Global job scheduler disable", tvaultconf.FAIL)
            raise Exception("Global job scheduler not disabled")

        # Modify workload scheduler to disable; this should be rejected while
        # the global job scheduler is disabled
        workload_modify_command = command_argument_string.workload_modify + \
            str(self.wid) + " --jobschedule enabled=False"
        error = cli_parser.cli_error(workload_modify_command)
        if error and (str(error.strip('\n')).find(
                "Cannot update scheduler related fields when global jobscheduler is disabled.") != -1):
            reporting.add_test_step(
                "Does not execute workload-modify scheduler disable",
                tvaultconf.PASS)
            LOG.debug("Command executed correctly")
            reporting.add_test_step("Throws proper message", tvaultconf.PASS)
            LOG.debug("Error message: " + str(error))
        else:
            reporting.add_test_step(
                "Does not execute workload-modify scheduler disable",
                tvaultconf.FAIL)
            reporting.add_test_step("Throws proper message", tvaultconf.FAIL)
            raise Exception("Command did not execute correctly")

        # Change global job scheduler to enable
        LOG.debug("Change Global job scheduler to enable")
        status = self.enable_global_job_scheduler()
        if status:
            reporting.add_test_step(
                "Global job scheduler enable", tvaultconf.PASS)
            LOG.debug("Global job scheduler enabled successfully")
        else:
            reporting.add_test_step(
                "Global job scheduler enable", tvaultconf.FAIL)
            raise Exception("Global job scheduler not enabled")

        # Modify workload scheduler to disable using CLI command
        workload_modify_command = command_argument_string.workload_modify + \
            str(self.wid) + " --jobschedule enabled=False"
        rc = cli_parser.cli_returncode(workload_modify_command)
        if rc != 0:
            reporting.add_test_step(
                "Execute workload-modify scheduler disable", tvaultconf.FAIL)
            raise Exception("Command did not execute correctly")
        else:
            reporting.add_test_step(
                "Execute workload-modify scheduler disable", tvaultconf.PASS)
            LOG.debug("Command executed correctly")

        # Verify workload scheduler changed to disable
        status = self.getSchedulerStatus(self.wid)
        if status:
            reporting.add_test_step(
                "Verify workload scheduler disabled", tvaultconf.FAIL)
            LOG.debug("Workload scheduler was not disabled")
        else:
            reporting.add_test_step(
                "Verify workload scheduler disabled", tvaultconf.PASS)
            LOG.debug("Workload scheduler disabled successfully")

        # Verify interval and next_snapshot_run values: the interval must be
        # unchanged and no 'nextrun' should be reported once disabled
        schedule_details = self.getSchedulerDetails(self.wid)
        interval_after_disable = schedule_details['interval']
        if interval == interval_after_disable and \
                'nextrun' not in schedule_details:
            reporting.add_test_step(
                "Verify Interval and Next snapshot run time values are correct",
                tvaultconf.PASS)
            LOG.debug(
                "Interval and Next snapshot run time values are correct")
        else:
            reporting.add_test_step(
                "Verify Interval and Next snapshot run time values are correct",
                tvaultconf.FAIL)
            raise Exception(
                "Interval and Next snapshot run time values are incorrect")

        reporting.test_case_to_write()

    except Exception as e:
        LOG.error("Exception: " + str(e))
        reporting.set_test_script_status(tvaultconf.FAIL)
        reporting.test_case_to_write()
def test_3_modify_workload_scheduler_enable(self): reporting.add_test_script(str(__name__) + "_scheduler_enable") try: #Prerequisites self.created = False self.workload_instances = [] #Launch instance self.vm_id = self.create_vm() LOG.debug("VM ID-3: " + str(self.vm_id)) #Create volume self.volume_id = self.create_volume() LOG.debug("Volume ID-3: " + str(self.volume_id)) #Attach volume to the instance self.attach_volume(self.volume_id, self.vm_id) LOG.debug("Volume attached-3") #Create workload with scheduler disabled using CLI workload_create = command_argument_string.workload_create + " --instance instance-id=" + str( self.vm_id) + " --jobschedule enabled=False" rc = cli_parser.cli_returncode(workload_create) if rc != 0: reporting.add_test_step( "Execute workload-create command with scheduler disable", tvaultconf.FAIL) raise Exception( "Command workload create did not execute correctly") else: reporting.add_test_step( "Execute workload-create command with scheduler disable", tvaultconf.PASS) LOG.debug("Command workload create executed correctly") time.sleep(10) self.wid = query_data.get_workload_id(tvaultconf.workload_name) LOG.debug("Workload ID-3: " + str(self.wid)) if (self.wid != None): self.wait_for_workload_tobe_available(self.wid) if (self.getWorkloadStatus(self.wid) == "available"): reporting.add_test_step( "Create workload with scheduler disable", tvaultconf.PASS) else: reporting.add_test_step( "Create workload with scheduler disable", tvaultconf.FAIL) reporting.set_test_script_status(tvaultconf.FAIL) else: reporting.add_test_step( "Create workload with scheduler disable", tvaultconf.FAIL) reporting.set_test_script_status(tvaultconf.FAIL) LOG.debug("Workload ID: " + str(self.wid)) #Verify workload created scheduler disable status = self.getSchedulerStatus(self.wid) if status: reporting.add_test_step( "Verify workload created with scheduler disable", tvaultconf.FAIL) raise Exception( "Workload has not been created with scheduler disabled") else: reporting.add_test_step( "Verify workload created with scheduler disable", tvaultconf.PASS) LOG.debug( "Workload created with scheduler disabled successfully") #Get workload scheduler details schedule_details = self.getSchedulerDetails(self.wid) scheduled_start_time = schedule_details['start_time'] interval = schedule_details['interval'] #Change global job scheduler to disable LOG.debug("Change Global job scheduler to disable") status = self.disable_global_job_scheduler() if not status: reporting.add_test_step("Global job scheduler disable", tvaultconf.PASS) LOG.debug("Global job scheduler disabled successfully") else: reporting.add_test_step("Global job scheduler disable", tvaultconf.FAIL) raise Exception("Global job scheduler not disabled") #Modify workload scheduler to enable workload_modify_command = command_argument_string.workload_modify + str( self.wid) + " --jobschedule enabled=True" error = cli_parser.cli_error(workload_modify_command) if error and str( error.strip('\n') ) == "ERROR: Cannot update scheduler related fields when global jobscheduler is disabled.": reporting.add_test_step( "Does not execute workload-modify scheduler enable", tvaultconf.PASS) LOG.debug("Command executed correctly") reporting.add_test_step("Throws proper message", tvaultconf.PASS) LOG.debug("Error message :" + str(error)) else: reporting.add_test_step( "Does not execute workload-modify scheduler enable", tvaultconf.FAIL) reporting.add_test_step("Throws proper message", tvaultconf.FAIL) raise Exception("Command did not execute correctly") #Change global 
job scheduler to enable LOG.debug("Change Global job scheduler to enable") status = self.enable_global_job_scheduler() if status: reporting.add_test_step("Global job scheduler enable", tvaultconf.PASS) LOG.debug("Global job scheduler enabled successfully") else: reporting.add_test_step("Global job scheduler enable", tvaultconf.FAIL) raise Exception("Global job scheduler not enabled") #Modify workload scheduler to enable workload_modify_command = command_argument_string.workload_modify + str( self.wid) + " --jobschedule enabled=True" rc = cli_parser.cli_returncode(workload_modify_command) if rc != 0: reporting.add_test_step( "Execute workload-modify scheduler enable", tvaultconf.FAIL) raise Exception("Command did not execute correctly") else: reporting.add_test_step( "Execute workload-modify scheduler enable", tvaultconf.PASS) LOG.debug("Command executed correctly") #Verify workload scheduler changed to enable self.wait_for_workload_tobe_available(self.wid) status = self.getSchedulerStatus(self.wid) if status: reporting.add_test_step("Verify workload scheduler enabled", tvaultconf.PASS) LOG.debug("workload scheduler enabled successfully") else: reporting.add_test_step("Verify workload scheduler enabled", tvaultconf.FAIL) LOG.debug("workload scheduler enabled unsuccessfully") #Verify interval value and nest_snapshot_run values schedule_details = self.getSchedulerDetails(self.wid) interval_after_enable = schedule_details['interval'] next_run_time_after_enable = schedule_details['nextrun'] LOG.debug("interval_after_enable " + str(interval_after_enable)) LOG.debug("next_run_time_after_enable" + str(next_run_time_after_enable)) scheduled_start_time_periods = ''.join( [i for i in scheduled_start_time if not i.isdigit()]) scheduled_start_time = ''.join( [i for i in scheduled_start_time if not i.isalpha()]) current_time = int(time.time()) LOG.debug("current_time " + str(current_time)) start_time = current_time + next_run_time_after_enable LOG.debug("start_time " + str(start_time)) time3hours = datetime.datetime.utcfromtimestamp(start_time) start_time_in_hours = time3hours.strftime('%I:%M %p') start_time_in_periods = ''.join( [i for i in start_time_in_hours if not i.isdigit()]) start_time_in_hours = ''.join( [i for i in start_time_in_hours if not i.isalpha()]) LOG.debug("start_time_in_hours " + str(start_time_in_hours)) #Calculate difference between times in minutes timeA = datetime.datetime.strptime(scheduled_start_time.strip(), "%H:%M") timeB = datetime.datetime.strptime(start_time_in_hours.strip(), "%H:%M") newTime = timeA - timeB timedelta = newTime.seconds / 60 #Condition for Interval value and time difference should not be more than two minutes and time periods AM/PM if timedelta < 2 and scheduled_start_time_periods == start_time_in_periods and interval == interval_after_enable: reporting.add_test_step( "Verify Interval and Next snapshot run time values are correct", tvaultconf.PASS) LOG.debug( "Interval and Next snapshot run time values are correct") else: reporting.add_test_step( "Verify Interval and Next snapshot run time values are correct", tvaultconf.FAIL) raise Exception( "Interval and Next snapshot run time values are incorrect") reporting.test_case_to_write() except Exception as e: LOG.error("Exception: " + str(e)) reporting.set_test_script_status(tvaultconf.FAIL) reporting.test_case_to_write() finally: #Delete workload status = self.workload_delete(self.wid) time.sleep(10)
def test_tvault_rbac_nonadmin_notableto(self):
    try:
        storage_usage_error_str = "Policy doesn't allow workload:get_storage_usage to be performed."
        import_workload_list_error_str = "Policy doesn't allow workload:get_import_workloads_list to be performed."
        disable_job_sch_error_str = "Policy doesn't allow workload:workload_disable_global_job_scheduler to be performed."
        enable_job_sch_error_str = "Policy doesn't allow workload:workload_enable_global_job_scheduler to be performed."
        get_nodes_error_str = "Policy doesn't allow workload:get_nodes to be performed."
        license_check_error_str = "Policy doesn't allow workload:license_check to be performed."
        license_list_error_str = "Policy doesn't allow workload:license_list to be performed."

        # Use non-admin credentials
        os.environ['OS_USERNAME'] = CONF.identity.nonadmin_user
        os.environ['OS_PASSWORD'] = CONF.identity.nonadmin_password

        # Run get_storage_usage CLI
        get_storage_usage = command_argument_string.get_storage_usage
        LOG.debug("get_storage_usage command: " + str(get_storage_usage))
        error = cli_parser.cli_error(get_storage_usage)
        if error and (str(error.strip('\n')).find(
                storage_usage_error_str) != -1):
            reporting.add_test_step(
                "Can not execute get_storage_usage command", tvaultconf.PASS)
            LOG.debug("Command get_storage_usage did not execute correctly")
        else:
            reporting.add_test_step(
                "Can not execute get_storage_usage command", tvaultconf.FAIL)

        # Run get_import_workloads_list CLI
        get_import_workloads_list = \
            command_argument_string.get_import_workloads_list
        LOG.debug("get_import_workloads_list command: " +
                  str(get_import_workloads_list))
        error = cli_parser.cli_error(get_import_workloads_list)
        if error and (str(error.strip('\n')).find(
                import_workload_list_error_str) != -1):
            reporting.add_test_step(
                "Can not execute get_import_workloads_list command",
                tvaultconf.PASS)
            LOG.debug(
                "Command get_import_workloads_list did not execute correctly")
        else:
            reporting.add_test_step(
                "Can not execute get_import_workloads_list command",
                tvaultconf.FAIL)

        # Run workload_disable_global_job_scheduler CLI
        workload_disable_global_job_scheduler = \
            command_argument_string.workload_disable_global_job_scheduler
        LOG.debug("workload_disable_global_job_scheduler command: " +
                  str(workload_disable_global_job_scheduler))
        error = cli_parser.cli_error(workload_disable_global_job_scheduler)
        if error and (str(error.strip('\n')).find(
                disable_job_sch_error_str) != -1):
            reporting.add_test_step(
                "Can not execute workload_disable_global_job_scheduler command",
                tvaultconf.PASS)
            LOG.debug(
                "Command workload_disable_global_job_scheduler did not execute correctly")
        else:
            reporting.add_test_step(
                "Can not execute workload_disable_global_job_scheduler command",
                tvaultconf.FAIL)

        # Run workload_enable_global_job_scheduler CLI
        workload_enable_global_job_scheduler = \
            command_argument_string.workload_enable_global_job_scheduler
        LOG.debug("workload_enable_global_job_scheduler command: " +
                  str(workload_enable_global_job_scheduler))
        error = cli_parser.cli_error(workload_enable_global_job_scheduler)
        if error and (str(error.strip('\n')).find(
                enable_job_sch_error_str) != -1):
            reporting.add_test_step(
                "Can not execute workload_enable_global_job_scheduler command",
                tvaultconf.PASS)
            LOG.debug(
                "Command workload_enable_global_job_scheduler did not execute correctly")
        else:
            reporting.add_test_step(
                "Can not execute workload_enable_global_job_scheduler command",
                tvaultconf.FAIL)

        # Run get_nodes CLI
        get_nodes = command_argument_string.get_nodes
        LOG.debug("get_nodes command: " + str(get_nodes))
        error = cli_parser.cli_error(get_nodes)
        if error and (str(error.strip('\n')).find(get_nodes_error_str) != -1):
            reporting.add_test_step(
                "Can not execute get_nodes command", tvaultconf.PASS)
            LOG.debug("Command get_nodes did not execute correctly")
        else:
            reporting.add_test_step(
                "Can not execute get_nodes command", tvaultconf.FAIL)

        # Run license_check CLI
        license_check = command_argument_string.license_check
        LOG.debug("license_check command: " + str(license_check))
        error = cli_parser.cli_error(license_check)
        if error and (str(error.strip('\n')).find(
                license_check_error_str) != -1):
            reporting.add_test_step(
                "Can not execute license_check command", tvaultconf.PASS)
            LOG.debug("Command license_check did not execute correctly")
        else:
            reporting.add_test_step(
                "Can not execute license_check command", tvaultconf.FAIL)

        # Run license_list CLI
        license_list = command_argument_string.license_list
        LOG.debug("license_list command: " + str(license_list))
        error = cli_parser.cli_error(license_list)
        if error and (str(error.strip('\n')).find(
                license_list_error_str) != -1):
            reporting.add_test_step(
                "Can not execute license_list command", tvaultconf.PASS)
            LOG.debug("Command license_list did not execute correctly")
        else:
            reporting.add_test_step(
                "Can not execute license_list command", tvaultconf.FAIL)

        reporting.set_test_script_status(tvaultconf.PASS)
        reporting.test_case_to_write()

    except Exception as e:
        LOG.error("Exception: " + str(e))
        reporting.set_test_script_status(tvaultconf.FAIL)
        reporting.test_case_to_write()
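# The seven denial checks above differ only in the command, the expected
# message, and the step label, so the body of this test could be made
# table-driven. A sketch is shown below in comment form (it assumes the same
# local error strings defined at the top of the try-block; the loop itself is
# illustrative, not existing code):
#
#     restricted_cmds = [
#         (command_argument_string.get_storage_usage,
#          storage_usage_error_str, "get_storage_usage"),
#         (command_argument_string.get_nodes,
#          get_nodes_error_str, "get_nodes"),
#         (command_argument_string.license_list,
#          license_list_error_str, "license_list"),
#     ]
#     for command, denial, name in restricted_cmds:
#         error = cli_parser.cli_error(command)
#         blocked = error and denial in str(error.strip('\n'))
#         reporting.add_test_step(
#             "Can not execute " + name + " command",
#             tvaultconf.PASS if blocked else tvaultconf.FAIL)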