def test_start_from_suspend(self, testing_vm, verify_vm_suspended, soft_assert):
    """Power on a suspended VM from the CFME UI and verify it comes back up.

    Metadata:
        test_flag: power_control, provision
    """
    # Make sure CFME sees the VM as suspended before issuing the power-on.
    try:
        testing_vm.provider.refresh_provider_relationships()
        testing_vm.wait_for_vm_state_change(
            desired_state=testing_vm.STATE_SUSPENDED, timeout=450, from_details=True)
    except TimedOutError:
        # RHEV sometimes never reports the suspended state (bz1174858).
        if not testing_vm.provider.one_of(RHEVMProvider):
            raise
        logger.warning('working around bz1174858, ignoring timeout')
    boot_time_before = testing_vm.get_detail(properties=("Power Management", "Last Boot Time"))
    testing_vm.power_control_from_cfme(
        option=testing_vm.POWER_ON, cancel=False, from_details=True)
    flash.assert_message_contain("Start initiated")
    if_scvmm_refresh_provider(testing_vm.provider)
    testing_vm.wait_for_vm_state_change(
        desired_state=testing_vm.STATE_ON, timeout=720, from_details=True)
    # A fresh boot timestamp proves the VM actually resumed.
    wait_for_last_boot_timestamp_refresh(testing_vm, boot_time_before, timeout=600)
    soft_assert(
        testing_vm.provider.mgmt.is_vm_running(testing_vm.name), "vm not running")
def test_distributed_vm_power_control(request, test_vm, provider, verify_vm_running,
                                      register_event, soft_assert, setup_provider):
    """Verify that a replication parent appliance can control the power state
    of a VM managed by a replication child appliance.

    Metadata:
        test_flag: replication
    """
    parent, child = get_replication_appliances()

    @request.addfinalizer
    def _destroy_appliances():
        # Tear down both temporary appliances regardless of test outcome.
        parent.destroy()
        child.destroy()

    # Configure replication on the parent and add the provider there.
    parent.ipapp.browser_steal = True
    with parent.ipapp:
        configure_db_replication(child.address)
        provider.create()
        wait_for_a_provider()

    # Drive the power-off from the child appliance and verify the result.
    child.ipapp.browser_steal = True
    with child.ipapp:
        register_event(
            test_vm.provider.type, "vm", test_vm.name,
            ["vm_power_off_req", "vm_power_off"])
        test_vm.power_control_from_cfme(option=test_vm.POWER_OFF, cancel=False)
        flash.assert_message_contain("Stop initiated")
        pytest.sel.force_navigate(
            'infrastructure_provider', context={'provider': test_vm.provider})
        test_vm.wait_for_vm_state_change(desired_state=test_vm.STATE_OFF, timeout=900)
        soft_assert(test_vm.find_quadicon().state == 'currentstate-off')
        soft_assert(
            not test_vm.provider.mgmt.is_vm_running(test_vm.name), "vm running")
def delete(self, from_dest='All'):
    """Delete the stack, starting from the destination provided by from_dest.

    Args:
        from_dest: where to delete from, a valid navigation destination for Stack
            ('All' deletes via the quadicon list, 'Details' via the detail page).

    Raises:
        DestinationNotFound: if ``from_dest`` is not a destination of this Stack.
        FlashMessageException: if neither of the expected delete messages appears.
    """
    # Navigate to the starting destination
    if from_dest in navigator.list_destinations(self):
        navigate_to(self, from_dest)
    else:
        msg = 'cfme.cloud.stack does not have destination {}'.format(
            from_dest)
        raise DestinationNotFound(msg)

    # Delete using the method appropriate for the starting destination
    if from_dest == 'All':
        sel.check(Quadicon(self.name, self.quad_name).checkbox())
        cfg_btn("Remove Orchestration Stacks", invokes_alert=True)
    elif from_dest == 'Details':
        cfg_btn("Remove this Orchestration Stack", invokes_alert=True)
    sel.handle_alert()
    # The delete initiated message may get missed if the delete is fast
    try:
        flash.assert_message_contain(
            "Delete initiated for 1 Orchestration Stacks")
    except FlashMessageException as ex:
        if 'No flash message contains' in ex.message:
            flash.assert_message_contain(
                "The selected Orchestration Stacks was deleted")
        else:
            # BUGFIX: previously any other FlashMessageException was silently
            # swallowed here; re-raise so unexpected flash errors surface.
            raise
    self.wait_for_delete()
def test_start_from_suspend(self, provider_init, test_vm, verify_vm_suspended,
                            soft_assert, register_event):
    """Start a suspended VM from CFME and check that the boot and state-change
    timestamps both move forward."""
    test_vm.wait_for_vm_state_change(
        desired_state=Vm.STATE_SUSPENDED, timeout=720, from_details=True)
    register_event(
        test_vm.provider_crud.get_yaml_data()['type'],
        "vm", test_vm.name, ["vm_power_on_req", "vm_power_on"])
    orig_boot_time = test_vm.get_detail(properties=("Power Management", "Last Boot Time"))
    orig_state_time = test_vm.get_detail(properties=("Power Management", "State Changed On"))
    self._check_power_options_when_off(soft_assert, test_vm, from_details=True)
    test_vm.power_control_from_cfme(option=Vm.POWER_ON, cancel=False, from_details=True)
    flash.assert_message_contain("Start initiated")
    pytest.sel.force_navigate(
        'infrastructure_provider', context={'provider': test_vm.provider_crud})
    test_vm.wait_for_vm_state_change(
        desired_state=Vm.STATE_ON, timeout=720, from_details=True)
    self._wait_for_last_boot_timestamp_refresh(test_vm, orig_boot_time, timeout=600)
    soft_assert(
        test_vm.provider_crud.get_mgmt_system().is_vm_running(test_vm.name),
        "vm not running")
    # Both timestamps must differ from the values captured while suspended.
    new_state_time = test_vm.get_detail(properties=("Power Management", "State Changed On"))
    soft_assert(new_state_time != orig_state_time,
                "ui: " + new_state_time + " should != orig: " + orig_state_time)
    new_boot_time = test_vm.get_detail(properties=("Power Management", "Last Boot Time"))
    soft_assert(new_boot_time != orig_boot_time,
                "ui: " + new_boot_time + " should != orig: " + orig_boot_time)
def test_distributed_vm_power_control(request, test_vm, vmware_provider,
                                      verify_vm_running, register_event, soft_assert):
    """Verify that a replication parent appliance can control the power state
    of a VM managed by a replication child appliance.

    Metadata:
        test_flag: replication
    """
    parent, child = get_replication_appliances()

    @request.addfinalizer
    def _cleanup_appliances():
        # Always destroy both temporary appliances.
        parent.destroy()
        child.destroy()

    # Set up replication on the parent and register the provider there.
    parent.ipapp.browser_steal = True
    with parent.ipapp:
        configure_db_replication(child.address)
        vmware_provider.create()
        wait_for_a_provider()

    # Issue the power-off from the child appliance and verify the outcome.
    child.ipapp.browser_steal = True
    with child.ipapp:
        register_event('VmOrTemplate', test_vm.name,
                       ['request_vm_poweroff', 'vm_poweroff'])
        test_vm.power_control_from_cfme(option=test_vm.POWER_OFF, cancel=False)
        flash.assert_message_contain("Stop initiated")
        navigate_to(test_vm.provider, 'Details')
        test_vm.wait_for_vm_state_change(desired_state=test_vm.STATE_OFF, timeout=900)
        soft_assert(test_vm.find_quadicon().state == 'currentstate-off')
        soft_assert(
            not test_vm.provider.mgmt.is_vm_running(test_vm.name), "vm running")
def test_suspend(self, provider_init, test_vm, verify_vm_running, soft_assert,
                 register_event):
    """Suspend a running VM from CFME and verify the provider agrees."""
    test_vm.wait_for_vm_state_change(
        desired_state=Vm.STATE_ON, timeout=720, from_details=True)
    boot_time_before = test_vm.get_detail(properties=("Power Management", "Last Boot Time"))
    register_event(
        test_vm.provider_crud.get_yaml_data()['type'],
        "vm", test_vm.name, ["vm_suspend_req", "vm_suspend"])
    test_vm.power_control_from_cfme(option=Vm.SUSPEND, cancel=False, from_details=True)
    flash.assert_message_contain("Suspend initiated")
    pytest.sel.force_navigate(
        'infrastructure_provider', context={'provider': test_vm.provider_crud})
    try:
        test_vm.wait_for_vm_state_change(
            desired_state='suspended', timeout=600, from_details=True)
    except TimedOutError:
        # bz977489: the UI may not pick up the state until relationships refresh.
        logger.warning('working around bz977489 by clicking the refresh button')
        test_vm.refresh_relationships()
        test_vm.wait_for_vm_state_change(
            desired_state=Vm.STATE_SUSPENDED, timeout=300, from_details=True)
    soft_assert(
        test_vm.provider_crud.get_mgmt_system().is_vm_suspended(test_vm.name),
        "vm not suspended")
    # BUG - https://bugzilla.redhat.com/show_bug.cgi?id=1101604
    if not isinstance(test_vm.provider_crud, RHEVMProvider):
        boot_time_after = test_vm.get_detail(
            properties=("Power Management", "Last Boot Time"))
        soft_assert(boot_time_after == boot_time_before,
                    "ui: " + boot_time_after + " should == orig: " + boot_time_before)
def test_terminate(setup_provider_funcscope, provider_type, provider_mgmt,
                   test_instance, soft_assert, verify_vm_running):
    """Tests instance terminate

    Metadata:
        test_flag: power_control, provision
    """
    test_instance.wait_for_vm_state_change(
        desired_state=test_instance.STATE_ON, timeout=720, from_details=True)
    test_instance.power_control_from_cfme(
        option=test_instance.TERMINATE, cancel=False, from_details=True)
    flash.assert_message_contain("Terminate initiated")
    # Poll until the instance disappears from the CFME UI.
    wait_for(test_instance.does_vm_exist_in_cfme,
             fail_condition=True,
             num_sec=600,
             delay=30,
             fail_func=test_instance.provider_crud.refresh_provider_relationships,
             message="VM no longer exists in cfme UI")
    # OpenStack removes the instance entirely; other providers mark it deleted.
    if provider_type == 'openstack':
        soft_assert(
            not provider_mgmt.does_vm_exist(test_instance.name),
            "instance still exists")
    else:
        deleted_state = provider_mgmt.states['deleted']
        soft_assert(
            provider_mgmt.is_vm_state(test_instance.name, deleted_state),
            "instance still exists")
    sel.force_navigate("clouds_instances_archived_branch")
    soft_assert(
        test_instance.name in get_all_instances(do_not_navigate=True),
        "instance is not among archived instances")
def test_power_off(self, testing_vm, verify_vm_running, soft_assert):
    """Tests power off

    Metadata:
        test_flag: power_control, provision
    """
    testing_vm.wait_for_vm_state_change(
        desired_state=testing_vm.STATE_ON, timeout=720, from_details=True)
    boot_time_before = testing_vm.get_detail(
        properties=("Power Management", "Last Boot Time"))
    testing_vm.power_control_from_cfme(
        option=testing_vm.POWER_OFF, cancel=False, from_details=True)
    flash.assert_message_contain("Stop initiated")
    if_scvmm_refresh_provider(testing_vm.provider)
    testing_vm.wait_for_vm_state_change(
        desired_state=testing_vm.STATE_OFF, timeout=720, from_details=True)
    soft_assert(
        not testing_vm.provider.mgmt.is_vm_running(testing_vm.name), "vm running")
    # BUG - https://bugzilla.redhat.com/show_bug.cgi?id=1101604
    if not testing_vm.provider.one_of(RHEVMProvider):
        # A power-off must not alter the last-boot timestamp.
        boot_time_after = testing_vm.get_detail(
            properties=("Power Management", "Last Boot Time"))
        soft_assert(
            boot_time_after == boot_time_before,
            "ui: {} should == orig: {}".format(boot_time_after, boot_time_before))
def _scan_ui(vm):
    """Run a SmartState scan on ``vm`` through the UI and assert that the
    resulting task finishes successfully (checkmark icon in the task row).
    """
    logger.info('Initiating vm smart scan on ' + vm.provider.name + ":" + vm.name)
    vm.smartstate_scan(cancel=False, from_details=True)
    flash.assert_message_contain(version.pick({
        version.LOWEST: "Smart State Analysis initiated",
        "5.5": "Analysis initiated for 1 VM and Instance from the CFME Database"}))
    # wait for task to complete
    pytest.sel.force_navigate('tasks_my_vm')
    wait_for(is_vm_analysis_finished, [vm.name], delay=15, num_sec=600,
             handle_exception=True, fail_func=lambda: toolbar.select('Reload'))
    # make sure fleecing was successful
    # The two version branches previously duplicated the whole lookup; only the
    # capitalization of the task state differs (lowercase from 5.4 onward).
    state = 'finished' if version.current_version() >= "5.4" else 'Finished'
    task_row = tasks.tasks_table.find_row_by_cells({
        'task_name': "Scan from Vm %s" % vm.name,
        'state': state,
    })
    icon_img = task_row.columns[1].find_element_by_tag_name("img")
    assert "checkmark" in icon_img.get_attribute("src")
def test_suspend(self, test_vm, verify_vm_running, soft_assert, register_event, bug):
    """Tests suspend

    Metadata:
        test_flag: power_control, provision
    """
    test_vm.wait_for_vm_state_change(
        desired_state=test_vm.STATE_ON, timeout=720, from_details=True)
    last_boot_time = test_vm.get_detail(properties=("Power Management", "Last Boot Time"))
    register_event(
        test_vm.provider.type, "vm", test_vm.name, ["vm_suspend_req", "vm_suspend"])
    test_vm.power_control_from_cfme(option=test_vm.SUSPEND, cancel=False,
                                    from_details=True)
    flash.assert_message_contain("Suspend initiated")
    pytest.sel.force_navigate(
        'infrastructure_provider', context={'provider': test_vm.provider})
    if_scvmm_refresh_provider(test_vm.provider)
    try:
        test_vm.wait_for_vm_state_change(
            desired_state=test_vm.STATE_SUSPENDED, timeout=450, from_details=True)
    except TimedOutError:
        if test_vm.provider.type == "rhevm":
            logger.warning('working around bz1174858, ignoring timeout')
        else:
            # BUGFIX: bare `raise` preserves the original traceback
            # (`raise e` would restart it from here).
            raise
    soft_assert(
        test_vm.provider.mgmt.is_vm_suspended(
            test_vm.name), "vm not suspended")
    # BUG - https://bugzilla.redhat.com/show_bug.cgi?id=1101604
    if test_vm.provider.type != "rhevm":
        new_last_boot_time = test_vm.get_detail(
            properties=("Power Management", "Last Boot Time"))
        soft_assert(new_last_boot_time == last_boot_time,
                    "ui: {} should == orig: {}".format(new_last_boot_time,
                                                       last_boot_time))
def test_power_on(self, test_vm, verify_vm_stopped, soft_assert, register_event, bug):
    """Tests power on

    Metadata:
        test_flag: power_control, provision
    """
    test_vm.wait_for_vm_state_change(
        desired_state='off', timeout=720, from_details=True)
    register_event('VmOrTemplate', test_vm.name, ['request_vm_start', 'vm_start'])
    orig_boot_time = test_vm.get_detail(properties=("Power Management", "Last Boot Time"))
    orig_state_time = test_vm.get_detail(properties=("Power Management", "State Changed On"))
    self._check_power_options_when_off(soft_assert, test_vm, from_details=True)
    test_vm.power_control_from_cfme(option=test_vm.POWER_ON, cancel=False,
                                    from_details=True)
    flash.assert_message_contain("Start initiated")
    pytest.sel.force_navigate(
        'infrastructure_provider', context={'provider': test_vm.provider})
    if_scvmm_refresh_provider(test_vm.provider)
    test_vm.wait_for_vm_state_change(
        desired_state=test_vm.STATE_ON, timeout=720, from_details=True)
    self._wait_for_last_boot_timestamp_refresh(test_vm, orig_boot_time, timeout=600)
    soft_assert(
        test_vm.provider.mgmt.is_vm_running(test_vm.name), "vm not running")
    # State-change timestamp must have moved after the power-on.
    new_state_time = test_vm.get_detail(properties=("Power Management", "State Changed On"))
    soft_assert(new_state_time != orig_state_time,
                "ui: {} == orig: {}".format(new_state_time, orig_state_time))
    # SCVMM does not refresh the last-boot timestamp, so skip it there.
    if test_vm.provider.type != "scvmm":
        new_boot_time = test_vm.get_detail(
            properties=("Power Management", "Last Boot Time"))
        soft_assert(new_boot_time != orig_boot_time,
                    "ui: {} == orig: {}".format(new_boot_time, orig_boot_time))
def test_suspend(self, test_vm, verify_vm_running, soft_assert, register_event, bug):
    """Tests suspend

    Metadata:
        test_flag: power_control, provision
    """
    test_vm.wait_for_vm_state_change(
        desired_state=test_vm.STATE_ON, timeout=720, from_details=True)
    last_boot_time = test_vm.get_detail(properties=("Power Management", "Last Boot Time"))
    register_event('VmOrTemplate', test_vm.name, ['request_vm_suspend', 'vm_suspend'])
    test_vm.power_control_from_cfme(option=test_vm.SUSPEND, cancel=False,
                                    from_details=True)
    flash.assert_message_contain("Suspend initiated")
    pytest.sel.force_navigate(
        'infrastructure_provider', context={'provider': test_vm.provider})
    if_scvmm_refresh_provider(test_vm.provider)
    try:
        test_vm.wait_for_vm_state_change(
            desired_state=test_vm.STATE_SUSPENDED, timeout=450, from_details=True)
    except TimedOutError:
        if test_vm.provider.type == "rhevm":
            logger.warning('working around bz1174858, ignoring timeout')
        else:
            # BUGFIX: bare `raise` preserves the original traceback
            # (`raise e` would restart it from here).
            raise
    soft_assert(
        test_vm.provider.mgmt.is_vm_suspended(
            test_vm.name), "vm not suspended")
    # BUG - https://bugzilla.redhat.com/show_bug.cgi?id=1101604
    if test_vm.provider.type != "rhevm":
        new_last_boot_time = test_vm.get_detail(
            properties=("Power Management", "Last Boot Time"))
        soft_assert(new_last_boot_time == last_boot_time,
                    "ui: {} should == orig: {}".format(new_last_boot_time,
                                                       last_boot_time))
def test_power_off(self, test_vm, verify_vm_running, soft_assert, register_event, bug):
    """Tests power off

    Metadata:
        test_flag: power_control, provision
    """
    test_vm.wait_for_vm_state_change(
        desired_state=test_vm.STATE_ON, timeout=720, from_details=True)
    boot_time_before = test_vm.get_detail(properties=("Power Management", "Last Boot Time"))
    register_event('VmOrTemplate', test_vm.name, ['request_vm_poweroff', 'vm_poweroff'])
    self._check_power_options_when_on(soft_assert, test_vm, bug, from_details=True)
    test_vm.power_control_from_cfme(option=test_vm.POWER_OFF, cancel=False,
                                    from_details=True)
    flash.assert_message_contain("Stop initiated")
    pytest.sel.force_navigate(
        'infrastructure_provider', context={'provider': test_vm.provider})
    if_scvmm_refresh_provider(test_vm.provider)
    test_vm.wait_for_vm_state_change(
        desired_state='off', timeout=720, from_details=True)
    soft_assert(
        not test_vm.provider.mgmt.is_vm_running(test_vm.name), "vm running")
    # BUG - https://bugzilla.redhat.com/show_bug.cgi?id=1101604
    if test_vm.provider.type != "rhevm":
        # Power-off must leave the last-boot timestamp untouched.
        boot_time_after = test_vm.get_detail(
            properties=("Power Management", "Last Boot Time"))
        soft_assert(boot_time_after == boot_time_before,
                    "ui: {} should == orig: {}".format(boot_time_after,
                                                       boot_time_before))
def test_server_name(appliance):
    """Tests that changing the server name updates the about page"""
    flash_msg = 'Configuration settings saved for CFME Server "{}'
    navigate_to(appliance.server, 'Server')
    original_name = sel.value(BasicInformation.basic_information.appliance_name)
    changed_name = original_name + "-CFME"

    # Apply the new server name.
    BasicInformation(appliance_name=changed_name).update()
    flash.assert_message_contain(flash_msg.format(changed_name))
    appliance.server.name = changed_name

    # CFME updates about box only after any navigation BZ(1408681) - closed wontfix
    navigate_to(appliance.server, 'Dashboard')
    # opens and closes about modal
    shown_name = about.get_detail(about.SERVER)
    assert changed_name == shown_name, \
        "Server name in About section does not match the new name"

    # Restore the original name and invalidate cached configuration.
    clear_property_cache(appliance, 'configuration_details')
    BasicInformation(appliance_name=original_name).update()
    flash.assert_message_contain(flash_msg.format(original_name))
    appliance.server.name = original_name
    clear_property_cache(appliance, 'configuration_details')
def test_distributed_vm_power_control(request, test_vm, vmware_provider,
                                      verify_vm_running, register_event, soft_assert):
    """Verify that a replication parent appliance can control the power state
    of a VM managed by a replication child appliance.

    Metadata:
        test_flag: replication
    """
    master_appl, worker_appl = get_replication_appliances()

    def _teardown():
        # Remove both temporary appliances after the test.
        master_appl.destroy()
        worker_appl.destroy()

    request.addfinalizer(_teardown)

    # Configure replication on the parent and add the provider there.
    master_appl.ipapp.browser_steal = True
    with master_appl.ipapp:
        configure_db_replication(worker_appl.address)
        vmware_provider.create()
        wait_for_a_provider()

    # Power the VM off from the child appliance and verify.
    worker_appl.ipapp.browser_steal = True
    with worker_appl.ipapp:
        register_event(test_vm.provider.type, "vm", test_vm.name,
                       ["vm_power_off_req", "vm_power_off"])
        test_vm.power_control_from_cfme(option=test_vm.POWER_OFF, cancel=False)
        flash.assert_message_contain("Stop initiated")
        pytest.sel.force_navigate('infrastructure_provider',
                                  context={'provider': test_vm.provider})
        test_vm.wait_for_vm_state_change(desired_state=test_vm.STATE_OFF, timeout=900)
        soft_assert(test_vm.find_quadicon().state == 'currentstate-off')
        soft_assert(not test_vm.provider.mgmt.is_vm_running(test_vm.name),
                    "vm running")
def test_power_on(self, test_vm, verify_vm_stopped, soft_assert, register_event, bug):
    """Tests power on

    Metadata:
        test_flag: power_control, provision
    """
    test_vm.wait_for_vm_state_change(
        desired_state='off', timeout=720, from_details=True)
    register_event(test_vm.provider.type, "vm", test_vm.name,
                   ["vm_power_on_req", "vm_power_on"])
    orig_boot_time = test_vm.get_detail(properties=("Power Management", "Last Boot Time"))
    orig_state_time = test_vm.get_detail(properties=("Power Management", "State Changed On"))
    self._check_power_options_when_off(soft_assert, test_vm, from_details=True)
    test_vm.power_control_from_cfme(option=test_vm.POWER_ON, cancel=False,
                                    from_details=True)
    flash.assert_message_contain("Start initiated")
    pytest.sel.force_navigate(
        'infrastructure_provider', context={'provider': test_vm.provider})
    if_scvmm_refresh_provider(test_vm.provider)
    test_vm.wait_for_vm_state_change(
        desired_state=test_vm.STATE_ON, timeout=720, from_details=True)
    self._wait_for_last_boot_timestamp_refresh(test_vm, orig_boot_time, timeout=600)
    soft_assert(
        test_vm.provider.mgmt.is_vm_running(test_vm.name), "vm not running")
    # State-change timestamp must have advanced after the power-on.
    new_state_time = test_vm.get_detail(properties=("Power Management", "State Changed On"))
    soft_assert(new_state_time != orig_state_time,
                "ui: {} == orig: {}".format(new_state_time, orig_state_time))
    # SCVMM does not refresh the last-boot timestamp, so skip it there.
    if test_vm.provider.type != "scvmm":
        new_boot_time = test_vm.get_detail(
            properties=("Power Management", "Last Boot Time"))
        soft_assert(new_boot_time != orig_boot_time,
                    "ui: {} == orig: {}".format(new_boot_time, orig_boot_time))
def delete_sched_and_files():
    """Clean up: remove the backup files on the target host, then delete the
    schedule and confirm the UI reports the deletion."""
    # Wipe the backup directory over SSH before removing the schedule.
    with get_ssh_client(db_backup_data.hostname, db_backup_data.credentials) as ssh:
        ssh.run_command('rm -rf {}'.format(full_path))
    sched.delete()
    expected = 'Schedule "{}": Delete successful'.format(
        db_backup_data.schedule_description)
    flash.assert_message_contain(expected)
def test_add_hawkular_provider_ssl(provider, test_item, soft_assert):
    """This test checks adding container providers with 3 different security
    protocols: SSL trusting custom CA, SSL without validation and SSL
    The test checks the Default Endpoint as well as the Hawkular Endpoint

    Steps:
        * Navigate to Containers Menu
        * Navigate to Add Provider Menu
        * Try to add a Container Provider with each of the following security
          options:
            Default Endpoint = SSL trusting custom CA/SSL without validation/SSL
            Hawkular Endpoint = SSL trusting custom CA/SSL without validation/SSL
        * Assert that provider was added successfully
    """
    # Work on a copy so the fixture-provided provider stays untouched.
    candidate = copy(provider)
    candidate.endpoints['default'].sec_protocol = test_item.default_sec_protocol
    candidate.endpoints['hawkular'].sec_protocol = test_item.hawkular_sec_protocol
    try:
        candidate.setup()
        flash.assert_message_contain(
            'Containers Providers "{}" was saved'.format(provider.name))
    except FlashMessageException:
        soft_assert(
            False,
            "{} wasn't added successfully using {} security protocol and "
            "{} hawkular security protocol".format(
                provider.name,
                test_item.default_sec_protocol,
                test_item.hawkular_sec_protocol))
def delete_sched_and_files():
    """Remove the backup files from the remote host, delete the schedule and
    verify the delete flash message."""
    ssh = get_ssh_client(db_backup_data.hostname, db_backup_data.credentials)
    with ssh as client:
        client.run_command('rm -rf {}'.format(full_path))
    sched.delete()
    flash.assert_message_contain(
        'Schedule "{}": Delete successful'.format(db_backup_data.schedule_description)
    )
def test_terminate(setup_provider_funcscope, provider, testing_instance, soft_assert,
                   verify_vm_running):
    """Tests instance terminate

    Metadata:
        test_flag: power_control, provision
    """
    testing_instance.wait_for_vm_state_change(
        desired_state=testing_instance.STATE_ON, timeout=720, from_details=True)
    testing_instance.power_control_from_cfme(
        option=testing_instance.TERMINATE, cancel=False, from_details=True)
    # The flash message wording changed in 5.5.
    flash.assert_message_contain({
        version.LOWEST: "Terminate initiated",
        "5.5": "Vm Destroy initiated"})
    testing_instance.wait_to_disappear(timeout=600)
    # OpenStack removes the instance entirely; other providers mark it deleted.
    if provider.type == 'openstack':
        soft_assert(
            not testing_instance.does_vm_exist_on_provider(),
            "instance still exists")
    else:
        deleted_state = provider.mgmt.states['deleted']
        soft_assert(
            provider.mgmt.is_vm_state(testing_instance.name, deleted_state),
            "instance still exists")
    sel.force_navigate("clouds_instances_archived_branch")
    soft_assert(
        testing_instance.name in get_all_instances(do_not_navigate=True),
        "instance is not among archived instances")
def configure_db_replication(db_address):
    """Enable the sync role and point this appliance's replication at
    ``db_address``; wait until the UI reports replication active with an
    empty backlog.
    """
    conf.set_replication_worker_host(db_address)
    flash.assert_message_contain("Configuration settings saved for CFME Server")
    try:
        sel.force_navigate("cfg_settings_currentserver_server")
    except WebDriverException:
        # A leftover browser alert can block navigation; dismiss it and retry once.
        sel.handle_alert()
        sel.force_navigate("cfg_settings_currentserver_server")
    conf.set_server_roles(database_synchronization=True)
    sel.force_navigate("cfg_diagnostics_region_replication")

    def _replication_active():
        return conf.get_replication_status(navigate=False)

    def _backlog_drained():
        return conf.get_replication_backlog(navigate=False) == 0

    wait_for(_replication_active, fail_condition=False, num_sec=360, delay=10,
             fail_func=sel.refresh, message="get_replication_status")
    assert conf.get_replication_status()
    wait_for(_backlog_drained, fail_condition=False, num_sec=120, delay=10,
             fail_func=sel.refresh, message="get_replication_backlog")
def test_add_hawkular_provider_ssl(provider, test_item, soft_assert):
    """This test checks adding container providers with 3 different security
    protocols: SSL trusting custom CA, SSL without validation and SSL
    The test checks the Default Endpoint as well as the Hawkular Endpoint

    Steps:
        * Navigate to Containers Menu
        * Navigate to Add Provider Menu
        * Try to add a Container Provider with each of the following security
          options:
            Default Endpoint = SSL trusting custom CA/SSL without validation/SSL
            Hawkular Endpoint = SSL trusting custom CA/SSL without validation/SSL
        * Assert that provider was added successfully
    """
    # Build a fresh OpenShift provider object carrying the security protocols
    # under test, reusing the fixture provider's connection details.
    candidate = OpenshiftProvider(
        hawkular=True,
        sec_protocol=test_item.default_sec_protocol,
        hawkular_sec_protocol=test_item.hawkular_sec_protocol,
        name=provider.name,
        hostname=str(provider.hostname),
        hawkular_hostname=str(provider.hawkular_hostname),
        credentials=provider.credentials)
    try:
        candidate.setup()
        flash.assert_message_contain(
            'Containers Providers "{}" was saved'.format(provider.name))
    except FlashMessageException:
        soft_assert(
            False,
            "{} wasn't added successfully using {} security protocol and "
            "{} hawkular security protocol".format(
                provider.name,
                test_item.default_sec_protocol,
                test_item.hawkular_sec_protocol))
def test_suspend(self, testing_vm, verify_vm_running, soft_assert):
    """Tests suspend

    Metadata:
        test_flag: power_control, provision
    """
    testing_vm.wait_for_vm_state_change(
        desired_state=testing_vm.STATE_ON, timeout=720, from_details=True)
    last_boot_time = testing_vm.get_detail(properties=("Power Management",
                                                       "Last Boot Time"))
    testing_vm.power_control_from_cfme(option=testing_vm.SUSPEND, cancel=False,
                                       from_details=True)
    flash.assert_message_contain("Suspend initiated")
    if_scvmm_refresh_provider(testing_vm.provider)
    try:
        testing_vm.wait_for_vm_state_change(
            desired_state=testing_vm.STATE_SUSPENDED, timeout=450, from_details=True)
    except TimedOutError:
        if testing_vm.provider.one_of(RHEVMProvider):
            logger.warning('working around bz1174858, ignoring timeout')
        else:
            # BUGFIX: bare `raise` preserves the original traceback
            # (`raise e` would restart it from here).
            raise
    soft_assert(
        testing_vm.provider.mgmt.is_vm_suspended(
            testing_vm.name), "vm not suspended")
    # BUG - https://bugzilla.redhat.com/show_bug.cgi?id=1101604
    if not testing_vm.provider.one_of(RHEVMProvider):
        new_last_boot_time = testing_vm.get_detail(
            properties=("Power Management", "Last Boot Time"))
        soft_assert(new_last_boot_time == last_boot_time,
                    "ui: {} should == orig: {}".format(new_last_boot_time,
                                                       last_boot_time))
def test_guest_os_shutdown(testing_vm_tools, verify_vm_running, soft_assert):
    """Shut down a VM via guest tools and verify the provider sees it off and
    the last-boot timestamp is unchanged."""
    testing_vm_tools.wait_for_vm_state_change(
        desired_state=testing_vm_tools.STATE_ON, timeout=720, from_details=True)
    # Guest shutdown requires VM tools to be up inside the guest.
    wait_for_vm_tools(testing_vm_tools)
    boot_time_before = testing_vm_tools.get_detail(
        properties=("Power Management", "Last Boot Time"))
    testing_vm_tools.power_control_from_cfme(
        option=testing_vm_tools.GUEST_SHUTDOWN, cancel=False, from_details=True)
    flash.assert_message_contain("Shutdown Guest initiated")
    testing_vm_tools.wait_for_vm_state_change(
        desired_state=testing_vm_tools.STATE_OFF, timeout=720, from_details=True)
    soft_assert(
        not testing_vm_tools.provider.mgmt.is_vm_running(testing_vm_tools.name),
        "vm running")
    # A shutdown must not change the last-boot timestamp.
    boot_time_after = testing_vm_tools.get_detail(
        properties=("Power Management", "Last Boot Time"))
    soft_assert(
        boot_time_after == boot_time_before,
        "ui: {} should == orig: {}".format(boot_time_after, boot_time_before))
def configure_db_replication(db_address):
    """Enable the database-synchronization role and replicate to ``db_address``,
    then wait for the UI to report replication active and the backlog empty.
    """
    conf.set_replication_worker_host(db_address)
    flash.assert_message_contain(
        "Configuration settings saved for CFME Server")
    try:
        sel.force_navigate("cfg_settings_currentserver_server")
    except WebDriverException:
        # Dismiss a blocking browser alert and retry the navigation once.
        sel.handle_alert()
        sel.force_navigate("cfg_settings_currentserver_server")
    conf.set_server_roles(database_synchronization=True)
    sel.force_navigate("cfg_diagnostics_region_replication")
    wait_for(
        lambda: conf.get_replication_status(navigate=False),
        fail_condition=False,
        num_sec=360,
        delay=10,
        fail_func=sel.refresh,
        message="get_replication_status",
    )
    assert conf.get_replication_status()
    wait_for(
        lambda: conf.get_replication_backlog(navigate=False) == 0,
        fail_condition=False,
        num_sec=120,
        delay=10,
        fail_func=sel.refresh,
        message="get_replication_backlog",
    )
def test_server_name():
    """Tests that changing the server name updates the about page"""
    flash_msg = 'Configuration settings saved for CFME Server "{}'
    navigate_to(current_appliance.server, 'Server')
    old_server_name = sel.value(BasicInformation.basic_information.appliance_name)
    new_server_name = old_server_name + "-CFME"
    settings_pg = BasicInformation(appliance_name=new_server_name)
    settings_pg.update()
    flash.assert_message_contain(flash_msg.format(new_server_name))
    # CFME updates about box only after any navigation BZ(1408681)
    navigate_to(current_appliance.server, 'Dashboard')
    # CLEANUP: removed a block of commented-out pre-5.7 code that read the
    # server name from the Session Information InfoBlock.
    current_server_name = get_detail('Server Name')
    close_button = sel.element(
        '//div[contains(@class, "about-modal-pf")]//button[@class="close"]')
    close_button.click()
    assert new_server_name == current_server_name, \
        "Server name in About section does not match the new name"
    # Restore the original name and invalidate the cached configuration.
    clear_property_cache(store.current_appliance, 'configuration_details')
    settings_pg = BasicInformation(appliance_name=old_server_name)
    settings_pg.update()
    flash.assert_message_contain(flash_msg.format(old_server_name))
    clear_property_cache(store.current_appliance, 'configuration_details')
def test_power_off(self, test_vm, verify_vm_running, soft_assert, register_event, bug):
    """Tests power off

    Metadata:
        test_flag: power_control, provision
    """
    test_vm.wait_for_vm_state_change(
        desired_state=test_vm.STATE_ON, timeout=720, from_details=True)
    boot_time_before = test_vm.get_detail(properties=("Power Management", "Last Boot Time"))
    register_event(test_vm.provider.type, "vm", test_vm.name,
                   ["vm_power_off_req", "vm_power_off"])
    self._check_power_options_when_on(soft_assert, test_vm, bug, from_details=True)
    test_vm.power_control_from_cfme(option=test_vm.POWER_OFF, cancel=False,
                                    from_details=True)
    flash.assert_message_contain("Stop initiated")
    pytest.sel.force_navigate(
        'infrastructure_provider', context={'provider': test_vm.provider})
    if_scvmm_refresh_provider(test_vm.provider)
    test_vm.wait_for_vm_state_change(
        desired_state='off', timeout=720, from_details=True)
    soft_assert(
        not test_vm.provider.mgmt.is_vm_running(test_vm.name), "vm running")
    # BUG - https://bugzilla.redhat.com/show_bug.cgi?id=1101604
    if test_vm.provider.type != "rhevm":
        # Power-off must not alter the last-boot timestamp.
        boot_time_after = test_vm.get_detail(
            properties=("Power Management", "Last Boot Time"))
        soft_assert(boot_time_after == boot_time_before,
                    "ui: {} should == orig: {}".format(boot_time_after,
                                                       boot_time_before))
def test_hard_reboot(provider, testing_instance, verify_vm_running, soft_assert):
    """Tests instance hard reboot

    Metadata:
        test_flag: power_control, provision
    """
    testing_instance.wait_for_instance_state_change(
        desired_state=testing_instance.STATE_ON)
    navigate_to(testing_instance, 'Details')
    # Capture the pre-reboot timestamp so the refresh can be detected.
    state_change_time = testing_instance.get_detail(
        properties=('Power Management', 'State Changed On'))
    testing_instance.power_control_from_cfme(option=testing_instance.HARD_REBOOT)
    flash.assert_message_contain("Reset initiated")
    wait_for_ui_state_refresh(testing_instance, provider, state_change_time,
                              timeout=720)
    testing_instance.wait_for_instance_state_change(
        desired_state=testing_instance.STATE_ON)
    soft_assert(provider.mgmt.is_vm_running(testing_instance.name),
                "instance is not running")
def test_start_from_suspend(self, testing_vm, verify_vm_suspended, soft_assert):
    """Tests start from suspend

    Metadata:
        test_flag: power_control, provision
    """
    try:
        testing_vm.provider.refresh_provider_relationships()
        testing_vm.wait_for_vm_state_change(
            desired_state=testing_vm.STATE_SUSPENDED,
            timeout=450,
            from_details=True)
    except TimedOutError:
        if testing_vm.provider.one_of(RHEVMProvider):
            # RHEV may never report the suspended state (bz1174858).
            logger.warning('working around bz1174858, ignoring timeout')
        else:
            raise
    previous_boot_time = testing_vm.get_detail(
        properties=("Power Management", "Last Boot Time"))
    testing_vm.power_control_from_cfme(
        option=testing_vm.POWER_ON, cancel=False, from_details=True)
    flash.assert_message_contain("Start initiated")
    if_scvmm_refresh_provider(testing_vm.provider)
    testing_vm.wait_for_vm_state_change(
        desired_state=testing_vm.STATE_ON, timeout=720, from_details=True)
    # A refreshed boot timestamp confirms the VM actually resumed.
    wait_for_last_boot_timestamp_refresh(testing_vm, previous_boot_time, timeout=600)
    soft_assert(testing_vm.provider.mgmt.is_vm_running(testing_vm.name),
                "vm not running")
def test_soft_reboot(setup_provider_funcscope, provider, testing_instance, soft_assert,
                     verify_vm_running):
    """ Tests instance soft reboot

    Metadata:
        test_flag: power_control, provision
    """
    testing_instance.wait_for_instance_state_change(desired_state=testing_instance.STATE_ON)
    state_change_time = testing_instance.get_detail(
        properties=('Power Management', 'State Changed On'))
    testing_instance.power_control_from_cfme(option=testing_instance.SOFT_REBOOT)
    flash.assert_message_contain('Restart Guest initiated')
    wait_for_state_change_time_refresh(testing_instance, provider, state_change_time,
                                       timeout=720)
    if provider.type == 'gce' \
            and testing_instance.get_detail(properties=('Power Management', 'Power State')) \
            == testing_instance.STATE_UNKNOWN:
        # Wait for one more state change, as the transitional state also changes
        # "State Changed On" time on the GCE provider.  (This explanation used to
        # be a bare triple-quoted string, i.e. a no-op expression statement.)
        logger.info("Instance is still in \"{}\" state. please wait before CFME will show "
                    "correct state".format(testing_instance.get_detail(
                        properties=('Power Management', 'Power State'))))
        state_change_time = testing_instance.get_detail(
            properties=('Power Management', 'State Changed On'))
        wait_for_state_change_time_refresh(testing_instance, provider, state_change_time,
                                           timeout=720)
    testing_instance.wait_for_instance_state_change(desired_state=testing_instance.STATE_ON)
    soft_assert(
        provider.mgmt.is_vm_running(testing_instance.name), "instance is not running")
def test_terminate(setup_provider_funcscope, provider, testing_instance, soft_assert,
                   verify_vm_running):
    """ Tests instance terminate

    Metadata:
        test_flag: power_control, provision
    """
    testing_instance.wait_for_vm_state_change(
        desired_state=testing_instance.STATE_ON, timeout=720, from_details=True)
    testing_instance.power_control_from_cfme(
        option=testing_instance.TERMINATE, cancel=False, from_details=True)
    # Flash wording differs between product streams; a version dict is passed
    # through as in the sibling terminate test.
    flash.assert_message_contain({
        version.LOWEST: "Terminate initiated",
        "5.5": "Vm Destroy initiated"})
    testing_instance.wait_to_disappear(timeout=600)
    if provider.type == 'openstack':
        soft_assert(not testing_instance.does_vm_exist_on_provider(),
                    "instance still exists")
    else:
        soft_assert(
            provider.mgmt.is_vm_state(testing_instance.name,
                                      provider.mgmt.states['deleted']),
            "instance still exists")
    sel.force_navigate("clouds_instances_archived_branch")
    soft_assert(testing_instance.name in get_all_instances(do_not_navigate=True),
                "instance is not among archived instances")
def test_terminate(setup_provider_funcscope, provider, test_instance, soft_assert,
                   verify_vm_running):
    """ Tests instance terminate

    Metadata:
        test_flag: power_control, provision
    """
    test_instance.wait_for_vm_state_change(
        desired_state=test_instance.STATE_ON, timeout=720, from_details=True)
    test_instance.power_control_from_cfme(option=test_instance.TERMINATE, cancel=False,
                                          from_details=True)
    flash.assert_message_contain("Terminate initiated")
    # Poll until the instance drops out of the CFME inventory.
    wait_for(test_instance.does_vm_exist_in_cfme, fail_condition=True, num_sec=600,
             delay=30, fail_func=test_instance.provider_crud.refresh_provider_relationships,
             message="VM no longer exists in cfme UI")
    if provider.type == 'openstack':
        soft_assert(not provider.mgmt.does_vm_exist(test_instance.name),
                    "instance still exists")
    else:
        soft_assert(
            provider.mgmt.is_vm_state(test_instance.name,
                                      provider.mgmt.states['deleted']),
            "instance still exists")
    sel.force_navigate("clouds_instances_archived_branch")
    soft_assert(test_instance.name in get_all_instances(do_not_navigate=True),
                "instance is not among archived instances")
def delete(self, from_dest='All'):
    """ Delete the stack, starting from the destination provided by from_dest

    @param from_dest: where to delete from, a valid navigation destination for Stack
    """
    # Bail out early on an unknown destination, otherwise navigate there.
    if from_dest not in navigator.list_destinations(self):
        msg = 'cfme.cloud.stack does not have destination {}'.format(from_dest)
        raise DestinationNotFound(msg)
    navigate_to(self, from_dest)

    # Trigger removal with the control appropriate to the starting page.
    if from_dest == 'All':
        sel.check(Quadicon(self.name, self.quad_name).checkbox())
        cfg_btn("Remove Orchestration Stacks", invokes_alert=True)
    elif from_dest == 'Details':
        cfg_btn("Remove this Orchestration Stack", invokes_alert=True)
    sel.handle_alert()

    # The delete initiated message may get missed if the delete is fast
    try:
        flash.assert_message_contain("Delete initiated for 1 Orchestration Stacks")
    except FlashMessageException as ex:
        if 'No flash message contains' in ex.message:
            flash.assert_message_contain("The selected Orchestration Stacks was deleted")
    self.wait_for_delete()
def do_scan(vm, additional_item_check=None):
    """Run a SmartState scan on ``vm`` and wait until its analysis data refreshes.

    If ``additional_item_check`` (a detail-properties tuple) is given, also wait
    for that detail to change after the scan.
    """
    if vm.rediscover_if_analysis_data_present():
        # policy profile assignment is lost so reassign
        vm.assign_policy_profiles(*vm._assigned_pp)

    def _last_analyzed():
        return vm.get_detail(properties=("Lifecycle", "Last Analyzed")).lower()

    baseline = _last_analyzed()
    if additional_item_check is not None:
        baseline_item = vm.get_detail(properties=additional_item_check)
    vm.smartstate_scan(cancel=False, from_details=True)
    flash.assert_message_contain("Smart State Analysis initiated")
    logger.info("Scan initiated")
    wait_for(lambda: _last_analyzed() != baseline, num_sec=600, delay=5,
             fail_func=lambda: toolbar.select("Reload"))
    if additional_item_check is not None:
        wait_for(lambda: vm.get_detail(properties=additional_item_check) != baseline_item,
                 num_sec=120, delay=5, fail_func=lambda: toolbar.select("Reload"))
    logger.info("Scan finished")
def test_soft_reboot(setup_provider_funcscope, provider, testing_instance, soft_assert,
                     verify_vm_running):
    """ Tests instance soft reboot

    Metadata:
        test_flag: power_control, provision
    """
    testing_instance.wait_for_vm_state_change(
        desired_state=testing_instance.STATE_ON, timeout=720, from_details=True)
    previous_state_change = testing_instance.get_detail(
        ('Power Management', 'State Changed On'))
    testing_instance.power_control_from_cfme(
        option=testing_instance.SOFT_REBOOT, cancel=False, from_details=True)
    # The flash wording changed in the 5.5 stream.
    flash.assert_message_contain(version.pick({
        version.LOWEST: "Restart initiated",
        "5.5": "Restart Guest initiated"}))
    wait_for_state_change_time_refresh(testing_instance, previous_state_change,
                                       timeout=720)
    testing_instance.wait_for_vm_state_change(
        desired_state=testing_instance.STATE_ON, from_details=True)
    soft_assert(provider.mgmt.is_vm_running(testing_instance.name),
                "instance is not running")
def test_power_off(self, provider_init, test_vm, verify_vm_running, soft_assert):
    """Power the VM off from the details page.

    Verifies the provider agrees the VM is stopped and that
    "Last Boot Time" did not change (power off is not a reboot).
    """
    test_vm.wait_for_vm_state_change(desired_state=Vm.STATE_ON, timeout_in_minutes=12,
                                     from_details=True)
    last_boot_time = test_vm.get_detail(properties=("Power Management", "Last Boot Time"))
    self._check_power_options_when_on(soft_assert, test_vm, from_details=True)
    test_vm.power_control_from_cfme(option=Vm.POWER_OFF, cancel=False, from_details=True)
    flash.assert_message_contain("Stop initiated")
    pytest.sel.force_navigate('infrastructure_provider',
                              context={'provider': test_vm.provider_crud})
    # Use the class constant instead of the bare 'off' literal.
    test_vm.wait_for_vm_state_change(desired_state=Vm.STATE_OFF, timeout_in_minutes=12,
                                     from_details=True)
    soft_assert(
        not test_vm.provider_crud.get_mgmt_system().is_vm_running(test_vm.name),
        "vm running")
    # BUG - https://bugzilla.redhat.com/show_bug.cgi?id=1101604
    if not isinstance(test_vm.provider_crud, RHEVMProvider):
        new_last_boot_time = test_vm.get_detail(
            properties=("Power Management", "Last Boot Time"))
        soft_assert(
            new_last_boot_time == last_boot_time,
            "ui: {} should == orig: {}".format(new_last_boot_time, last_boot_time))
def test_server_name(appliance):
    """Tests that changing the server name updates the about page"""
    # NOTE: the trailing quote is deliberately open — the flash text is matched
    # with a 'contains' check.
    flash_msg = 'Configuration settings saved for CFME Server "{}'

    navigate_to(appliance.server, 'Server')
    old_server_name = sel.value(BasicInformation.basic_information.appliance_name)
    new_server_name = '{}-CFME'.format(old_server_name)

    # Rename the server and confirm via the flash message.
    settings_pg = BasicInformation(appliance_name=new_server_name)
    settings_pg.update()
    flash.assert_message_contain(flash_msg.format(new_server_name))
    appliance.server.name = new_server_name
    # CFME updates about box only after any navigation BZ(1408681)
    navigate_to(appliance.server, 'Dashboard')
    current_server_name = get_detail('Server Name')
    if version.current_version() >= '5.7':
        # New modal
        sel.click('//div[contains(@class, "about-modal-pf")]//button[@class="close"]')
    assert new_server_name == current_server_name, \
        "Server name in About section does not match the new name"
    clear_property_cache(store.current_appliance, 'configuration_details')

    # Restore the original server name.
    settings_pg = BasicInformation(appliance_name=old_server_name)
    settings_pg.update()
    flash.assert_message_contain(flash_msg.format(old_server_name))
    appliance.server.name = old_server_name
    clear_property_cache(store.current_appliance, 'configuration_details')
def test_add_provider_ssl(provider, default_sec_protocols, soft_assert):
    """ This test checks adding container providers with 3 different security protocols:
    SSL trusting custom CA, SSL without validation and SSL
    Steps:
        * Navigate to Containers Menu
        * Navigate to Add Provider Menu
        * Try to add a Container Provider with each of the following security options:
            Default Endpoint = SSL trusting custom CA/SSL without validation/SSL
        * Assert that provider was added successfully
    """
    prov = OpenshiftProvider(
        sec_protocol=default_sec_protocols,
        name=provider.name,
        hostname=str(provider.hostname),
        hawkular_hostname=str(provider.hawkular_hostname),
        hawkular_api_port=str(provider.hawkular_api_port),
        hawkular_sec_protocol=str(provider.hawkular_sec_protocol),
        credentials=provider.credentials)
    try:
        prov.create()
        flash.assert_message_contain(
            'Containers Providers "' + provider.name + '" was saved')
    except FlashMessageException:
        # Record a soft failure rather than aborting the whole test run.
        soft_assert(False, provider.name + ' wasn\'t added successfully using ' +
                    default_sec_protocols + ' security protocol')
    ContainersProvider.clear_providers()
def test_sat5_incorrect_url_format_check(request, unset_org_id):
    # A Satellite 5 URL that does not match the required https://.../XMLRPC
    # format must be rejected, i.e. no save-confirmation flash appears.
    # Check that we weren't allowed to save the data
    with error.expected("No matching flash message"):
        red_hat_updates.update_registration(
            service="sat5",
            url="url.not.matching.format.example.com",
            username="******",
            password="******"
        )
    # Confirm that it was the Sat5 url check that blocked it
    flash.assert_message_contain("https://server.example.com/XMLRPC")
def run_smartstate_analysis(self):
    """Initiate SmartState Analysis from this entity's Details page.

    Confirms the browser alert and verifies the initiation flash message.
    """
    navigate_to(self, 'Details')
    tb.select('Configuration', 'Perform SmartState Analysis', invokes_alert=True)
    # Accept (do not cancel) the confirmation dialog.
    sel.handle_alert(cancel=False)
    flash.assert_message_contain(
        'Cluster / Deployment Role: scan successfully initiated')
def run_smartstate_analysis(self):
    """ Runs smartstate analysis on this host

    Navigates to the host, starts the analysis from the Configuration
    toolbar, accepts the confirmation alert and checks the flash message.

    Note:
        The host must have valid credentials already set up for this to work.
    """
    sel.force_navigate('infrastructure_host', context={'host': self})
    tb.select('Configuration', 'Perform SmartState Analysis', invokes_alert=True)
    sel.handle_alert()
    # The flash message quotes the host name.
    flash.assert_message_contain('"{}": Analysis successfully initiated'.format(self.name))
def test_sat5_incorrect_url_format_check(request, unset_org_id):
    # A Satellite 5 URL that does not match the required https://.../XMLRPC
    # format must be rejected, i.e. no save-confirmation flash appears.
    # Check that we weren't allowed to save the data
    with error.expected('No matching flash message'):
        red_hat_updates.update_registration(
            service="sat5",
            url="url.not.matching.format.example.com",
            username="******",
            password="******")
    # Confirm that it was the Sat5 url check that blocked it
    flash.assert_message_contain("https://server.example.com/XMLRPC")
def delete(self, cancel=False):
    """Delete this Automate namespace via the explorer table.

    ``cancel=True`` dismisses the confirmation alert instead of accepting it.
    """
    sel.force_navigate("automate_explorer_table_select",
                       context={'tree_item': self.parent, 'table_item': self})
    # Nested namespaces and root-level namespaces use different removal buttons.
    remove_label = ('Remove selected Items' if len(self.path) > 1
                    else 'Remove Namespaces')
    cfg_btn(remove_label, invokes_alert=True)
    sel.handle_alert(cancel)
    flash.assert_message_contain('Delete successful')
    flash.assert_success_message('The selected Automate Namespaces were deleted')
def run_smartstate_analysis(self):
    """ Runs smartstate analysis on this host

    Opens the host's details, starts the analysis from the Configuration
    toolbar, accepts the confirmation alert and checks the flash message.

    Note:
        The host must have valid credentials already set up for this to work.
    """
    self.load_details()
    tb.select('Configuration', 'Perform SmartState Analysis', invokes_alert=True)
    sel.handle_alert()
    # The flash message quotes the host name.
    flash.assert_message_contain('"{}": scan successfully initiated'.format(self.name))
def test_start_from_suspend(self, test_vm, verify_vm_suspended, soft_assert,
                            register_event, bug):
    """Tests start from suspend

    Metadata:
        test_flag: power_control, provision
    """
    try:
        test_vm.provider_crud.refresh_provider_relationships()
        test_vm.wait_for_vm_state_change(desired_state=Vm.STATE_SUSPENDED,
                                         timeout=450, from_details=True)
    except TimedOutError:
        if isinstance(test_vm.provider_crud, RHEVMProvider):
            logger.warning('working around bz1174858, ignoring timeout')
        else:
            # Bare raise re-raises with the original traceback intact
            # ('raise e' would have reset it).
            raise
    register_event(test_vm.provider_crud.get_yaml_data()['type'], "vm", test_vm.name,
                   ["vm_power_on_req", "vm_power_on"])
    last_boot_time = test_vm.get_detail(properties=("Power Management", "Last Boot Time"))
    state_chg_time = test_vm.get_detail(properties=("Power Management", "State Changed On"))
    self._check_power_options_when_off(soft_assert, test_vm, from_details=True)
    test_vm.power_control_from_cfme(option=Vm.POWER_ON, cancel=False, from_details=True)
    flash.assert_message_contain("Start initiated")
    pytest.sel.force_navigate('infrastructure_provider',
                              context={'provider': test_vm.provider_crud})
    if_scvmm_refresh_provider(test_vm.provider_crud)
    test_vm.wait_for_vm_state_change(desired_state=Vm.STATE_ON, timeout=720,
                                     from_details=True)
    self._wait_for_last_boot_timestamp_refresh(test_vm, last_boot_time, timeout=600)
    soft_assert(
        test_vm.provider_crud.get_mgmt_system().is_vm_running(test_vm.name),
        "vm not running")
    new_state_chg_time = test_vm.get_detail(
        properties=("Power Management", "State Changed On"))
    soft_assert(
        new_state_chg_time != state_chg_time,
        "ui: {} should != orig: {}".format(new_state_chg_time, state_chg_time))
    # SCVMM does not refresh the boot timestamp on resume.
    if not isinstance(test_vm.provider_crud, SCVMMProvider):
        new_last_boot_time = test_vm.get_detail(
            properties=("Power Management", "Last Boot Time"))
        soft_assert(
            new_last_boot_time != last_boot_time,
            "ui: {} should != orig: {}".format(new_last_boot_time, last_boot_time))
def test_power_off(self, test_vm, verify_vm_running, soft_assert):
    """Power the VM off via CFME and verify both the quadicon and the
    provider report it as stopped.
    """
    test_vm.wait_for_vm_state_change(desired_state=Vm.STATE_ON, timeout_in_minutes=12)
    test_vm.power_control_from_cfme(option=Vm.POWER_OFF, cancel=False)
    flash.assert_message_contain("Stop initiated")
    pytest.sel.force_navigate("infrastructure_provider",
                              context={"provider": test_vm.provider_crud})
    test_vm.wait_for_vm_state_change(desired_state=Vm.STATE_OFF, timeout_in_minutes=15)
    soft_assert(test_vm.find_quadicon().state == "currentstate-off")
    soft_assert(not test_vm.provider_crud.get_mgmt_system().is_vm_running(test_vm.name),
                "vm running")
def test_guest_os_reset(testing_vm_tools, verify_vm_running, soft_assert):
    """Restart the guest OS via CFME and check the boot timestamp refreshes."""
    testing_vm_tools.wait_for_vm_state_change(
        desired_state=testing_vm_tools.STATE_ON, timeout=720, from_details=True)
    wait_for_vm_tools(testing_vm_tools)
    boot_time_before = testing_vm_tools.get_detail(
        properties=("Power Management", "Last Boot Time"))
    testing_vm_tools.power_control_from_cfme(
        option=testing_vm_tools.GUEST_RESTART, cancel=False, from_details=True)
    flash.assert_message_contain("Restart Guest initiated")
    testing_vm_tools.wait_for_vm_state_change(
        desired_state=testing_vm_tools.STATE_ON, timeout=720, from_details=True)
    wait_for_last_boot_timestamp_refresh(testing_vm_tools, boot_time_before)
    soft_assert(testing_vm_tools.provider.mgmt.is_vm_running(testing_vm_tools.name),
                "vm not running")
def test_resume(provider, testing_instance, verify_vm_suspended, soft_assert):
    """ Tests instance resume

    Metadata:
        test_flag: power_control, provision
    """
    testing_instance.wait_for_instance_state_change(
        desired_state=testing_instance.STATE_SUSPENDED)
    testing_instance.power_control_from_cfme(option=testing_instance.START)
    flash.assert_message_contain("Start initiated")
    testing_instance.wait_for_instance_state_change(
        desired_state=testing_instance.STATE_ON)
    soft_assert(provider.mgmt.is_vm_running(testing_instance.name),
                "instance is not running")
def run_smartstate_analysis(self):
    """ Runs smartstate analysis on this host

    Loads the host details, starts the analysis from the Configuration
    toolbar, accepts the confirmation alert and checks the flash message.

    Note:
        The host must have valid credentials already set up for this to work.
    """
    self.load_details()
    tb.select('Configuration', 'Perform SmartState Analysis', invokes_alert=True)
    sel.handle_alert()
    # The flash message quotes the host name.
    flash.assert_message_contain(
        '"{}": scan successfully initiated'.format(self.name))
def test_power_on(self, test_vm, verify_vm_stopped, soft_assert, register_event):
    """Power on a stopped VM via CFME; verify quadicon and provider state."""
    test_vm.wait_for_vm_state_change(desired_state=Vm.STATE_OFF, timeout=720)
    register_event(test_vm.provider_crud.get_yaml_data()['type'], "vm", test_vm.name,
                   ["vm_power_on_req", "vm_power_on"])
    test_vm.power_control_from_cfme(option=Vm.POWER_ON, cancel=False)
    flash.assert_message_contain("Start initiated")
    pytest.sel.force_navigate('infrastructure_provider',
                              context={'provider': test_vm.provider_crud})
    test_vm.wait_for_vm_state_change(desired_state=Vm.STATE_ON, timeout=900)
    soft_assert(test_vm.find_quadicon().state == 'currentstate-on')
    soft_assert(test_vm.provider_crud.get_mgmt_system().is_vm_running(test_vm.name),
                "vm not running")
def test_terminate(provider, testing_instance, verify_vm_running, soft_assert):
    """ Tests instance terminate

    Metadata:
        test_flag: power_control, provision
    """
    testing_instance.wait_for_instance_state_change(
        desired_state=testing_instance.STATE_ON)
    testing_instance.power_control_from_cfme(option=testing_instance.TERMINATE)
    flash.assert_message_contain('Vm Destroy initiated')
    # Any of these states counts as terminated.
    terminated_states = (testing_instance.STATE_TERMINATED,
                         testing_instance.STATE_ARCHIVED,
                         testing_instance.STATE_UNKNOWN)
    soft_assert(testing_instance.wait_for_instance_state_change(
        desired_state=terminated_states, timeout=1200))
def test_power_on(self, testing_vm, verify_vm_stopped, soft_assert):
    """Tests power on

    Metadata:
        test_flag: power_control, provision
    """
    testing_vm.wait_for_vm_state_change(desired_state=testing_vm.STATE_OFF, timeout=720)
    testing_vm.power_control_from_cfme(option=testing_vm.POWER_ON, cancel=False)
    flash.assert_message_contain("Start initiated")
    if_scvmm_refresh_provider(testing_vm.provider)
    testing_vm.wait_for_vm_state_change(desired_state=testing_vm.STATE_ON, timeout=900)
    soft_assert('currentstate-on' in testing_vm.find_quadicon().state)
    soft_assert(testing_vm.provider.mgmt.is_vm_running(testing_vm.name),
                "vm not running")
def perform_smartstate_analysis(self, wait_for_finish=False, timeout='7M'):
    """Performing SmartState Analysis on this Image
    """
    navigate_to(self, 'Details')
    tb.select('Configuration', 'Perform SmartState Analysis', invokes_alert=True)
    sel.handle_alert()
    flash.assert_message_contain('Analysis successfully initiated')
    # Guard clause: nothing more to do unless the caller wants to block.
    if not wait_for_finish:
        return
    try:
        tasks.wait_analysis_finished('Container image analysis', 'container',
                                     timeout=timeout)
    except TimedOutError:
        raise TimedOutError('Timeout exceeded, Waited too much time for SSA to finish ({}).'
                            .format(timeout))
def test_terminate(setup_provider_funcscope, provider, testing_instance, soft_assert,
                   verify_vm_running):
    """ Tests instance terminate

    Metadata:
        test_flag: power_control, provision
    """
    testing_instance.wait_for_vm_state_change(
        desired_state=testing_instance.STATE_ON, timeout=720, from_details=True)
    testing_instance.power_control_from_cfme(
        option=testing_instance.TERMINATE, cancel=False, from_details=True)
    # Flash wording differs between product streams.
    flash.assert_message_contain({
        version.LOWEST: "Terminate initiated",
        "5.5": "Vm Destroy initiated"})
    soft_assert(wait_for_termination(provider, testing_instance),
                "Instance still exists")
def test_unpause(setup_provider_funcscope, provider, testing_instance, soft_assert,
                 verify_vm_paused):
    """ Tests instance unpause

    Metadata:
        test_flag: power_control, provision
    """
    testing_instance.wait_for_instance_state_change(
        desired_state=testing_instance.STATE_PAUSED)
    testing_instance.power_control_from_cfme(option=testing_instance.START)
    flash.assert_message_contain("Start initiated")
    testing_instance.wait_for_instance_state_change(
        desired_state=testing_instance.STATE_ON)
    soft_assert(provider.mgmt.is_vm_running(testing_instance.name),
                "instance is not running")