def test_distributed_vm_power_control(request, test_vm, virtualcenter_provider,
                                      ensure_vm_running, register_event, soft_assert):
    """Tests that a replication parent appliance can control the power state of a
    VM being managed by a replication child appliance.

    Metadata:
        test_flag: replication
    """
    appl1, appl2 = get_replication_appliances()

    @request.addfinalizer
    def _destroy_appliances():
        appl1.destroy()
        appl2.destroy()

    # Replicate from appl1 to appl2 and add the provider on the child appliance.
    appl1.ipapp.browser_steal = True
    with appl1.ipapp:
        configure_db_replication(appl2.hostname)
        virtualcenter_provider.create()
        wait_for_a_provider()

    # Drive the power-off from the replication parent and verify the result.
    appl2.ipapp.browser_steal = True
    with appl2.ipapp:
        for evt in ('request_vm_poweroff', 'vm_poweroff'):
            register_event(target_type='VmOrTemplate', target_name=test_vm.name,
                           event_type=evt)
        test_vm.power_control_from_cfme(option=test_vm.POWER_OFF, cancel=False)
        navigate_to(test_vm.provider, 'Details')
        test_vm.wait_for_vm_state_change(desired_state=test_vm.STATE_OFF, timeout=900)
        soft_assert(test_vm.find_quadicon().data['state'] == 'currentstate-off')
        soft_assert(not test_vm.provider.mgmt.is_vm_running(test_vm.name), "vm running")
def test_distributed_vm_power_control(request, test_vm, virtualcenter_provider,
                                      verify_vm_running, register_event, soft_assert):
    """Tests that a replication parent appliance can control the power state of a
    VM being managed by a replication child appliance.

    Metadata:
        test_flag: replication
    """
    appl1, appl2 = get_replication_appliances()

    def _teardown():
        appl1.destroy()
        appl2.destroy()
    request.addfinalizer(_teardown)

    # Configure replication on the child and add the provider there.
    appl1.ipapp.browser_steal = True
    with appl1.ipapp:
        configure_db_replication(appl2.hostname)
        virtualcenter_provider.create()
        wait_for_a_provider()

    # Power the VM off from the replication parent.
    appl2.ipapp.browser_steal = True
    with appl2.ipapp:
        vm_target = dict(target_type='VmOrTemplate', target_name=test_vm.name)
        register_event(event_type='request_vm_poweroff', **vm_target)
        register_event(event_type='vm_poweroff', **vm_target)
        test_vm.power_control_from_cfme(option=test_vm.POWER_OFF, cancel=False)
        navigate_to(test_vm.provider, 'Details')
        test_vm.wait_for_vm_state_change(desired_state=test_vm.STATE_OFF, timeout=900)
        soft_assert(test_vm.find_quadicon().data['state'] == 'currentstate-off')
        soft_assert(not test_vm.provider.mgmt.is_vm_running(test_vm.name), "vm running")
def test_appliance_replicate_database_disconnection(request, virtualcenter_provider,
                                                    appliance):
    """Tests a database disconnection

    Metadata:
        test_flag: replication
    """
    appl1, appl2 = get_replication_appliances()

    @request.addfinalizer
    def _destroy_appliances():
        appl1.destroy()
        appl2.destroy()

    appl1.ipapp.browser_steal = True
    with appl1.ipapp:
        configure_db_replication(appl2.address)
        # Replication is up and running, now stop the DB on the replication parent
        stop_db_process(appl2.address)
        sleep(60)
        start_db_process(appl2.address)
        navigate_to(appliance.server.zone.region, 'Replication')
        # Replication status should recover once the parent DB is back up.
        wait_for(lambda: conf.get_replication_status(navigate=False),
                 fail_condition=False, num_sec=360, delay=10,
                 fail_func=sel.refresh, message="get_replication_status")
        assert conf.get_replication_status()
        virtualcenter_provider.create()
        wait_for_a_provider()

    appl2.ipapp.browser_steal = True
    with appl2.ipapp:
        wait_for_a_provider()
        assert virtualcenter_provider.exists
def test_appliance_replicate_sync_role_change(request, virtualcenter_provider, appliance):
    """Tests that a role change is replicated

    Metadata:
        test_flag: replication
    """
    appl1, appl2 = get_replication_appliances()

    @request.addfinalizer
    def _destroy_appliances():
        appl1.destroy()
        appl2.destroy()

    appl1.ipapp.browser_steal = True
    with appl1.ipapp:
        configure_db_replication(appl2.address)

        def _wait_replication_status(fail_condition):
            # Poll the replication status page until it leaves the fail_condition state.
            navigate_to(appliance.server.zone.region, 'Replication')
            wait_for(lambda: conf.get_replication_status(navigate=False),
                     fail_condition=fail_condition, num_sec=360, delay=10,
                     fail_func=sel.refresh, message="get_replication_status")

        # Replication is up and running, now disable DB sync role
        conf.set_server_roles(database_synchronization=False)
        _wait_replication_status(True)
        conf.set_server_roles(database_synchronization=True)
        _wait_replication_status(False)
        assert conf.get_replication_status()
        virtualcenter_provider.create()
        wait_for_a_provider()

    appl2.ipapp.browser_steal = True
    with appl2.ipapp:
        wait_for_a_provider()
        assert virtualcenter_provider.exists
def test_db_restore(request, soft_assert, virtualcenter_provider_crud, ec2_provider_crud):
    """Back up the database of one appliance and restore it onto another.

    Sets up an infra and a cloud provider plus the automate role on the first
    appliance, takes a DB backup, copies the v2_key and the dump to the second
    appliance, restores there, and then verifies that providers, VM discovery
    and server roles all survived the restore.
    """
    appl1, appl2 = get_appliances()

    def finalize():
        appl1.destroy()
        appl2.destroy()
    request.addfinalizer(finalize)

    appl1.ipapp.browser_steal = True
    with appl1.ipapp:
        # Manage infra,cloud providers and set some roles before taking a DB backup
        config.set_server_roles(automate=True)
        roles = config.get_server_roles()
        virtualcenter_provider_crud.setup()
        wait_for_a_provider()
        ec2_provider_crud.setup()
        cloud_provider.wait_for_a_provider()
        providers_appl1 = appl1.ipapp.managed_known_providers
        appl1.ipapp.db.backup()

    # Fetch v2_key and DB backup from the first appliance
    with appl1.ipapp.ssh_client as ssh:
        rand_filename = "/tmp/v2_key_{}".format(fauxfactory.gen_alphanumeric())
        ssh.get_file("/var/www/miq/vmdb/certs/v2_key", rand_filename)
        dump_filename = "/tmp/db_dump_{}".format(fauxfactory.gen_alphanumeric())
        ssh.get_file("/tmp/evm_db.backup", dump_filename)

    with appl2.ipapp.ssh_client as ssh:
        ssh.put_file(rand_filename, "/var/www/miq/vmdb/certs/v2_key")
        ssh.put_file(dump_filename, "/tmp/evm_db.backup")

    appl2.ipapp.browser_steal = True
    with appl2.ipapp:
        # Restore DB on the second appliance
        appl2.ipapp.evmserverd.stop()
        appl2.ipapp.db.drop()
        appl2.ipapp.db.restore()
        appl2.ipapp.start_evm_service()
        appl2.ipapp.wait_for_web_ui()
        wait_for_a_provider()
        cloud_provider.wait_for_a_provider()
        # Assert providers on the second appliance
        providers_appl2 = appl2.ipapp.managed_known_providers
        assert set(providers_appl2).issubset(providers_appl1), (
            'Restored DB is missing some providers')
        # Verify that existing provider can detect new VMs on the second appliance
        vm = provision_vm(request, virtualcenter_provider_crud)
        soft_assert(vm.find_quadicon().data['state'] == 'currentstate-on')
        soft_assert(vm.provider.mgmt.is_vm_running(vm.name), "vm running")
        # Assert server roles on the second appliance.
        # .items() instead of the Python-2-only .iteritems() so the test also
        # runs under Python 3; behavior is identical for iteration.
        for role, is_enabled in config.get_server_roles(db=False).items():
            if is_enabled:
                assert roles[role], "Role '{}' is selected but should not be".format(role)
            else:
                assert not roles[role], "Role '{}' is not selected but should be".format(role)
def test_distributed_vm_power_control(request, test_vm, vmware_provider,
                                      verify_vm_running, register_event, soft_assert):
    """Tests that a replication parent appliance can control the power state of a
    VM being managed by a replication child appliance.

    Metadata:
        test_flag: replication
    """
    appl1, appl2 = get_replication_appliances()

    @request.addfinalizer
    def _destroy_appliances():
        appl1.destroy()
        appl2.destroy()

    # Child appliance: replicate to the parent and add the provider.
    appl1.ipapp.browser_steal = True
    with appl1.ipapp:
        configure_db_replication(appl2.address)
        vmware_provider.create()
        wait_for_a_provider()

    # Parent appliance: issue the power-off and verify it lands on the VM.
    appl2.ipapp.browser_steal = True
    with appl2.ipapp:
        register_event(test_vm.provider.type, "vm", test_vm.name,
                       ["vm_power_off_req", "vm_power_off"])
        test_vm.power_control_from_cfme(option=test_vm.POWER_OFF, cancel=False)
        flash.assert_message_contain("Stop initiated")
        pytest.sel.force_navigate('infrastructure_provider',
                                  context={'provider': test_vm.provider})
        test_vm.wait_for_vm_state_change(desired_state=test_vm.STATE_OFF, timeout=900)
        soft_assert(test_vm.find_quadicon().state == 'currentstate-off')
        soft_assert(not test_vm.provider.mgmt.is_vm_running(test_vm.name), "vm running")
def test_appliance_replicate_database_disconnection(request, vmware_provider):
    """Tests a database disconnection

    Metadata:
        test_flag: replication
    """
    appl1, appl2 = get_replication_appliances()

    @request.addfinalizer
    def _destroy_appliances():
        appl1.destroy()
        appl2.destroy()

    appl1.ipapp.browser_steal = True
    with appl1.ipapp:
        configure_db_replication(appl2.address)
        # Replication is up and running, now stop the DB on the replication parent
        stop_db_process(appl2.address)
        sleep(60)
        start_db_process(appl2.address)
        # Status should report healthy again after the parent DB restarts.
        sel.force_navigate("cfg_diagnostics_region_replication")
        wait_for(lambda: conf.get_replication_status(navigate=False),
                 fail_condition=False, num_sec=360, delay=10,
                 fail_func=sel.refresh, message="get_replication_status")
        assert conf.get_replication_status()
        vmware_provider.create()
        wait_for_a_provider()

    appl2.ipapp.browser_steal = True
    with appl2.ipapp:
        wait_for_a_provider()
        assert vmware_provider.exists
def test_appliance_replicate_sync_role_change_with_backlog(request, virtualcenter_provider,
                                                           appliance):
    """Tests that a role change is replicated with backlog

    Metadata:
        test_flag: replication
    """
    appl1, appl2 = get_replication_appliances()
    replication_conf = appliance.server.zone.region.replication

    @request.addfinalizer
    def _destroy_appliances():
        appl1.destroy()
        appl2.destroy()

    appl1.ipapp.browser_steal = True
    with appl1.ipapp:
        server_settings = appliance.server.settings
        configure_db_replication(appl2.hostname)

        def _await_status(fail_condition):
            # Poll replication status until it flips out of fail_condition.
            wait_for(replication_conf.get_replication_status,
                     fail_condition=fail_condition, num_sec=360, delay=10,
                     fail_func=appl1.server.browser.refresh,
                     message="get_replication_status")

        # Replication is up and running, now disable DB sync role
        virtualcenter_provider.create()
        server_settings.disable_server_roles('database_synchronization')
        _await_status(True)
        server_settings.enable_server_roles('database_synchronization')
        _await_status(False)
        assert replication_conf.get_replication_status()
        wait_for_a_provider()

    appl2.ipapp.browser_steal = True
    with appl2.ipapp:
        wait_for_a_provider()
        assert virtualcenter_provider.exists
def test_appliance_replicate_sync_role_change_with_backlog(request, provider):
    """Tests that a role change is replicated with backlog

    Metadata:
        test_flag: replication
    """
    appl1, appl2 = get_replication_appliances()

    def _teardown():
        appl1.destroy()
        appl2.destroy()
    request.addfinalizer(_teardown)

    appl1.ipapp.browser_steal = True
    with appl1.ipapp:
        configure_db_replication(appl2.address)

        def _await_status(fail_condition):
            # Re-navigate and poll until the status leaves fail_condition.
            sel.force_navigate("cfg_diagnostics_region_replication")
            wait_for(lambda: conf.get_replication_status(navigate=False),
                     fail_condition=fail_condition, num_sec=360, delay=10,
                     fail_func=sel.refresh, message="get_replication_status")

        # Replication is up and running, now disable DB sync role
        provider.create()
        conf.set_server_roles(database_synchronization=False)
        _await_status(True)
        conf.set_server_roles(database_synchronization=True)
        _await_status(False)
        assert conf.get_replication_status()
        wait_for_a_provider()

    appl2.ipapp.browser_steal = True
    with appl2.ipapp:
        wait_for_a_provider()
        assert provider.exists
def test_appliance_replicate_database_disconnection_with_backlog(request,
                                                                 virtualcenter_provider,
                                                                 appliance):
    """Tests a database disconnection with backlog

    Metadata:
        test_flag: replication
    """
    appl1, appl2 = get_replication_appliances()
    replication_conf = appliance.server.zone.region.replication

    @request.addfinalizer
    def _destroy_appliances():
        appl1.destroy()
        appl2.destroy()

    appl1.ipapp.browser_steal = True
    with appl1.ipapp:
        configure_db_replication(appl2.hostname)
        # Replication is up and running, now stop the DB on the replication parent
        virtualcenter_provider.create()
        appl2.db.stop_db_service()
        sleep(60)
        appl2.db.start_db_service()
        # Replication should catch up with the backlog after the DB restart.
        wait_for(replication_conf.get_replication_status,
                 fail_condition=False, num_sec=360, delay=10,
                 fail_func=appl1.server.browser.refresh,
                 message="get_replication_status")
        assert replication_conf.get_replication_status()
        wait_for_a_provider()

    appl2.ipapp.browser_steal = True
    with appl2.ipapp:
        wait_for_a_provider()
        assert virtualcenter_provider.exists
def test_appliance_replicate_database_disconnection_with_backlog(request, provider):
    """Tests a database disconnection with backlog

    Metadata:
        test_flag: replication
    """
    appl1, appl2 = get_replication_appliances()

    def _teardown():
        appl1.destroy()
        appl2.destroy()
    request.addfinalizer(_teardown)

    appl1.ipapp.browser_steal = True
    with appl1.ipapp:
        configure_db_replication(appl2.address)
        # Replication is up and running, now stop the DB on the replication parent
        provider.create()
        stop_db_process(appl2.address)
        sleep(60)
        start_db_process(appl2.address)
        # After the restart, replication should work through the backlog.
        sel.force_navigate("cfg_diagnostics_region_replication")
        wait_for(lambda: conf.get_replication_status(navigate=False),
                 fail_condition=False, num_sec=360, delay=10,
                 fail_func=sel.refresh, message="get_replication_status")
        assert conf.get_replication_status()
        wait_for_a_provider()

    appl2.ipapp.browser_steal = True
    with appl2.ipapp:
        wait_for_a_provider()
        assert provider.exists
def test_distributed_vm_power_control(request, test_vm, provider, verify_vm_running,
                                      register_event, soft_assert, setup_provider):
    """Tests that a replication parent appliance can control the power state of a
    VM being managed by a replication child appliance.

    Metadata:
        test_flag: replication
    """
    appl1, appl2 = get_replication_appliances()

    @request.addfinalizer
    def _destroy_appliances():
        appl1.destroy()
        appl2.destroy()

    # Child: replicate up to the parent, then add the provider.
    appl1.ipapp.browser_steal = True
    with appl1.ipapp:
        configure_db_replication(appl2.address)
        provider.create()
        wait_for_a_provider()

    # Parent: power the VM off and confirm both UI state and backend state.
    appl2.ipapp.browser_steal = True
    with appl2.ipapp:
        register_event(test_vm.provider.type, "vm", test_vm.name,
                       ["vm_power_off_req", "vm_power_off"])
        test_vm.power_control_from_cfme(option=test_vm.POWER_OFF, cancel=False)
        flash.assert_message_contain("Stop initiated")
        pytest.sel.force_navigate('infrastructure_provider',
                                  context={'provider': test_vm.provider})
        test_vm.wait_for_vm_state_change(desired_state=test_vm.STATE_OFF, timeout=900)
        soft_assert(test_vm.find_quadicon().state == 'currentstate-off')
        soft_assert(not test_vm.provider.mgmt.is_vm_running(test_vm.name), "vm running")
def test_db_restore(request, soft_assert):
    """Back up the database of one appliance and restore it onto another.

    Sets up an infra and a cloud provider plus the automate role on the first
    appliance, takes a DB backup, copies the v2_key and the dump to the second
    appliance, restores there, and verifies that providers, VM discovery and
    server roles all survived the restore.
    """
    appl1, appl2 = get_appliances()

    def finalize():
        appl1.destroy()
        appl2.destroy()
    request.addfinalizer(finalize)

    appl1.ipapp.browser_steal = True
    with appl1.ipapp:
        # Manage infra,cloud providers and set some roles before taking a DB backup
        config.set_server_roles(automate=True)
        roles = config.get_server_roles()
        provider_crud = setup_a_provider('infra', 'virtualcenter', validate=True)
        provider_mgmt = provider_crud.get_mgmt_system()
        wait_for_a_provider()
        setup_a_provider('cloud', 'ec2', validate=True)
        cloud_provider.wait_for_a_provider()
        providers_appl1 = appl1.ipapp.managed_providers
        appl1.ipapp.backup_database()

    # Fetch v2_key and DB backup from the first appliance
    with appl1.ipapp.ssh_client as ssh:
        rand_filename = "/tmp/v2_key_{}".format(fauxfactory.gen_alphanumeric())
        ssh.get_file("/var/www/miq/vmdb/certs/v2_key", rand_filename)
        dump_filename = "/tmp/db_dump_{}".format(fauxfactory.gen_alphanumeric())
        ssh.get_file("/tmp/evm_db.backup", dump_filename)

    with appl2.ipapp.ssh_client as ssh:
        ssh.put_file(rand_filename, "/var/www/miq/vmdb/certs/v2_key")
        ssh.put_file(dump_filename, "/tmp/evm_db.backup")

    appl2.ipapp.browser_steal = True
    with appl2.ipapp:
        # Restore DB on the second appliance
        appl2.ipapp.restore_database()
        appl2.ipapp.wait_for_web_ui()
        wait_for_a_provider()
        cloud_provider.wait_for_a_provider()
        # Assert providers on the second appliance
        providers_appl2 = appl2.ipapp.managed_providers
        assert set(providers_appl2).issubset(providers_appl1), (
            'Restored DB is missing some providers')
        # Verify that existing provider can detect new VMs on the second appliance
        vm = provision_vm(request, provider_crud, provider_mgmt)
        soft_assert(vm.find_quadicon().state == 'currentstate-on')
        soft_assert(vm.provider_crud.get_mgmt_system().is_vm_running(vm.name),
                    "vm running")
        # Assert server roles on the second appliance.
        # .items() instead of the Python-2-only .iteritems() so the test also
        # runs under Python 3; behavior is identical for iteration.
        for role, is_enabled in config.get_server_roles(db=False).items():
            if is_enabled:
                assert roles[role], "Role '%s' is selected but should not be" % role
            else:
                assert not roles[role], "Role '%s' is not selected but should be" % role
def test_providers_discovery(request, provider):
    """Tests provider discovery

    Metadata:
        test_flag: crud
    """
    provider.discover()
    expected_flash = 'Infrastructure Providers: Discovery successfully initiated'
    flash.assert_message_match(expected_flash)
    # Make sure discovered providers are removed again after the test.
    request.addfinalizer(providers.clear_infra_providers)
    wait_for_a_provider()
def test_providers_discovery(request, provider):
    """Tests provider discovery

    Metadata:
        test_flag: crud
    """
    provider.discover()
    expected_flash = 'Infrastructure Providers: Discovery successfully initiated'
    flash.assert_message_match(expected_flash)
    # Clean up any providers that discovery added.
    request.addfinalizer(lambda: BaseProvider.clear_provider_by_type(InfraProvider))
    wait_for_a_provider()
def test_providers_discovery(request, provider_crud):
    """Tests provider discovery

    Metadata:
        test_flag: crud
    """
    provider.discover_from_provider(provider_crud)
    expected_flash = 'Infrastructure Providers: Discovery successfully initiated'
    flash.assert_message_match(expected_flash)
    # Clean up any providers that discovery added.
    request.addfinalizer(providers.clear_infra_providers)
    provider.wait_for_a_provider()
def test_providers_discovery(request, provider):
    """Tests provider discovery

    Metadata:
        test_flag: crud
    """
    provider.discover()
    discovery_view = provider.create_view(InfraProvidersView)
    discovery_view.flash.assert_success_message(
        'Infrastructure Providers: Discovery successfully initiated')
    # Clean up any providers that discovery added.
    request.addfinalizer(InfraProvider.clear_providers)
    wait_for_a_provider()
def test_external_database_appliance(request, virtualcenter_provider):
    """Tests that one appliance can externally connect to the database of another
    appliance.

    Metadata:
        test_flag: replication
    """
    appl1, appl2 = get_distributed_appliances()

    @request.addfinalizer
    def _destroy_appliances():
        appl1.destroy()
        appl2.destroy()

    # Add the provider on the first appliance...
    appl1.ipapp.browser_steal = True
    with appl1.ipapp:
        virtualcenter_provider.create()
        wait_for_a_provider()

    # ...and it must be visible from the second (shared external DB).
    appl2.ipapp.browser_steal = True
    with appl2.ipapp:
        wait_for_a_provider()
        assert virtualcenter_provider.exists
def test_external_database_appliance(request, provider):
    """Tests that one appliance can externally connect to the database of another
    appliance.

    Metadata:
        test_flag: replication
    """
    appl1, appl2 = get_distributed_appliances()

    def _teardown():
        appl1.destroy()
        appl2.destroy()
    request.addfinalizer(_teardown)

    # Add the provider on the first appliance...
    appl1.ipapp.browser_steal = True
    with appl1.ipapp:
        provider.create()
        wait_for_a_provider()

    # ...and confirm the second appliance sees it through the shared DB.
    appl2.ipapp.browser_steal = True
    with appl2.ipapp:
        wait_for_a_provider()
        assert provider.exists
def test_appliance_replicate_between_regions(request, provider):
    """Tests that a provider added to an appliance in one region is replicated to the
    parent appliance in another region.

    Metadata:
        test_flag: replication
    """
    appl1, appl2 = get_replication_appliances()

    @request.addfinalizer
    def _destroy_appliances():
        appl1.destroy()
        appl2.destroy()

    # Add the provider on the replicating (child) appliance.
    appl1.ipapp.browser_steal = True
    with appl1.ipapp:
        configure_db_replication(appl2.address)
        provider.create()
        wait_for_a_provider()

    # The provider record must appear on the parent region's appliance.
    appl2.ipapp.browser_steal = True
    with appl2.ipapp:
        wait_for_a_provider()
        assert provider.exists
def test_appliance_replicate_between_regions(request, virtualcenter_provider):
    """Tests that a provider added to an appliance in one region is replicated to the
    parent appliance in another region.

    Metadata:
        test_flag: replication
    """
    appl1, appl2 = get_replication_appliances()

    def _teardown():
        appl1.destroy()
        appl2.destroy()
    request.addfinalizer(_teardown)

    # Add the provider on the replicating (child) appliance.
    appl1.ipapp.browser_steal = True
    with appl1.ipapp:
        configure_db_replication(appl2.hostname)
        virtualcenter_provider.create()
        wait_for_a_provider()

    # The provider record must appear on the parent region's appliance.
    appl2.ipapp.browser_steal = True
    with appl2.ipapp:
        wait_for_a_provider()
        assert virtualcenter_provider.exists
def test_distributed_vm_power_control(request, test_vm, vmware_provider,
                                      verify_vm_running, register_event, soft_assert):
    """Tests that a replication parent appliance can control the power state of a
    VM being managed by a replication child appliance.

    Metadata:
        test_flag: replication
    """
    appl1, appl2 = get_replication_appliances()

    @request.addfinalizer
    def _destroy_appliances():
        appl1.destroy()
        appl2.destroy()

    # Child: replicate up to the parent, then add the provider.
    appl1.ipapp.browser_steal = True
    with appl1.ipapp:
        configure_db_replication(appl2.address)
        vmware_provider.create()
        wait_for_a_provider()

    appl2.ipapp.browser_steal = True
    # Pre-build the event factory for this VM's power-off events.
    builder = EventBuilder(get_or_create_current_appliance())
    base_evt = partial(builder.new_event, target_type='VmOrTemplate',
                       target_name=test_vm.name)
    with appl2.ipapp:
        register_event(base_evt(event_type='vm_poweroff'),
                       base_evt(event_type='request_vm_poweroff'))
        test_vm.power_control_from_cfme(option=test_vm.POWER_OFF, cancel=False)
        flash.assert_message_contain("Stop initiated")
        navigate_to(test_vm.provider, 'Details')
        test_vm.wait_for_vm_state_change(desired_state=test_vm.STATE_OFF, timeout=900)
        soft_assert(test_vm.find_quadicon().state == 'currentstate-off')
        soft_assert(not test_vm.provider.mgmt.is_vm_running(test_vm.name), "vm running")
def test_providers_discovery(provider_crud):
    """Kick off discovery for the given provider and wait for one to appear."""
    provider.discover_from_provider(provider_crud)
    expected_flash = 'Infrastructure Providers: Discovery successfully initiated'
    flash.assert_message_match(expected_flash)
    provider.wait_for_a_provider()
def test_db_restore(request, soft_assert, virtualcenter_provider_crud, ec2_provider_crud,
                    appliance):
    """Back up the database of one appliance and restore it onto another.

    Sets up an infra and a cloud provider plus the automate role on the first
    appliance, takes a DB backup, copies the v2_key and the dump to the second
    appliance, restores there, and then verifies that providers, VM discovery
    and server roles all survived the restore.
    """
    appl1, appl2 = get_appliances()

    def finalize():
        appl1.destroy()
        appl2.destroy()
    request.addfinalizer(finalize)

    appl1.ipapp.browser_steal = True
    with appl1.ipapp:
        # Manage infra,cloud providers and set some roles before taking a DB backup
        server_info = appliance.server.settings
        server_info.enable_server_roles('automate')
        roles = server_info.server_roles_db
        virtualcenter_provider_crud.setup()
        wait_for_a_provider()
        ec2_provider_crud.setup()
        cloud_provider.wait_for_a_provider()
        providers_appl1 = appl1.ipapp.managed_known_providers
        appl1.ipapp.db.backup()

    # Fetch v2_key and DB backup from the first appliance
    with appl1.ipapp.ssh_client as ssh:
        rand_filename = "/tmp/v2_key_{}".format(fauxfactory.gen_alphanumeric())
        ssh.get_file("/var/www/miq/vmdb/certs/v2_key", rand_filename)
        dump_filename = "/tmp/db_dump_{}".format(fauxfactory.gen_alphanumeric())
        ssh.get_file("/tmp/evm_db.backup", dump_filename)

    with appl2.ipapp.ssh_client as ssh:
        ssh.put_file(rand_filename, "/var/www/miq/vmdb/certs/v2_key")
        ssh.put_file(dump_filename, "/tmp/evm_db.backup")

    appl2.ipapp.browser_steal = True
    with appl2.ipapp:
        # Restore DB on the second appliance
        appl2.ipapp.evmserverd.stop()
        appl2.ipapp.db.drop()
        appl2.ipapp.db.restore()
        appl2.ipapp.start_evm_service()
        appl2.ipapp.wait_for_web_ui()
        wait_for_a_provider()
        cloud_provider.wait_for_a_provider()
        # Assert providers on the second appliance
        providers_appl2 = appl2.ipapp.managed_known_providers
        assert set(providers_appl2).issubset(providers_appl1), (
            'Restored DB is missing some providers'
        )
        # Verify that existing provider can detect new VMs on the second appliance
        vm = provision_vm(request, virtualcenter_provider_crud)
        soft_assert(vm.find_quadicon().data['state'] == 'currentstate-on')
        soft_assert(vm.provider.mgmt.is_vm_running(vm.name), "vm running")
        # Assert server roles on the second appliance.
        # .items() instead of the Python-2-only .iteritems() so the test also
        # runs under Python 3; behavior is identical for iteration.
        for role, is_enabled in server_info.server_roles_ui.items():
            if is_enabled:
                assert roles[role], "Role '{}' is selected but should not be".format(role)
            else:
                assert not roles[role], "Role '{}' is not selected but should be".format(role)
def test_providers_discovery(provider_crud):
    """Kick off discovery for the given provider and wait for one to appear."""
    provider.discover_from_provider(provider_crud)
    expected_flash = 'Infrastructure Providers: Discovery successfully initiated'
    flash.assert_message_match(expected_flash)
    provider.wait_for_a_provider()