def test_update_ha_webui(ha_appliances_with_providers, appliance, request, old_version):
    """Update the third appliance of an HA trio via the web UI, force a DB
    failover, and verify that provisioning still works afterwards.

    Polarion:
        assignee: jhenner
        caseimportance: high
        casecomponent: Appliance
        initialEstimate: 1/4h
    """
    target = ha_appliances_with_providers[2]
    update_appliance(target)
    wait_for(
        do_appliance_versions_match,
        func_args=(appliance, target),
        num_sec=900,
        delay=20,
        handle_exception=True,
        message='Waiting for appliance to update',
    )
    # Stop the primary database service to trigger an HA failover.
    stop_result = ha_appliances_with_providers[0].ssh_client.run_command(
        'systemctl stop $APPLIANCE_PG_SERVICE', timeout=15)
    assert stop_result.success, "Failed to stop APPLIANCE_PG_SERVICE: {}".format(
        stop_result.output)

    def failover_in_progress():
        # The HA admin log records when the standby starts taking over.
        grep_result = target.ssh_client.run_command(
            "grep 'Starting to execute failover' /var/www/miq/vmdb/log/ha_admin.log")
        return grep_result.success

    wait_for(failover_in_progress, timeout=450, handle_exception=True,
             message='Waiting for HA failover')
    target.wait_for_evm_service()
    target.wait_for_web_ui()
    # The existing provider must still be able to provision and detect new VMs.
    vm_crud = provider_app_crud(VMwareProvider, target)
    new_vm = provision_vm(request, vm_crud)
    assert new_vm.provider.mgmt.does_vm_exist(new_vm.name), "vm not provisioned"
def test_update_embedded_ansible_webui(enabled_embedded_appliance, appliance, old_version):
    """Update an appliance that has the embedded ansible role enabled and
    verify the role still works: services come back up and an ansible
    repository can be created and refreshed successfully.

    Polarion:
        assignee: jhenner
        caseimportance: high
        casecomponent: Appliance
        initialEstimate: 1/4h
    """
    update_appliance(enabled_embedded_appliance)
    wait_for(
        do_appliance_versions_match,
        func_args=(appliance, enabled_embedded_appliance),
        num_sec=900,
        delay=20,
        handle_exception=True,
        message='Waiting for appliance to update',
    )
    # All ansible-related services must be running again after the update.
    assert wait_for(func=lambda: enabled_embedded_appliance.is_embedded_ansible_running,
                    num_sec=90)
    assert wait_for(func=lambda: enabled_embedded_appliance.rabbitmq_server.running,
                    num_sec=60)
    assert wait_for(func=lambda: enabled_embedded_appliance.nginx.running,
                    num_sec=60)
    repo_collection = enabled_embedded_appliance.collections.ansible_repositories
    repo_name = "example_{}".format(fauxfactory.gen_alpha())
    repo_description = "edited_{}".format(fauxfactory.gen_alpha())
    try:
        repo = repo_collection.create(
            name=repo_name,
            url=cfme_data.ansible_links.playbook_repositories.console_db,
            description=repo_description)
    except KeyError:
        # The repository URL is read from the yaml config; skip if absent.
        pytest.skip("Skipping since no such key found in yaml")
    details_view = navigate_to(repo, "Details")
    trigger_refresh = details_view.toolbar.refresh.click
    wait_for(
        lambda: details_view.entities.summary("Properties").get_text_of(
            "Status").lower() == "successful",
        timeout=60,
        fail_func=trigger_refresh,
    )
def test_update_replicated_webui(replicated_appliances_with_providers, appliance, request,
                                 old_version, soft_assert):
    """Update both appliances of a replicated pair and verify that providers
    are preserved/replicated and provisioning still works on each appliance.

    Polarion:
        assignee: jhenner
        caseimportance: high
        casecomponent: Appliance
        initialEstimate: 1/4h
    """
    primary = replicated_appliances_with_providers[0]
    secondary = replicated_appliances_with_providers[1]
    providers_before_upgrade = set(primary.managed_provider_names)
    update_appliance(primary)
    update_appliance(secondary)
    for appl in (primary, secondary):
        wait_for(
            do_appliance_versions_match,
            func_args=(appliance, appl),
            num_sec=900,
            delay=20,
            handle_exception=True,
            message='Waiting for appliance to update',
        )
    # Providers must survive the upgrade and be replicated to the second appliance.
    assert providers_before_upgrade == set(secondary.managed_provider_names), \
        'Providers are missing'
    # Both appliances must still be able to provision and detect new VMs.
    crud_primary = provider_app_crud(VMwareProvider, primary)
    crud_secondary = provider_app_crud(VMwareProvider, secondary)
    vm1 = provision_vm(request, crud_primary)
    vm2 = provision_vm(request, crud_secondary)
    soft_assert(vm1.provider.mgmt.does_vm_exist(vm1.name), "vm not provisioned")
    soft_assert(vm2.provider.mgmt.does_vm_exist(vm2.name), "vm not provisioned")
def test_update_ha_webui(ha_appliances_with_providers, appliance, request, old_version):
    """ Tests updating an appliance with providers, also confirms that the
    provisioning continues to function correctly after the update has completed

    Polarion:
        assignee: jhenner
        caseimportance: high
        casecomponent: Appliance
        initialEstimate: 1/4h
    """
    update_appliance(ha_appliances_with_providers[2])
    wait_for(do_appliance_versions_match,
             func_args=(appliance, ha_appliances_with_providers[2]),
             num_sec=900, delay=20, handle_exception=True,
             message='Waiting for appliance to update')
    # Cause failover to occur
    result = ha_appliances_with_providers[0].ssh_client.run_command(
        'systemctl stop $APPLIANCE_PG_SERVICE', timeout=15)
    assert result.success, "Failed to stop APPLIANCE_PG_SERVICE: {}".format(
        result.output)

    def is_failover_started():
        # The HA admin log records when failover begins on the standby.
        return ha_appliances_with_providers[2].ssh_client.run_command(
            "grep 'Starting to execute failover' /var/www/miq/vmdb/log/ha_admin.log"
        ).success

    wait_for(is_failover_started, timeout=450, handle_exception=True,
             message='Waiting for HA failover')
    ha_appliances_with_providers[2].wait_for_evm_service()
    ha_appliances_with_providers[2].wait_for_web_ui()
    # Verify that existing provider can detect new VMs
    virtual_crud = provider_app_crud(VMwareProvider, ha_appliances_with_providers[2])
    vm = provision_vm(request, virtual_crud)
    assert vm.provider.mgmt.does_vm_exist(vm.name), "vm not provisioned"
def test_update_scap_webui(appliance_with_providers, appliance, request, old_version):
    """SCAP-harden an appliance with providers, update it, then confirm the
    hardening rules still pass and provisioning still works.

    Polarion:
        assignee: jhenner
        caseimportance: high
        casecomponent: Appliance
        initialEstimate: 1/4h
    """
    appliance_with_providers.appliance_console.scap_harden_appliance()
    failed_rules = appliance_with_providers.appliance_console.scap_check_rules()
    assert not failed_rules, "Some rules have failed, check log"
    update_appliance(appliance_with_providers)
    wait_for(
        do_appliance_versions_match,
        func_args=(appliance, appliance_with_providers),
        num_sec=900,
        delay=20,
        handle_exception=True,
        message='Waiting for appliance to update',
    )
    # Re-check that the SCAP rules are still applied after the update.
    failed_rules = appliance_with_providers.appliance_console.scap_check_rules()
    assert not failed_rules, "Some rules have failed, check log"
    # The existing provider must still be able to provision and detect new VMs.
    vm_crud = provider_app_crud(VMwareProvider, appliance_with_providers)
    new_vm = provision_vm(request, vm_crud)
    assert new_vm.provider.mgmt.does_vm_exist(new_vm.name), "vm not provisioned"
def test_update_embedded_ansible_webui(enabled_embedded_appliance, appliance, old_version):
    """ Tests updating an appliance which has embedded ansible role enabled, also confirms
    that the role continues to function correctly after the update has completed

    Polarion:
        assignee: jhenner
        caseimportance: high
        casecomponent: Appliance
        initialEstimate: 1/4h
    """
    update_appliance(enabled_embedded_appliance)
    wait_for(do_appliance_versions_match,
             func_args=(appliance, enabled_embedded_appliance),
             num_sec=900, delay=20, handle_exception=True,
             message='Waiting for appliance to update')
    # The ansible-related services must come back up after the update.
    assert wait_for(
        func=lambda: enabled_embedded_appliance.is_embedded_ansible_running, num_sec=90)
    assert wait_for(
        func=lambda: enabled_embedded_appliance.is_rabbitmq_running, num_sec=60)
    assert wait_for(func=lambda: enabled_embedded_appliance.is_nginx_running, num_sec=60)
    repositories = enabled_embedded_appliance.collections.ansible_repositories
    name = "example_{}".format(fauxfactory.gen_alpha())
    description = "edited_{}".format(fauxfactory.gen_alpha())
    repository = repositories.create(name, REPOSITORIES[0], description=description)
    # Refresh the repository until its status reports success.
    view = navigate_to(repository, "Details")
    refresh = view.toolbar.refresh.click
    wait_for(lambda: view.entities.summary("Properties").get_text_of("Status").
             lower() == "successful",
             timeout=60,
             fail_func=refresh)
def test_update_distributed_webui(ext_appliances_with_providers, appliance, request,
                                  old_version, soft_assert):
    """ Tests updating an appliance with providers, also confirms that the
    provisioning continues to function correctly after the update has completed

    Polarion:
        assignee: jhenner
        caseimportance: high
        casecomponent: Appliance
        initialEstimate: 1/4h
    """
    # Only the first (primary) appliance is updated here; both members are
    # then expected to reach the target version.
    update_appliance(ext_appliances_with_providers[0])
    wait_for(do_appliance_versions_match,
             func_args=(appliance, ext_appliances_with_providers[0]),
             num_sec=900, delay=20, handle_exception=True,
             message='Waiting for appliance to update')
    wait_for(do_appliance_versions_match,
             func_args=(appliance, ext_appliances_with_providers[1]),
             num_sec=900, delay=20, handle_exception=True,
             message='Waiting for appliance to update')
    # Verify that existing provider can detect new VMs on both apps
    virtual_crud_appl1 = provider_app_crud(VMwareProvider, ext_appliances_with_providers[0])
    virtual_crud_appl2 = provider_app_crud(VMwareProvider, ext_appliances_with_providers[1])
    vm1 = provision_vm(request, virtual_crud_appl1)
    vm2 = provision_vm(request, virtual_crud_appl2)
    soft_assert(vm1.provider.mgmt.does_vm_exist(vm1.name), "vm not provisioned")
    soft_assert(vm2.provider.mgmt.does_vm_exist(vm2.name), "vm not provisioned")
def test_update_distributed_webui(ext_appliances_with_providers, appliance, request,
                                  old_version, soft_assert):
    """Update the first appliance of a distributed set, wait for every member
    to reach the target version and be ready, then verify provisioning still
    works on both appliances.

    Polarion:
        assignee: jhenner
        caseimportance: high
        casecomponent: Appliance
        initialEstimate: 1/4h
    """
    update_appliance(ext_appliances_with_providers[0])
    # Wait for each member of the set to converge and become fully ready.
    for member in ext_appliances_with_providers:
        wait_for(
            do_appliance_versions_match,
            func_args=(appliance, member),
            num_sec=900,
            delay=20,
            handle_exception=True,
            message='Waiting for appliance to update',
        )
        member.evmserverd.wait_for_running()
        member.wait_for_miq_ready()
    # Both appliances must still be able to provision and detect new VMs.
    crud1 = provider_app_crud(VMwareProvider, ext_appliances_with_providers[0])
    crud2 = provider_app_crud(VMwareProvider, ext_appliances_with_providers[1])
    vm1 = provision_vm(request, crud1)
    vm2 = provision_vm(request, crud2)
    soft_assert(vm1.provider.mgmt.does_vm_exist(vm1.name), "vm not provisioned")
    soft_assert(vm2.provider.mgmt.does_vm_exist(vm2.name), "vm not provisioned")
def test_update_scap_webui(appliance_with_providers, appliance, request, old_version):
    """SCAP-harden an appliance with providers, update it, and confirm that
    the hardening rules still hold and provisioning still works.

    Polarion:
        assignee: jhenner
        caseimportance: high
        casecomponent: Appliance
        initialEstimate: 1/4h
    """
    appliance_with_providers.appliance_console.scap_harden_appliance()
    failed_rules = appliance_with_providers.appliance_console.scap_failures()
    assert not failed_rules, "Some rules have failed, check log"
    update_appliance(appliance_with_providers)
    wait_for(
        do_appliance_versions_match,
        func_args=(appliance, appliance_with_providers),
        num_sec=900,
        delay=20,
        handle_exception=True,
        message='Waiting for appliance to update',
    )
    # Confirm the SCAP rules are still applied after the update.
    failed_rules = appliance_with_providers.appliance_console.scap_failures()
    assert not failed_rules, "Some rules have failed, check log"
    # The existing provider must still be able to provision and detect new VMs.
    vm_crud = provider_app_crud(VMwareProvider, appliance_with_providers)
    new_vm = provision_vm(request, vm_crud)
    assert new_vm.provider.mgmt.does_vm_exist(new_vm.name), "vm not provisioned"
def test_update_replicated_webui(replicated_appliances_with_providers, appliance, request,
                                 old_version, soft_assert):
    """ Tests updating an appliance with providers, also confirms that the
    provisioning continues to function correctly after the update has completed

    Polarion:
        assignee: jhenner
        caseimportance: high
        casecomponent: Appliance
        initialEstimate: 1/4h
    """
    providers_before_upgrade = set(replicated_appliances_with_providers[0].managed_provider_names)
    update_appliance(replicated_appliances_with_providers[0])
    update_appliance(replicated_appliances_with_providers[1])
    wait_for(do_appliance_versions_match,
             func_args=(appliance, replicated_appliances_with_providers[0]),
             num_sec=900, delay=20, handle_exception=True,
             message='Waiting for appliance to update')
    wait_for(do_appliance_versions_match,
             func_args=(appliance, replicated_appliances_with_providers[1]),
             num_sec=900, delay=20, handle_exception=True,
             message='Waiting for appliance to update')
    # Assert providers exist after upgrade and replicated to second appliances
    assert providers_before_upgrade == set(
        replicated_appliances_with_providers[1].managed_provider_names), 'Providers are missing'
    # Verify that existing provider can detect new VMs on both apps
    virtual_crud_appl1 = provider_app_crud(VMwareProvider,
                                           replicated_appliances_with_providers[0])
    virtual_crud_appl2 = provider_app_crud(VMwareProvider,
                                           replicated_appliances_with_providers[1])
    vm1 = provision_vm(request, virtual_crud_appl1)
    vm2 = provision_vm(request, virtual_crud_appl2)
    soft_assert(vm1.provider.mgmt.does_vm_exist(vm1.name), "vm not provisioned")
    soft_assert(vm2.provider.mgmt.does_vm_exist(vm2.name), "vm not provisioned")
def test_update_embedded_ansible_webui(enabled_embedded_appliance, appliance, old_version):
    """ Tests updating an appliance which has embedded ansible role enabled, also confirms
    that the role continues to function correctly after the update has completed

    Polarion:
        assignee: jhenner
        caseimportance: high
        casecomponent: Appliance
        initialEstimate: 1/4h
    """
    update_appliance(enabled_embedded_appliance)
    wait_for(do_appliance_versions_match,
             func_args=(appliance, enabled_embedded_appliance),
             num_sec=900, delay=20, handle_exception=True,
             message='Waiting for appliance to update')
    # The ansible-related services must come back up after the update.
    assert wait_for(func=lambda: enabled_embedded_appliance.is_embedded_ansible_running,
                    num_sec=90)
    assert wait_for(func=lambda: enabled_embedded_appliance.is_rabbitmq_running, num_sec=60)
    assert wait_for(func=lambda: enabled_embedded_appliance.is_nginx_running, num_sec=60)
    repositories = enabled_embedded_appliance.collections.ansible_repositories
    name = "example_{}".format(fauxfactory.gen_alpha())
    description = "edited_{}".format(fauxfactory.gen_alpha())
    try:
        repository = repositories.create(
            name=name,
            url=cfme_data.ansible_links.playbook_repositories.console_db,
            description=description)
    except KeyError:
        # The repository URL is read from the yaml config; skip if absent.
        pytest.skip("Skipping since no such key found in yaml")
    # Refresh the repository until its status reports success.
    view = navigate_to(repository, "Details")
    refresh = view.toolbar.refresh.click
    wait_for(
        lambda: view.entities.summary("Properties").get_text_of("Status").lower() == "successful",
        timeout=60,
        fail_func=refresh
    )
def test_update_webui(appliance_with_providers, appliance, request, old_version):
    """ Tests updating an appliance with providers, also confirms that the
    provisioning continues to function correctly after the update has completed

    Polarion:
        assignee: jhenner
        caseimportance: high
        casecomponent: Appliance
        initialEstimate: 1/4h
    """
    update_appliance(appliance_with_providers)
    wait_for(do_appliance_versions_match,
             func_args=(appliance, appliance_with_providers),
             num_sec=900, delay=20, handle_exception=True,
             message='Waiting for appliance to update')
    # Verify that existing provider can detect new VMs on the second appliance
    virtual_crud = provider_app_crud(VMwareProvider, appliance_with_providers)
    vm = provision_vm(request, virtual_crud)
    assert vm.provider.mgmt.does_vm_exist(vm.name), "vm not provisioned"
def test_update_webui(appliance_with_providers, appliance, request, old_version):
    """ Tests updating an appliance with providers, also confirms that the
    provisioning continues to function correctly after the update has completed

    Polarion:
        assignee: jhenner
        caseimportance: high
        casecomponent: Appliance
        initialEstimate: 1/4h
    """
    update_appliance(appliance_with_providers)
    wait_for(do_appliance_versions_match,
             func_args=(appliance, appliance_with_providers),
             num_sec=900, delay=20, handle_exception=True,
             message='Waiting for appliance to update')
    # Verify that existing provider can detect new VMs on the second appliance
    virtual_crud = provider_app_crud(VMwareProvider, appliance_with_providers)
    vm = provision_vm(request, virtual_crud)
    assert vm.provider.mgmt.does_vm_exist(vm.name), "vm not provisioned"
def test_update_distributed_webui(ext_appliances_with_providers, appliance, request,
                                  old_version, soft_assert):
    """ Tests updating an appliance with providers, also confirms that the
    provisioning continues to function correctly after the update has completed

    Polarion:
        assignee: jhenner
        caseimportance: high
        casecomponent: Appliance
        initialEstimate: 1/4h
    """
    # Only the first (primary) appliance is updated here; both members are
    # then expected to reach the target version.
    update_appliance(ext_appliances_with_providers[0])
    wait_for(do_appliance_versions_match,
             func_args=(appliance, ext_appliances_with_providers[0]),
             num_sec=900, delay=20, handle_exception=True,
             message='Waiting for appliance to update')
    wait_for(do_appliance_versions_match,
             func_args=(appliance, ext_appliances_with_providers[1]),
             num_sec=900, delay=20, handle_exception=True,
             message='Waiting for appliance to update')
    # Verify that existing provider can detect new VMs on both apps
    virtual_crud_appl1 = provider_app_crud(VMwareProvider, ext_appliances_with_providers[0])
    virtual_crud_appl2 = provider_app_crud(VMwareProvider, ext_appliances_with_providers[1])
    vm1 = provision_vm(request, virtual_crud_appl1)
    vm2 = provision_vm(request, virtual_crud_appl2)
    soft_assert(vm1.provider.mgmt.does_vm_exist(vm1.name), "vm not provisioned")
    soft_assert(vm2.provider.mgmt.does_vm_exist(vm2.name), "vm not provisioned")