def test_update_distributed_webui(ext_appliances_with_providers, appliance, request, old_version,
                                  soft_assert):
    """ Tests updating an appliance with providers, also confirms that the
    provisioning continues to function correctly after the update has completed

    Polarion:
        assignee: jhenner
        caseimportance: high
        casecomponent: Appliance
        initialEstimate: 1/4h
    """
    # Update is triggered on the first appliance only.
    update_appliance(ext_appliances_with_providers[0])
    # Both appliances must end up reporting the expected (updated) version.
    for updated_appliance in ext_appliances_with_providers[:2]:
        wait_for(do_appliance_versions_match,
                 func_args=(appliance, updated_appliance),
                 num_sec=900, delay=20, handle_exception=True,
                 message='Waiting for appliance to update')
    # Verify that existing provider can detect new VMs on both apps
    virtual_crud_appl1 = provider_app_crud(VMwareProvider, ext_appliances_with_providers[0])
    virtual_crud_appl2 = provider_app_crud(VMwareProvider, ext_appliances_with_providers[1])
    vm1 = provision_vm(request, virtual_crud_appl1)
    vm2 = provision_vm(request, virtual_crud_appl2)
    soft_assert(vm1.provider.mgmt.does_vm_exist(vm1.name), "vm not provisioned")
    soft_assert(vm2.provider.mgmt.does_vm_exist(vm2.name), "vm not provisioned")
def test_update_replicated_webui(replicated_appliances_with_providers, appliance, request,
                                 old_version, soft_assert):
    """ Tests updating an appliance with providers, also confirms that the
    provisioning continues to function correctly after the update has completed

    Polarion:
        assignee: jhenner
        caseimportance: high
        casecomponent: Appliance
        initialEstimate: 1/4h
    """
    first_app = replicated_appliances_with_providers[0]
    second_app = replicated_appliances_with_providers[1]
    # Snapshot the provider list before upgrading so replication can be checked afterwards.
    providers_before_upgrade = set(first_app.managed_provider_names)
    update_appliance(first_app)
    update_appliance(second_app)
    for upgraded in (first_app, second_app):
        wait_for(do_appliance_versions_match, func_args=(appliance, upgraded),
                 num_sec=900, delay=20, handle_exception=True,
                 message='Waiting for appliance to update')
    # Assert providers exist after upgrade and replicated to second appliances
    assert providers_before_upgrade == set(second_app.managed_provider_names), \
        'Providers are missing'
    # Verify that existing provider can detect new VMs on both apps
    crud_first = provider_app_crud(VMwareProvider, first_app)
    crud_second = provider_app_crud(VMwareProvider, second_app)
    new_vm_first = provision_vm(request, crud_first)
    new_vm_second = provision_vm(request, crud_second)
    soft_assert(new_vm_first.provider.mgmt.does_vm_exist(new_vm_first.name),
                "vm not provisioned")
    soft_assert(new_vm_second.provider.mgmt.does_vm_exist(new_vm_second.name),
                "vm not provisioned")
def test_update_distributed_webui(ext_appliances_with_providers, appliance, request, old_version,
                                  soft_assert):
    """ Tests updating an appliance with providers, also confirms that the
    provisioning continues to function correctly after the update has completed

    Polarion:
        assignee: jhenner
        caseimportance: high
        casecomponent: Appliance
        initialEstimate: 1/4h
    """
    # Update is triggered on the first appliance; then every appliance is
    # required to reach the new version and come fully back up.
    update_appliance(ext_appliances_with_providers[0])
    for node in ext_appliances_with_providers:
        wait_for(do_appliance_versions_match, func_args=(appliance, node),
                 num_sec=900, delay=20, handle_exception=True,
                 message='Waiting for appliance to update')
        node.evmserverd.wait_for_running()
        node.wait_for_miq_ready()
    # Verify that existing provider can detect new VMs on both apps
    crud_node0 = provider_app_crud(VMwareProvider, ext_appliances_with_providers[0])
    crud_node1 = provider_app_crud(VMwareProvider, ext_appliances_with_providers[1])
    new_vm0 = provision_vm(request, crud_node0)
    new_vm1 = provision_vm(request, crud_node1)
    soft_assert(new_vm0.provider.mgmt.does_vm_exist(new_vm0.name), "vm not provisioned")
    soft_assert(new_vm1.provider.mgmt.does_vm_exist(new_vm1.name), "vm not provisioned")
def test_update_replicated_webui(replicated_appliances_with_providers, appliance, request,
                                 old_version, soft_assert):
    """ Tests updating an appliance with providers, also confirms that the
    provisioning continues to function correctly after the update has completed

    Polarion:
        assignee: jhenner
        caseimportance: high
        casecomponent: Appliance
        initialEstimate: 1/4h
    """
    # Snapshot the provider list before upgrading so replication can be checked afterwards.
    providers_before_upgrade = set(replicated_appliances_with_providers[0].managed_provider_names)
    update_appliance(replicated_appliances_with_providers[0])
    update_appliance(replicated_appliances_with_providers[1])
    # Both replicated appliances must reach the expected (updated) version.
    wait_for(do_appliance_versions_match,
             func_args=(appliance, replicated_appliances_with_providers[0]),
             num_sec=900, delay=20, handle_exception=True,
             message='Waiting for appliance to update')
    wait_for(do_appliance_versions_match,
             func_args=(appliance, replicated_appliances_with_providers[1]),
             num_sec=900, delay=20, handle_exception=True,
             message='Waiting for appliance to update')
    # Assert providers exist after upgrade and replicated to second appliances
    assert providers_before_upgrade == set(
        replicated_appliances_with_providers[1].managed_provider_names), 'Providers are missing'
    # Verify that existing provider can detect new VMs on both apps
    virtual_crud_appl1 = provider_app_crud(VMwareProvider, replicated_appliances_with_providers[0])
    virtual_crud_appl2 = provider_app_crud(VMwareProvider, replicated_appliances_with_providers[1])
    vm1 = provision_vm(request, virtual_crud_appl1)
    vm2 = provision_vm(request, virtual_crud_appl2)
    soft_assert(vm1.provider.mgmt.does_vm_exist(vm1.name), "vm not provisioned")
    soft_assert(vm2.provider.mgmt.does_vm_exist(vm2.name), "vm not provisioned")
def test_update_ha_webui(ha_appliances_with_providers, appliance, request, old_version):
    """ Tests updating an appliance with providers, also confirms that the
    provisioning continues to function correctly after the update has completed

    Polarion:
        assignee: jhenner
        caseimportance: high
        casecomponent: Appliance
        initialEstimate: 1/4h
    """
    # appliances[2] is the one that gets updated and whose UI is checked;
    # appliances[0] hosts the PG service that is stopped to force failover.
    evm_app = ha_appliances_with_providers[2]
    db_app = ha_appliances_with_providers[0]
    update_appliance(evm_app)
    wait_for(do_appliance_versions_match, func_args=(appliance, evm_app),
             num_sec=900, delay=20, handle_exception=True,
             message='Waiting for appliance to update')
    # Cause failover to occur
    result = db_app.ssh_client.run_command('systemctl stop $APPLIANCE_PG_SERVICE', timeout=15)
    assert result.success, "Failed to stop APPLIANCE_PG_SERVICE: {}".format(result.output)
    # Failover has begun once its marker line shows up in ha_admin.log.
    wait_for(
        lambda: evm_app.ssh_client.run_command(
            "grep 'Starting to execute failover' /var/www/miq/vmdb/log/ha_admin.log").success,
        timeout=450, handle_exception=True, message='Waiting for HA failover')
    evm_app.wait_for_evm_service()
    evm_app.wait_for_web_ui()
    # Verify that existing provider can detect new VMs
    vmware_crud = provider_app_crud(VMwareProvider, evm_app)
    new_vm = provision_vm(request, vmware_crud)
    assert new_vm.provider.mgmt.does_vm_exist(new_vm.name), "vm not provisioned"
def test_update_scap_webui(appliance_with_providers, appliance, request, old_version):
    """ Tests updating an appliance with providers and scap hardened, also confirms that the
    provisioning continues to function correctly after the update has completed

    Polarion:
        assignee: jhenner
        caseimportance: high
        casecomponent: Appliance
        initialEstimate: 1/4h
    """
    hardened_app = appliance_with_providers
    # Harden the appliance and check no SCAP rule fails before updating.
    hardened_app.appliance_console.scap_harden_appliance()
    assert not hardened_app.appliance_console.scap_check_rules(), \
        "Some rules have failed, check log"
    update_appliance(hardened_app)
    wait_for(do_appliance_versions_match, func_args=(appliance, hardened_app),
             num_sec=900, delay=20, handle_exception=True,
             message='Waiting for appliance to update')
    # Confirm the SCAP rules still pass after the update.
    assert not hardened_app.appliance_console.scap_check_rules(), \
        "Some rules have failed, check log"
    # Verify that existing provider can detect new VMs on the second appliance
    vmware_crud = provider_app_crud(VMwareProvider, hardened_app)
    new_vm = provision_vm(request, vmware_crud)
    assert new_vm.provider.mgmt.does_vm_exist(new_vm.name), "vm not provisioned"
def test_update_ha_webui(ha_appliances_with_providers, appliance, request, old_version):
    """ Tests updating an appliance with providers, also confirms that the
    provisioning continues to function correctly after the update has completed

    Polarion:
        assignee: jhenner
        caseimportance: high
        casecomponent: Appliance
        initialEstimate: 1/4h
    """
    update_appliance(ha_appliances_with_providers[2])
    wait_for(do_appliance_versions_match, func_args=(appliance, ha_appliances_with_providers[2]),
             num_sec=900, delay=20, handle_exception=True,
             message='Waiting for appliance to update')
    # Cause failover to occur
    result = ha_appliances_with_providers[0].ssh_client.run_command(
        'systemctl stop $APPLIANCE_PG_SERVICE', timeout=15)
    assert result.success, "Failed to stop APPLIANCE_PG_SERVICE: {}".format(
        result.output)

    def is_failover_started():
        # Failover has begun once its marker line shows up in ha_admin.log.
        return ha_appliances_with_providers[2].ssh_client.run_command(
            "grep 'Starting to execute failover' /var/www/miq/vmdb/log/ha_admin.log"
        ).success

    wait_for(is_failover_started, timeout=450, handle_exception=True,
             message='Waiting for HA failover')
    ha_appliances_with_providers[2].wait_for_evm_service()
    ha_appliances_with_providers[2].wait_for_web_ui()
    # Verify that existing provider can detect new VMs
    virtual_crud = provider_app_crud(VMwareProvider, ha_appliances_with_providers[2])
    vm = provision_vm(request, virtual_crud)
    assert vm.provider.mgmt.does_vm_exist(vm.name), "vm not provisioned"
def test_update_ha(ha_appliances_with_providers, appliance, update_strategy, request, old_version):
    """ Tests updating an appliance with providers using webui, also confirms that the
    provisioning continues to function correctly after the update has completed

    Polarion:
        assignee: jhenner
        caseimportance: high
        casecomponent: Appliance
        initialEstimate: 1/4h

    Bugzilla: 1704835
    """
    evm_app = ha_appliances_with_providers[2]
    update_strategy(evm_app)
    wait_for(do_appliance_versions_match, func_args=(appliance, evm_app),
             num_sec=900, delay=20, handle_exception=True,
             message='Waiting for appliance to update')
    # Exercise DB HA failover between the DB appliance and the updated one.
    check_db_ha_failover(ha_appliances_with_providers[0], evm_app)
    # Verify that existing provider can detect new VMs
    new_vm = provision_vm(request, provider_app_crud(VMwareProvider, evm_app))
    assert new_vm.provider.mgmt.does_vm_exist(new_vm.name), "vm not provisioned"
def test_update_scap_webui(appliance_with_providers, appliance, request, old_version):
    """ Tests updating an appliance with providers and scap hardened, also confirms that the
    provisioning continues to function correctly after the update has completed

    Polarion:
        assignee: jhenner
        caseimportance: high
        casecomponent: Appliance
        initialEstimate: 1/4h
    """
    hardened_app = appliance_with_providers
    # Harden the appliance and check no SCAP rule fails before updating.
    hardened_app.appliance_console.scap_harden_appliance()
    assert not hardened_app.appliance_console.scap_failures(), \
        "Some rules have failed, check log"
    update_appliance(hardened_app)
    wait_for(do_appliance_versions_match, func_args=(appliance, hardened_app),
             num_sec=900, delay=20, handle_exception=True,
             message='Waiting for appliance to update')
    # Confirm the SCAP rules still pass after the update.
    assert not hardened_app.appliance_console.scap_failures(), \
        "Some rules have failed, check log"
    # Verify that existing provider can detect new VMs on the second appliance
    vmware_crud = provider_app_crud(VMwareProvider, hardened_app)
    new_vm = provision_vm(request, vmware_crud)
    assert new_vm.provider.mgmt.does_vm_exist(new_vm.name), "vm not provisioned"
def test_update_ha(ha_appliances_with_providers, appliance, update_strategy, request, old_version):
    """ Tests updating an appliance with providers using webui, also confirms that the
    provisioning continues to function correctly after the update has completed

    Polarion:
        assignee: jhenner
        caseimportance: high
        casecomponent: Appliance
        initialEstimate: 1/4h
    """
    evm_log = '/var/www/miq/vmdb/log/evm.log'
    evm_app = ha_appliances_with_providers[2]
    db_app = ha_appliances_with_providers[0]
    update_strategy(evm_app)
    wait_for(do_appliance_versions_match, func_args=(appliance, evm_app),
             num_sec=900, delay=20, handle_exception=True,
             message='Waiting for appliance to update')
    # On streams affected by BZ 1704835, restart the failover monitor and
    # require its startup line in evm.log plus a running state afterwards.
    if BZ(1704835, forced_streams=get_stream(evm_app.version)).blocks:
        with LogValidator(evm_log,
                          matched_patterns=[r'Starting database failover monitor'],
                          hostname=evm_app.hostname).waiting(wait=30):
            evm_app.evm_failover_monitor.restart()
        assert evm_app.evm_failover_monitor.running
    with LogValidator(evm_log,
                      matched_patterns=['Starting to execute failover'],
                      hostname=evm_app.hostname).waiting(wait=450):
        # Cause failover to occur
        result = db_app.ssh_client.run_command('systemctl stop $APPLIANCE_PG_SERVICE',
                                               timeout=15)
        assert result.success, "Failed to stop APPLIANCE_PG_SERVICE: {}".format(
            result.output)
    evm_app.evmserverd.wait_for_running()
    evm_app.wait_for_web_ui()
    # Verify that existing provider can detect new VMs
    vmware_crud = provider_app_crud(VMwareProvider, evm_app)
    new_vm = provision_vm(request, vmware_crud)
    assert new_vm.provider.mgmt.does_vm_exist(new_vm.name), "vm not provisioned"
def test_update_distributed_webui(ext_appliances_with_providers, appliance, request, old_version,
                                  soft_assert):
    """ Tests updating an appliance with providers, also confirms that the
    provisioning continues to function correctly after the update has completed

    Polarion:
        assignee: jhenner
        caseimportance: high
        initialEstimate: 1/4h
    """
    first_app = ext_appliances_with_providers[0]
    second_app = ext_appliances_with_providers[1]
    # Update is triggered on the first appliance only; both appliances are
    # then required to reach the expected (updated) version.
    update_appliance(first_app)
    for updated in (first_app, second_app):
        wait_for(do_appliance_versions_match, func_args=(appliance, updated),
                 num_sec=900, delay=20, handle_exception=True,
                 message='Waiting for appliance to update')
    # Verify that existing provider can detect new VMs on both apps
    crud_first = provider_app_crud(VMwareProvider, first_app)
    crud_second = provider_app_crud(VMwareProvider, second_app)
    new_vm_first = provision_vm(request, crud_first)
    new_vm_second = provision_vm(request, crud_second)
    soft_assert(new_vm_first.provider.mgmt.does_vm_exist(new_vm_first.name),
                "vm not provisioned")
    soft_assert(new_vm_second.provider.mgmt.does_vm_exist(new_vm_second.name),
                "vm not provisioned")
def test_update_webui(appliance_with_providers, appliance, request, old_version):
    """ Tests updating an appliance with providers, also confirms that the
    provisioning continues to function correctly after the update has completed

    Polarion:
        assignee: jhenner
        caseimportance: high
        initialEstimate: 1/4h
    """
    update_appliance(appliance_with_providers)
    # The appliance must end up reporting the expected (updated) version.
    wait_for(do_appliance_versions_match, func_args=(appliance, appliance_with_providers),
             num_sec=900, delay=20, handle_exception=True,
             message='Waiting for appliance to update')
    # Verify that existing provider can detect new VMs on the second appliance
    virtual_crud = provider_app_crud(VMwareProvider, appliance_with_providers)
    vm = provision_vm(request, virtual_crud)
    assert vm.provider.mgmt.does_vm_exist(vm.name), "vm not provisioned"
def test_update_ha(ha_appliances_with_providers, appliance, update_strategy, request, old_version):
    """ Tests updating an appliance with providers using webui, also confirms that the
    provisioning continues to function correctly after the update has completed

    Polarion:
        assignee: jhenner
        caseimportance: high
        casecomponent: Appliance
        initialEstimate: 1/4h

    Bugzilla: 1704835
    """
    evm_log = '/var/www/miq/vmdb/log/evm.log'
    evm_app = ha_appliances_with_providers[2]
    update_strategy(evm_app)
    wait_for(do_appliance_versions_match, func_args=(appliance, evm_app),
             num_sec=900, delay=20, handle_exception=True,
             message='Waiting for appliance to update')
    # Restart the failover monitor, requiring its startup line in evm.log and
    # a running state afterwards.
    with LogValidator(evm_log,
                      matched_patterns=['Starting database failover monitor'],
                      hostname=evm_app.hostname).waiting(wait=60):
        evm_app.evm_failover_monitor.restart()
    assert evm_app.evm_failover_monitor.running
    with LogValidator(evm_log,
                      matched_patterns=['Starting to execute failover'],
                      hostname=evm_app.hostname).waiting(wait=450):
        # Cause failover to occur
        ha_appliances_with_providers[0].db_service.stop()
    evm_app.evmserverd.wait_for_running()
    evm_app.wait_for_miq_ready()
    # Verify that existing provider can detect new VMs
    vmware_crud = provider_app_crud(VMwareProvider, evm_app)
    new_vm = provision_vm(request, vmware_crud)
    assert new_vm.provider.mgmt.does_vm_exist(new_vm.name), "vm not provisioned"
def test_update_webui(appliance_with_providers, appliance, request, old_version):
    """ Tests updating an appliance with providers, also confirms that the
    provisioning continues to function correctly after the update has completed

    Polarion:
        assignee: jhenner
        caseimportance: high
        initialEstimate: 1/4h
    """
    target_app = appliance_with_providers
    update_appliance(target_app)
    # The appliance must end up reporting the expected (updated) version.
    wait_for(do_appliance_versions_match, func_args=(appliance, target_app),
             num_sec=900, delay=20, handle_exception=True,
             message='Waiting for appliance to update')
    # Verify that existing provider can detect new VMs on the second appliance
    new_vm = provision_vm(request, provider_app_crud(VMwareProvider, target_app))
    assert new_vm.provider.mgmt.does_vm_exist(new_vm.name), "vm not provisioned"