Example #1
def test_update_replicated_webui(replicated_appliances_with_providers, appliance, request,
                                 old_version, soft_assert):
    """ Tests updating an appliance with providers, also confirms that the
            provisioning continues to function correctly after the update has completed

    Polarion:
        assignee: jhenner
        caseimportance: high
        initialEstimate: None
    """
    providers_before_upgrade = set(replicated_appliances_with_providers[0].managed_provider_names)
    update_appliance(replicated_appliances_with_providers[0])
    update_appliance(replicated_appliances_with_providers[1])
    wait_for(do_appliance_versions_match,
             func_args=(appliance, replicated_appliances_with_providers[0]),
             num_sec=900, delay=20, handle_exception=True,
             message='Waiting for appliance to update')
    wait_for(do_appliance_versions_match,
             func_args=(appliance, replicated_appliances_with_providers[1]),
             num_sec=900, delay=20, handle_exception=True,
             message='Waiting for appliance to update')

    # Assert providers exist after the upgrade and are replicated to the second appliance
    assert providers_before_upgrade == set(
        replicated_appliances_with_providers[1].managed_provider_names), 'Providers are missing'
    # Verify that existing provider can detect new VMs on both apps
    virtual_crud_appl1 = provider_app_crud(VMwareProvider, replicated_appliances_with_providers[0])
    virtual_crud_appl2 = provider_app_crud(VMwareProvider, replicated_appliances_with_providers[1])
    vm1 = provision_vm(request, virtual_crud_appl1)
    vm2 = provision_vm(request, virtual_crud_appl2)
    soft_assert(vm1.provider.mgmt.does_vm_exist(vm1.name), "vm not provisioned")
    soft_assert(vm2.provider.mgmt.does_vm_exist(vm2.name), "vm not provisioned")
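
These update tests poll wait_for() until the freshly updated appliance reports the same version as the reference appliance. The do_appliance_versions_match predicate itself is not shown in this listing; a minimal sketch of what it is assumed to look like (name and arguments taken from the wait_for() calls above, body illustrative only):

def do_appliance_versions_match(appliance1, appliance2):
    """Assumed predicate: return True once both appliances report the same version.

    Reading .version can raise while an appliance is still restarting, which is why the
    callers pass handle_exception=True to wait_for().
    """
    return appliance1.version == appliance2.version
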
def test_update_distributed_webui(ext_appliances_with_providers, appliance,
                                  request, old_version, soft_assert):
    """ Tests updating an appliance with providers, also confirms that the
    provisioning continues to function correctly after the update has completed

    Polarion:
        assignee: jhenner
        caseimportance: high
        casecomponent: Appliance
        initialEstimate: 1/4h
    """
    update_appliance(ext_appliances_with_providers[0])
    for updated_appliance in ext_appliances_with_providers:
        wait_for(do_appliance_versions_match,
                 func_args=(appliance, updated_appliance),
                 num_sec=900,
                 delay=20,
                 handle_exception=True,
                 message='Waiting for appliance to update')
        updated_appliance.evmserverd.wait_for_running()
        updated_appliance.wait_for_miq_ready()

    # Verify that existing provider can detect new VMs on both apps
    virtual_crud_appl1 = provider_app_crud(VMwareProvider,
                                           ext_appliances_with_providers[0])
    virtual_crud_appl2 = provider_app_crud(VMwareProvider,
                                           ext_appliances_with_providers[1])
    vm1 = provision_vm(request, virtual_crud_appl1)
    vm2 = provision_vm(request, virtual_crud_appl2)
    soft_assert(vm1.provider.mgmt.does_vm_exist(vm1.name),
                "vm not provisioned")
    soft_assert(vm2.provider.mgmt.does_vm_exist(vm2.name),
                "vm not provisioned")
def test_replication_global_to_remote_new_vm_from_template(
        request, setup_replication):
    """
    Create a new VM from template in remote region from global region

    Polarion:
        assignee: dgaikwad
        casecomponent: Replication
        caseimportance: critical
        initialEstimate: 1/6h
        testSteps:
            1. Configure first appliance as Global.
            2. Configure second appliance as Remote, subscribed to Global.
            3. Create a VM from template in Remote region using the Global appliance.
        expectedResults:
            1.
            2.
            3. VM created in the Remote, no errors.
    """
    remote_app, global_app = setup_replication
    remote_provider = provider_app_crud(RHEVMProvider, remote_app)
    remote_provider.setup()
    assert remote_provider.name in remote_app.managed_provider_names, "Provider is not available."

    new_vm_name = fauxfactory.gen_alphanumeric(start="test_replication_",
                                               length=25).lower()
    global_provider = provider_app_crud(RHEVMProvider, global_app)
    vm = create_vm(provider=global_provider, vm_name=new_vm_name)
    request.addfinalizer(vm.cleanup_on_provider)
    remote_provider.refresh_provider_relationships()
    assert (remote_app.collections.infra_vms.instantiate(
        new_vm_name, remote_provider).exists), (
            f"{new_vm_name} vm is not found in Remote Appliance")
def test_update_replicated_webui(replicated_appliances_with_providers, appliance, request,
                                 old_version, soft_assert):
    """ Tests updating an appliance with providers, also confirms that the
            provisioning continues to function correctly after the update has completed

    Polarion:
        assignee: jhenner
        caseimportance: high
        casecomponent: Appliance
        initialEstimate: 1/4h
    """
    providers_before_upgrade = set(replicated_appliances_with_providers[0].managed_provider_names)
    update_appliance(replicated_appliances_with_providers[0])
    update_appliance(replicated_appliances_with_providers[1])
    wait_for(do_appliance_versions_match,
             func_args=(appliance, replicated_appliances_with_providers[0]),
             num_sec=900, delay=20, handle_exception=True,
             message='Waiting for appliance to update')
    wait_for(do_appliance_versions_match,
             func_args=(appliance, replicated_appliances_with_providers[1]),
             num_sec=900, delay=20, handle_exception=True,
             message='Waiting for appliance to update')

    # Assert providers exist after the upgrade and are replicated to the second appliance
    assert providers_before_upgrade == set(
        replicated_appliances_with_providers[1].managed_provider_names), 'Providers are missing'
    # Verify that existing provider can detect new VMs on both apps
    virtual_crud_appl1 = provider_app_crud(VMwareProvider, replicated_appliances_with_providers[0])
    virtual_crud_appl2 = provider_app_crud(VMwareProvider, replicated_appliances_with_providers[1])
    vm1 = provision_vm(request, virtual_crud_appl1)
    vm2 = provision_vm(request, virtual_crud_appl2)
    soft_assert(vm1.provider.mgmt.does_vm_exist(vm1.name), "vm not provisioned")
    soft_assert(vm2.provider.mgmt.does_vm_exist(vm2.name), "vm not provisioned")
def test_appliance_console_restore_db_ha(request, unconfigured_appliances,
                                         app_creds):
    """Configure HA environment with providers, run backup/restore on configuration,
    Confirm that ha failover continues to work correctly and providers still exist.

    Polarion:
        assignee: jhenner
        caseimportance: high
        casecomponent: Appliance
        initialEstimate: 1/4h
    Bugzilla:
        1740515
    """
    pwd = app_creds["password"]
    appl1, appl2, appl3 = configure_appliances_ha(unconfigured_appliances, pwd)

    # Add infra/cloud providers and create db backup
    provider_app_crud(VMwareProvider, appl3).setup()
    provider_app_crud(EC2Provider, appl3).setup()
    appl1.db.backup()

    providers_before_restore = set(appl3.managed_provider_names)
    # Restore the DB on the primary database appliance
    appl3.evmserverd.stop()
    appl1.rh_postgresql95_repmgr.stop()
    appl2.rh_postgresql95_repmgr.stop()
    appl1.db.drop()
    appl1.db.create()
    fetch_v2key(appl3, appl1)
    restore_db(appl1)

    appl1.appliance_console.reconfigure_primary_replication_node(pwd)
    appl2.appliance_console.reconfigure_standby_replication_node(
        pwd, appl1.hostname)

    appl3.appliance_console.configure_automatic_failover(
        primary_ip=appl1.hostname)
    appl3.evm_failover_monitor.restart()

    appl3.evmserverd.start()
    appl3.wait_for_web_ui()
    # Assert providers still exist after restore
    assert providers_before_restore == set(appl3.managed_provider_names), (
        'Restored DB is missing some providers')

    with LogValidator(evm_log,
                      matched_patterns=['Starting to execute failover'],
                      hostname=appl3.hostname).waiting(timeout=450):
        # Cause failover to occur
        appl1.db_service.stop()

    appl3.evmserverd.wait_for_running()
    appl3.wait_for_web_ui()
    # Assert providers still exist after ha failover
    assert providers_before_restore == set(appl3.managed_provider_names), (
        'Restored DB is missing some providers')
    # Verify that existing provider can detect new VMs after restore/failover
    virtual_crud = provider_app_crud(VMwareProvider, appl3)
    vm = provision_vm(request, virtual_crud)
    assert vm.mgmt.is_running, "vm not running"
def test_update_distributed_webui(ext_appliances_with_providers, appliance,
                                  request, old_version, soft_assert):
    """ Tests updating an appliance with providers, also confirms that the
            provisioning continues to function correctly after the update has completed"""
    update_appliance(ext_appliances_with_providers[0])
    wait_for(do_appliance_versions_match,
             func_args=(appliance, ext_appliances_with_providers[0]),
             num_sec=900,
             delay=20,
             handle_exception=True,
             message='Waiting for appliance to update')
    wait_for(do_appliance_versions_match,
             func_args=(appliance, ext_appliances_with_providers[1]),
             num_sec=900,
             delay=20,
             handle_exception=True,
             message='Waiting for appliance to update')
    # Verify that existing provider can detect new VMs on both apps
    virtual_crud_appl1 = provider_app_crud(VMwareProvider,
                                           ext_appliances_with_providers[0])
    virtual_crud_appl2 = provider_app_crud(VMwareProvider,
                                           ext_appliances_with_providers[1])
    vm1 = provision_vm(request, virtual_crud_appl1)
    vm2 = provision_vm(request, virtual_crud_appl2)
    soft_assert(vm1.provider.mgmt.does_vm_exist(vm1.name),
                "vm not provisioned")
    soft_assert(vm2.provider.mgmt.does_vm_exist(vm2.name),
                "vm not provisioned")
Example #7
def two_appliances_one_with_providers(temp_appliances_preconfig_funcscope):
    """Requests two configured appliances from sprout."""
    appl1, appl2 = temp_appliances_preconfig_funcscope

    # Add infra/cloud providers
    provider_app_crud(VMwareProvider, appl1).setup()
    provider_app_crud(OpenStackProvider, appl1).setup()
    return appl1, appl2
Example #8
def setup_providers_on_multi_region_cluster(multi_region_cluster, setup_multi_region_cluster):
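    """Set up a VMware provider on the first remote appliance and a RHEV provider on the
    second remote appliance of the multi-region cluster, returning both provider objects."""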
    remote_appliance, second_remote_appliance = multi_region_cluster.remote_appliances
    vmware_provider = provider_app_crud(VMwareProvider, remote_appliance)
    rhev_provider = provider_app_crud(RHEVMProvider, second_remote_appliance)

    vmware_provider.setup()
    rhev_provider.setup()

    return vmware_provider, rhev_provider
def test_replication_powertoggle(request, provider, setup_replication,
                                 small_template):
    """
    power toggle from global to remote

    Polarion:
        assignee: dgaikwad
        casecomponent: Replication
        caseimportance: critical
        initialEstimate: 1/12h
        testSteps:
            1. Have a VM created in the provider in the Remote region
               subscribed to Global.
            2. Turn the VM off using the Global appliance.
            3. Turn the VM on using the Global appliance.
        expectedResults:
            1.
            2. VM state changes to off in the Remote and Global appliance.
            3. VM state changes to on in the Remote and Global appliance.
    """
    instance_name = fauxfactory.gen_alphanumeric(start="test_replication_",
                                                 length=25).lower()
    remote_app, global_app = setup_replication

    provider_app_crud(OpenStackProvider, remote_app).setup()
    provider.appliance = remote_app

    remote_instance = remote_app.collections.cloud_instances.instantiate(
        instance_name, provider, small_template.name)
    global_instance = global_app.collections.cloud_instances.instantiate(
        instance_name, provider)

    # Create instance
    remote_instance.create_on_provider(find_in_cfme=True)
    request.addfinalizer(remote_instance.cleanup_on_provider)

    remote_instance.wait_for_instance_state_change(
        desired_state=remote_instance.STATE_ON)

    # Power OFF instance using global appliance
    global_instance.power_control_from_cfme(option=global_instance.STOP)

    # Assert instance power off state from both remote and global appliance
    assert global_instance.wait_for_instance_state_change(
        desired_state=global_instance.STATE_OFF).out
    assert remote_instance.wait_for_instance_state_change(
        desired_state=remote_instance.STATE_OFF).out

    # Power ON instance using global appliance
    global_instance.power_control_from_cfme(option=global_instance.START)

    # Assert instance power ON state from both remote and global appliance
    assert global_instance.wait_for_instance_state_change(
        desired_state=global_instance.STATE_ON).out
    assert remote_instance.wait_for_instance_state_change(
        desired_state=remote_instance.STATE_ON).out
Example #10
def test_appliance_console_restore_db_replicated(
        request, replicated_appliances_with_providers):
    """
    Polarion:
        assignee: jhenner
        caseimportance: high
        casecomponent: Configuration
        initialEstimate: 1h
    """
    appl1, appl2 = replicated_appliances_with_providers
    appl1.db.backup()
    appl2.db.backup()
    providers_before_restore = set(appl1.managed_provider_names)

    # Restore DB on the second appliance
    appl2.evmserverd.stop()
    restore_db(appl2)

    # Restore db on first appliance
    appl1.set_pglogical_replication(replication_type=':none')
    appl1.evmserverd.stop()
    appl1.db.drop()
    appl1.db.create()
    restore_db(appl1)
    appl1.evmserverd.start()
    appl2.evmserverd.start()
    appl1.wait_for_web_ui()
    appl2.wait_for_web_ui()
    appl1.wait_for_api_available()
    appl2.wait_for_api_available()

    # Reconfigure replication between the appliances; it switches to "disabled"
    # during the restore
    appl2.set_pglogical_replication(replication_type=':none')
    expected_providers = [] if appl2.version < '5.11' else ['Embedded Ansible']
    assert appl2.managed_provider_names == expected_providers

    # Start the replication again
    appl2.set_pglogical_replication(replication_type=':global')
    appl2.add_pglogical_replication_subscription(appl1.hostname)

    # Assert providers exist after the restore and are replicated to the second appliance
    assert providers_before_restore == set(appl1.managed_provider_names)
    wait_for(
        lambda: providers_before_restore == set(appl2.managed_provider_names),
        timeout=20)

    # Verify that existing provider can detect new VMs on both apps
    virtual_crud_appl1 = provider_app_crud(VMwareProvider, appl1)
    virtual_crud_appl2 = provider_app_crud(VMwareProvider, appl2)
    vm1 = provision_vm(request, virtual_crud_appl1)
    vm2 = provision_vm(request, virtual_crud_appl2)
    assert vm1.mgmt.is_running, "vm not running"
    assert vm2.mgmt.is_running, "vm not running"
def test_update_ha(ha_appliances_with_providers, appliance, update_strategy,
                   request, old_version):
    """ Tests updating an appliance with providers using webui, also confirms that the
            provisioning continues to function correctly after the update has completed

    Polarion:
        assignee: jhenner
        caseimportance: high
        casecomponent: Appliance
        initialEstimate: 1/4h

    Bugzilla:
        1704835
    """
    update_strategy(ha_appliances_with_providers[2])
    wait_for(do_appliance_versions_match,
             func_args=(appliance, ha_appliances_with_providers[2]),
             num_sec=900,
             delay=20,
             handle_exception=True,
             message='Waiting for appliance to update')

    check_db_ha_failover(ha_appliances_with_providers[0],
                         ha_appliances_with_providers[2])

    # Verify that existing provider can detect new VMs
    virtual_crud = provider_app_crud(VMwareProvider,
                                     ha_appliances_with_providers[2])
    vm = provision_vm(request, virtual_crud)
    assert vm.provider.mgmt.does_vm_exist(vm.name), "vm not provisioned"
def test_update_scap_webui(appliance_with_providers, appliance, request,
                           old_version):
    """ Tests updating an appliance with providers and scap hardened, also confirms that the
        provisioning continues to function correctly after the update has completed

    Polarion:
        assignee: jhenner
        caseimportance: high
        casecomponent: Appliance
        initialEstimate: 1/4h
    """
    appliance_with_providers.appliance_console.scap_harden_appliance()
    rules_failures = appliance_with_providers.appliance_console.scap_failures()
    assert not rules_failures, "Some rules have failed, check log"
    update_appliance(appliance_with_providers)

    wait_for(do_appliance_versions_match,
             func_args=(appliance, appliance_with_providers),
             num_sec=900,
             delay=20,
             handle_exception=True,
             message='Waiting for appliance to update')
    # Confirm the SCAP rules are still applied after the update.
    rules_failures = appliance_with_providers.appliance_console.scap_failures()
    assert not rules_failures, "Some rules have failed, check log"
    # Verify that existing provider can detect new VMs on the second appliance
    virtual_crud = provider_app_crud(VMwareProvider, appliance_with_providers)
    vm = provision_vm(request, virtual_crud)
    assert vm.provider.mgmt.does_vm_exist(vm.name), "vm not provisioned"
Example #13
def get_appliances_with_providers(temp_appliances_unconfig_funcscope_rhevm):
    """Returns two database-owning appliances, configures first appliance with providers and
    takes a backup prior to running tests.

    """
    appl1, appl2 = temp_appliances_unconfig_funcscope_rhevm
    # configure appliances
    appl1.configure(region=0)
    appl1.wait_for_web_ui()
    appl2.configure(region=0)
    appl2.wait_for_web_ui()
    # Add infra/cloud providers and create db backup
    provider_app_crud(VMwareProvider, appl1).setup()
    provider_app_crud(EC2Provider, appl1).setup()
    appl1.db.backup()
    return temp_appliances_unconfig_funcscope_rhevm
def test_update_ha_webui(ha_appliances_with_providers, appliance, request, old_version):
    """ Tests updating an appliance with providers, also confirms that the
            provisioning continues to function correctly after the update has completed

    Polarion:
        assignee: jhenner
        caseimportance: high
        casecomponent: Appliance
        initialEstimate: 1/4h
    """
    update_appliance(ha_appliances_with_providers[2])
    wait_for(do_appliance_versions_match, func_args=(appliance, ha_appliances_with_providers[2]),
             num_sec=900, delay=20, handle_exception=True,
             message='Waiting for appliance to update')
    # Cause failover to occur
    result = ha_appliances_with_providers[0].ssh_client.run_command(
        'systemctl stop $APPLIANCE_PG_SERVICE', timeout=15)
    assert result.success, "Failed to stop APPLIANCE_PG_SERVICE: {}".format(result.output)

    def is_failover_started():
        return ha_appliances_with_providers[2].ssh_client.run_command(
            "grep 'Starting to execute failover' /var/www/miq/vmdb/log/ha_admin.log").success

    wait_for(is_failover_started, timeout=450, handle_exception=True,
             message='Waiting for HA failover')
    ha_appliances_with_providers[2].wait_for_evm_service()
    ha_appliances_with_providers[2].wait_for_web_ui()
    # Verify that existing provider can detect new VMs
    virtual_crud = provider_app_crud(VMwareProvider, ha_appliances_with_providers[2])
    vm = provision_vm(request, virtual_crud)
    assert vm.provider.mgmt.does_vm_exist(vm.name), "vm not provisioned"
def test_replication_remote_to_global_by_ip_pglogical(setup_replication):
    """
    Test replication from remote region to global using any data type
    (provider,event,etc)

    Polarion:
        assignee: dgaikwad
        casecomponent: Replication
        caseimportance: critical
        initialEstimate: 1/4h
        startsin: 5.6
        testSteps:
            1. Have A Remote subscribed to Global.
            2. Create a provider in remote region.
            3. Check the provider appeared in the Global.
        expectedResults:
            1.
            2.
            3. Provider appeared in the Global.
    """
    remote_app, global_app = setup_replication
    provider = provider_app_crud(OpenStackProvider, remote_app)
    provider.setup()

    # Assert the provider is replicated to global appliance
    assert provider.name in global_app.managed_provider_names, "Provider name not found"
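
The replication tests in this listing unpack setup_replication into (remote_app, global_app). The fixture itself is not included here; a minimal sketch of what it might do, assuming the pglogical helpers used elsewhere on this page and reusing the temp_appliances_preconfig_funcscope fixture from Example #7 (both assumptions):

import pytest


@pytest.fixture
def setup_replication(temp_appliances_preconfig_funcscope):
    """Assumed sketch: make one appliance the remote source region and the other the
    global subscriber, then hand both back to the test."""
    remote_app, global_app = temp_appliances_preconfig_funcscope
    remote_app.set_pglogical_replication(replication_type=':remote')
    global_app.set_pglogical_replication(replication_type=':global')
    global_app.add_pglogical_replication_subscription(remote_app.hostname)
    return remote_app, global_app
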
Example #16
def test_appliance_console_backup_restore_db_local(
        request, two_appliances_one_with_providers):
    """ Test single appliance backup and restore, configures appliance with providers,
    backs up database, restores it to fresh appliance and checks for matching providers.

    Polarion:
        assignee: jhenner
        casecomponent: Configuration
        caseimportance: critical
        initialEstimate: 1/2h
    """
    appl1, appl2 = two_appliances_one_with_providers
    appl1_provider_names = set(appl1.managed_provider_names)

    backup_file_name = f'/tmp/backup.{fauxfactory.gen_alphanumeric()}.dump'
    appl1.db.backup(backup_file_name)

    # Transfer v2_key and db backup from first appliance to second appliance
    fetch_v2key(appl1, appl2)
    fetch_db_local(appl1, appl2, backup_file_name)

    # Restore DB on the second appliance
    appl2.evmserverd.stop()
    appl2.db.drop()
    appl2.db.create()

    with SSHExpect(appl2) as interaction:
        interaction.send('ap')
        interaction.answer('Press any key to continue.', '', timeout=40)
        interaction.answer('Choose the advanced setting: ',
                           VersionPicker({
                               LOWEST: '6',
                               '5.11.2.1': 4
                           }))
        interaction.answer(
            re.escape('Choose the restore database file source: |1| '), '')
        interaction.answer(
            re.escape(
                'Enter the location of the local restore file: |/tmp/evm_db.backup| '
            ), backup_file_name)
        interaction.answer(
            re.escape(
                'Should this file be deleted after completing the restore? (Y/N): '
            ), 'n')
        interaction.answer(
            re.escape(
                'Are you sure you would like to restore the database? (Y/N): '
            ), 'y')
        interaction.answer('Press any key to continue.', '', timeout=80)

    appl2.evmserverd.start()
    appl2.wait_for_web_ui()
    appl2.wait_for_api_available()
    # Assert providers on the second appliance
    assert set(appl2.managed_provider_names) == appl1_provider_names, (
        'Restored DB is missing some providers')
    # Verify that existing provider can detect new VMs on the second appliance
    virtual_crud = provider_app_crud(VMwareProvider, appl2)
    vm = provision_vm(request, virtual_crud)
    assert vm.mgmt.is_running, "vm not running"
def test_update_ha_webui(ha_appliances_with_providers, appliance, request,
                         old_version):
    """ Tests updating an appliance with providers, also confirms that the
            provisioning continues to function correctly after the update has completed"""
    update_appliance(ha_appliances_with_providers[2])
    wait_for(do_appliance_versions_match,
             func_args=(appliance, ha_appliances_with_providers[2]),
             num_sec=900,
             delay=20,
             handle_exception=True,
             message='Waiting for appliance to update')
    # Cause failover to occur
    result = ha_appliances_with_providers[0].ssh_client.run_command(
        'systemctl stop $APPLIANCE_PG_SERVICE', timeout=15)
    assert result.success, "Failed to stop APPLIANCE_PG_SERVICE: {}".format(
        result.output)

    def is_failover_started():
        return ha_appliances_with_providers[2].ssh_client.run_command(
            "grep 'Starting to execute failover' /var/www/miq/vmdb/log/ha_admin.log"
        ).success

    wait_for(is_failover_started,
             timeout=450,
             handle_exception=True,
             message='Waiting for HA failover')
    ha_appliances_with_providers[2].wait_for_evm_service()
    ha_appliances_with_providers[2].wait_for_web_ui()
    # Verify that existing provider can detect new VMs
    virtual_crud = provider_app_crud(VMwareProvider,
                                     ha_appliances_with_providers[2])
    vm = provision_vm(request, virtual_crud)
    assert vm.provider.mgmt.does_vm_exist(vm.name), "vm not provisioned"
def test_update_scap_webui(appliance_with_providers, appliance, request, old_version):
    """ Tests updating an appliance with providers and scap hardened, also confirms that the
        provisioning continues to function correctly after the update has completed

    Polarion:
        assignee: jhenner
        caseimportance: high
        casecomponent: Appliance
        initialEstimate: 1/4h
    """
    appliance_with_providers.appliance_console.scap_harden_appliance()
    rules_failures = appliance_with_providers.appliance_console.scap_check_rules()
    assert not rules_failures, "Some rules have failed, check log"
    update_appliance(appliance_with_providers)

    wait_for(do_appliance_versions_match, func_args=(appliance, appliance_with_providers),
             num_sec=900, delay=20, handle_exception=True,
             message='Waiting for appliance to update')
    # Confirm the SCAP rules are still applied after the update.
    rules_failures = appliance_with_providers.appliance_console.scap_check_rules()
    assert not rules_failures, "Some rules have failed, check log"
    # Verify that existing provider can detect new VMs on the second appliance
    virtual_crud = provider_app_crud(VMwareProvider, appliance_with_providers)
    vm = provision_vm(request, virtual_crud)
    assert vm.provider.mgmt.does_vm_exist(vm.name), "vm not provisioned"
Example #19
def test_appliance_console_dump_restore_db_local(
        request, get_appliances_with_providers):
    """ Test single appliance dump and restore, configures appliance with providers,
    dumps a database, restores it to fresh appliance and checks for matching providers.

    Polarion:
        assignee: jhenner
        casecomponent: Configuration
        caseimportance: critical
        initialEstimate: 1/2h
    """
    appl1, appl2 = get_appliances_with_providers
    # Transfer v2_key and db backup from first appliance to second appliance
    fetch_v2key(appl1, appl2)
    fetch_db_local(appl1, appl2, "/tmp/evm_db.backup")
    # Restore DB on the second appliance
    appl2.evmserverd.stop()
    appl2.db.drop()
    appl2.db.create()
    restore_db(appl2)
    appl2.evmserverd.start()
    appl2.wait_for_web_ui()
    appl2.wait_for_api_available()

    # Assert providers on the second appliance
    assert set(appl2.managed_provider_names) == set(
        appl1.managed_provider_names), (
            'Restored DB is missing some providers')
    # Verify that existing provider can detect new VMs on the second appliance
    virtual_crud = provider_app_crud(VMwareProvider, appl2)
    vm = provision_vm(request, virtual_crud)
    assert vm.mgmt.is_running, "vm not running"
Example #20
def test_appliance_console_restore_pg_basebackup_replicated(
        request, replicated_appliances_with_providers):
    """
    Polarion:
        assignee: jhenner
        casecomponent: Configuration
        caseimportance: critical
        initialEstimate: 1/2h
        upstream: no
    """
    appl1, appl2 = replicated_appliances_with_providers
    appl1.db.backup()
    appl2.db.backup()

    providers_before_restore = set(appl1.managed_provider_names)
    # Restore the DB on both appliances
    appl2.set_pglogical_replication(replication_type=':none')
    appl1.set_pglogical_replication(replication_type=':none')
    appl1.evmserverd.stop()
    appl2.evmserverd.stop()
    appl1.db_service.restart()
    appl2.db_service.restart()
    restore_db(appl1, '/tmp/evm_db.backup')
    restore_db(appl2, '/tmp/evm_db.backup')
    appl1.evmserverd.start()
    appl2.evmserverd.start()
    appl1.wait_for_web_ui()
    appl2.wait_for_web_ui()
    appl1.wait_for_api_available()
    appl2.wait_for_api_available()
    # Assert providers exist after the restore and are replicated to the second appliance
    assert providers_before_restore == set(appl1.managed_provider_names), (
        'Restored DB is missing some providers')
    assert providers_before_restore == set(appl2.managed_provider_names), (
        'Restored DB is missing some providers')
    # Verify that existing provider can detect new VMs on both apps
    virtual_crud_appl1 = provider_app_crud(VMwareProvider, appl1)
    virtual_crud_appl2 = provider_app_crud(VMwareProvider, appl2)
    vm1 = provision_vm(request, virtual_crud_appl1)
    vm2 = provision_vm(request, virtual_crud_appl2)
    assert vm1.mgmt.is_running, "vm not running"
    assert vm2.mgmt.is_running, "vm not running"
def test_appliance_console_restore_db_replicated(
        request, get_replicated_appliances_with_providers):
    """
    Polarion:
        assignee: jhenner
        caseimportance: high
        casecomponent: Configuration
        initialEstimate: 1h
    """
    appl1, appl2 = get_replicated_appliances_with_providers
    providers_before_restore = set(appl1.managed_provider_names)
    # Restore DB on the second appliance
    appl2.evmserverd.stop()

    restore_db(appl2)
    # Restore db on first appliance
    appl1.set_pglogical_replication(replication_type=':none')
    appl1.evmserverd.stop()
    appl1.db.drop()
    appl1.db.create()
    restore_db(appl1)
    appl1.evmserverd.start()
    appl2.evmserverd.start()
    appl1.wait_for_web_ui()
    appl2.wait_for_web_ui()
    # Reconfigure replication between the appliances; it is lost during the restore
    appl1.set_pglogical_replication(replication_type=':remote')
    appl2.set_pglogical_replication(replication_type=':global')
    appl2.add_pglogical_replication_subscription(appl1.hostname)
    # Assert providers exist after the restore and are replicated to the second appliance
    assert providers_before_restore == set(appl1.managed_provider_names), (
        'Restored DB is missing some providers')
    assert providers_before_restore == set(appl2.managed_provider_names), (
        'Restored DB is missing some providers')
    # Verify that existing provider can detect new VMs on both apps
    virtual_crud_appl1 = provider_app_crud(VMwareProvider, appl1)
    virtual_crud_appl2 = provider_app_crud(VMwareProvider, appl2)
    vm1 = provision_vm(request, virtual_crud_appl1)
    vm2 = provision_vm(request, virtual_crud_appl2)
    assert vm1.mgmt.is_running, "vm not running"
    assert vm2.mgmt.is_running, "vm not running"
Example #22
def get_ext_appliances_with_providers(temp_appliances_unconfig_funcscope_rhevm,
                                      app_creds_modscope):
    """Returns two database-owning appliances, configures first appliance with providers and
    takes a backup prior to running tests.

    """
    appl1, appl2 = temp_appliances_unconfig_funcscope_rhevm
    app_ip = appl1.hostname
    # configure appliances
    appl1.configure(region=0)
    appl1.wait_for_web_ui()
    appl2.appliance_console_cli.configure_appliance_external_join(
        app_ip, app_creds_modscope['username'], app_creds_modscope['password'],
        'vmdb_production', app_ip, app_creds_modscope['sshlogin'],
        app_creds_modscope['sshpass'])
    appl2.wait_for_web_ui()
    # Add infra/cloud providers and create db backup
    provider_app_crud(VMwareProvider, appl1).setup()
    provider_app_crud(EC2Provider, appl1).setup()
    appl1.db.backup()
    return temp_appliances_unconfig_funcscope_rhevm
Example #23
def test_update_ha(ha_appliances_with_providers, appliance, update_strategy,
                   request, old_version):
    """ Tests updating an appliance with providers using webui, also confirms that the
            provisioning continues to function correctly after the update has completed

    Polarion:
        assignee: jhenner
        caseimportance: high
        casecomponent: Appliance
        initialEstimate: 1/4h
    """
    evm_log = '/var/www/miq/vmdb/log/evm.log'
    update_strategy(ha_appliances_with_providers[2])
    wait_for(do_appliance_versions_match,
             func_args=(appliance, ha_appliances_with_providers[2]),
             num_sec=900,
             delay=20,
             handle_exception=True,
             message='Waiting for appliance to update')

    if BZ(1704835,
          forced_streams=get_stream(
              ha_appliances_with_providers[2].version)).blocks:
        with LogValidator(
                evm_log,
                matched_patterns=[r'Starting database failover monitor'],
                hostname=ha_appliances_with_providers[2].hostname).waiting(
                    wait=30):
            ha_appliances_with_providers[2].evm_failover_monitor.restart()

    assert ha_appliances_with_providers[2].evm_failover_monitor.running

    with LogValidator(
            evm_log,
            matched_patterns=['Starting to execute failover'],
            hostname=ha_appliances_with_providers[2].hostname).waiting(
                wait=450):
        # Cause failover to occur
        result = ha_appliances_with_providers[0].ssh_client.run_command(
            'systemctl stop $APPLIANCE_PG_SERVICE', timeout=15)
        assert result.success, "Failed to stop APPLIANCE_PG_SERVICE: {}".format(
            result.output)

    ha_appliances_with_providers[2].evmserverd.wait_for_running()
    ha_appliances_with_providers[2].wait_for_web_ui()
    # Verify that existing provider can detect new VMs
    virtual_crud = provider_app_crud(VMwareProvider,
                                     ha_appliances_with_providers[2])
    vm = provision_vm(request, virtual_crud)
    assert vm.provider.mgmt.does_vm_exist(vm.name), "vm not provisioned"
def test_update_distributed_webui(ext_appliances_with_providers, appliance, request, old_version,
                                  soft_assert):
    """ Tests updating an appliance with providers, also confirms that the
            provisioning continues to function correctly after the update has completed

    Polarion:
        assignee: jhenner
        caseimportance: high
        initialEstimate: 1/4h
    """
    update_appliance(ext_appliances_with_providers[0])
    wait_for(do_appliance_versions_match, func_args=(appliance, ext_appliances_with_providers[0]),
             num_sec=900, delay=20, handle_exception=True,
             message='Waiting for appliance to update')
    wait_for(do_appliance_versions_match, func_args=(appliance, ext_appliances_with_providers[1]),
             num_sec=900, delay=20, handle_exception=True,
             message='Waiting for appliance to update')
    # Verify that existing provider can detect new VMs on both apps
    virtual_crud_appl1 = provider_app_crud(VMwareProvider, ext_appliances_with_providers[0])
    virtual_crud_appl2 = provider_app_crud(VMwareProvider, ext_appliances_with_providers[1])
    vm1 = provision_vm(request, virtual_crud_appl1)
    vm2 = provision_vm(request, virtual_crud_appl2)
    soft_assert(vm1.provider.mgmt.does_vm_exist(vm1.name), "vm not provisioned")
    soft_assert(vm2.provider.mgmt.does_vm_exist(vm2.name), "vm not provisioned")
Example #25
def test_appliance_console_restore_db_external(
        request, get_ext_appliances_with_providers):
    """Configure ext environment with providers, run backup/restore on configuration,
    Confirm that providers still exist after restore and provisioning works.

    Polarion:
        assignee: jhenner
        casecomponent: Configuration
        caseimportance: critical
        initialEstimate: 1h
    """
    appl1, appl2 = get_ext_appliances_with_providers
    # Restore the DB on the first (database-owning) appliance
    providers_before_restore = set(appl1.managed_provider_names)
    appl2.evmserverd.stop()
    appl1.evmserverd.stop()
    appl1.db_service.restart()
    appl1.db.drop()
    appl1.db.create()
    restore_db(appl1)
    appl1.evmserverd.start()
    appl1.wait_for_web_ui()
    appl2.evmserverd.start()
    appl2.wait_for_web_ui()
    # Assert providers after restore on both apps
    assert providers_before_restore == set(appl1.managed_provider_names), (
        'Restored DB is missing some providers')
    assert providers_before_restore == set(appl2.managed_provider_names), (
        'Restored DB is missing some providers')
    # Verify that existing provider can detect new VMs on both apps
    virtual_crud_appl1 = provider_app_crud(VMwareProvider, appl1)
    virtual_crud_appl2 = provider_app_crud(VMwareProvider, appl2)
    vm1 = provision_vm(request, virtual_crud_appl1)
    vm2 = provision_vm(request, virtual_crud_appl2)
    assert vm1.mgmt.is_running, "vm not running"
    assert vm2.mgmt.is_running, "vm not running"
def test_update_webui(appliance_with_providers, appliance, request,
                      old_version):
    """ Tests updating an appliance with providers, also confirms that the
        provisioning continues to function correctly after the update has completed"""
    update_appliance(appliance_with_providers)

    wait_for(do_appliance_versions_match,
             func_args=(appliance, appliance_with_providers),
             num_sec=900,
             delay=20,
             handle_exception=True,
             message='Waiting for appliance to update')
    # Verify that existing provider can detect new VMs on the second appliance
    virtual_crud = provider_app_crud(VMwareProvider, appliance_with_providers)
    vm = provision_vm(request, virtual_crud)
    assert vm.provider.mgmt.does_vm_exist(vm.name), "vm not provisioned"
Example #27
def test_update_ha(ha_appliances_with_providers, appliance, update_strategy,
                   request, old_version):
    """ Tests updating an appliance with providers using webui, also confirms that the
            provisioning continues to function correctly after the update has completed

    Polarion:
        assignee: jhenner
        caseimportance: high
        casecomponent: Appliance
        initialEstimate: 1/4h

    Bugzilla:
        1704835
    """
    evm_log = '/var/www/miq/vmdb/log/evm.log'
    update_strategy(ha_appliances_with_providers[2])
    wait_for(do_appliance_versions_match,
             func_args=(appliance, ha_appliances_with_providers[2]),
             num_sec=900,
             delay=20,
             handle_exception=True,
             message='Waiting for appliance to update')

    with LogValidator(
            evm_log,
            matched_patterns=['Starting database failover monitor'],
            hostname=ha_appliances_with_providers[2].hostname).waiting(
                wait=60):
        ha_appliances_with_providers[2].evm_failover_monitor.restart()
        assert ha_appliances_with_providers[2].evm_failover_monitor.running

    with LogValidator(
            evm_log,
            matched_patterns=['Starting to execute failover'],
            hostname=ha_appliances_with_providers[2].hostname).waiting(
                wait=450):
        # Cause failover to occur
        ha_appliances_with_providers[0].db_service.stop()

    ha_appliances_with_providers[2].evmserverd.wait_for_running()
    ha_appliances_with_providers[2].wait_for_miq_ready()
    # Verify that existing provider can detect new VMs
    virtual_crud = provider_app_crud(VMwareProvider,
                                     ha_appliances_with_providers[2])
    vm = provision_vm(request, virtual_crud)
    assert vm.provider.mgmt.does_vm_exist(vm.name), "vm not provisioned"
def test_update_webui(appliance_with_providers, appliance, request, old_version):
    """ Tests updating an appliance with providers, also confirms that the
        provisioning continues to function correctly after the update has completed

    Polarion:
        assignee: jhenner
        caseimportance: high
        initialEstimate: 1/4h
    """
    update_appliance(appliance_with_providers)

    wait_for(do_appliance_versions_match, func_args=(appliance, appliance_with_providers),
             num_sec=900, delay=20, handle_exception=True,
             message='Waiting for appliance to update')
    # Verify that existing provider can detect new VMs on the second appliance
    virtual_crud = provider_app_crud(VMwareProvider, appliance_with_providers)
    vm = provision_vm(request, virtual_crud)
    assert vm.provider.mgmt.does_vm_exist(vm.name), "vm not provisioned"
def test_appliance_console_restore_db_nfs(request,
                                          two_appliances_one_with_providers,
                                          utility_vm, utility_vm_nfs_ip):
    """ Test single appliance backup and restore through nfs, configures appliance with providers,
        backs up database, restores it to fresh appliance and checks for matching providers.

    Polarion:
        assignee: jhenner
        casecomponent: Configuration
        caseimportance: critical
        initialEstimate: 1h

    Bugzilla:
        1633573
    """
    appl1, appl2 = two_appliances_one_with_providers
    vm, _, data = utility_vm
    host = utility_vm_nfs_ip
    loc = data['network_share']['nfs']['path']
    nfs_dump_file_name = f'/tmp/backup.{fauxfactory.gen_alphanumeric()}.dump'
    nfs_restore_dir_path = f'nfs://{host}{loc}'
    nfs_restore_file_path = f'{nfs_restore_dir_path}/db_backup/{nfs_dump_file_name}'
    # Transfer v2_key and db backup from first appliance to second appliance
    fetch_v2key(appl1, appl2)

    appl1_provider_names = set(appl1.managed_provider_names)

    # Do the backup
    with SSHExpect(appl1) as interaction:
        appl1.evmserverd.stop()
        interaction.send('ap')
        interaction.answer('Press any key to continue.', '', timeout=40)
        interaction.answer('Choose the advanced setting: ',
                           VersionPicker({
                               LOWEST: '4',
                               '5.11.2.1': 2
                           }))
        interaction.answer(
            r'Choose the backup output file destination: \|1\| ', '2')
        interaction.answer(
            r'Enter the location to save the backup file to: \|.*\| ',
            nfs_dump_file_name)
        # Enter the location to save the remote backup file to
        interaction.answer(
            re.escape(
                'Example: nfs://host.mydomain.com/exported/my_exported_folder/db.backup: '
            ), nfs_restore_dir_path)
        # Running Database backup to nfs://XX.XX.XX.XX/srv/export...
        interaction.answer('Press any key to continue.', '', timeout=240)

    # Restore DB on the second appliance
    appl2.evmserverd.stop()
    appl2.db.drop()
    appl2.db.create()

    with SSHExpect(appl2) as interaction:
        interaction.send('ap')
        interaction.answer('Press any key to continue.', '', timeout=40)
        interaction.answer('Choose the advanced setting: ',
                           VersionPicker({
                               LOWEST: '6',
                               '5.11.2.1': 4
                           }))
        interaction.answer(r'Choose the restore database file source: \|1\| ',
                           '2')
        # Enter the location of the remote backup file
        interaction.answer(
            re.escape(
                'Example: nfs://host.mydomain.com/exported/my_exported_folder/db.backup: '
            ), nfs_restore_file_path)
        interaction.answer(
            r'Are you sure you would like to restore the database\? \(Y\/N\): ',
            'y')
        interaction.answer('Press any key to continue.', '', timeout=80)

    appl2.evmserverd.start()
    appl2.wait_for_miq_ready()
    # Assert providers on the second appliance
    assert set(appl2.managed_provider_names) == appl1_provider_names, (
        'Restored DB is missing some providers')
    # Verify that existing provider can detect new VMs on the second appliance
    virtual_crud = provider_app_crud(VMwareProvider, appl2)
    vm = provision_vm(request, virtual_crud)
    assert vm.mgmt.is_running, "vm not running"
def test_appliance_console_restore_db_ha(request, unconfigured_appliances,
                                         app_creds):
    """Configure HA environment with providers, run backup/restore on configuration,
    Confirm that ha failover continues to work correctly and providers still exist.

    Polarion:
        assignee: jhenner
        caseimportance: high
        casecomponent: Appliance
        initialEstimate: 1/4h
    Bugzilla:
        1693189
        1740515
        1856470
    """
    pwd = app_creds["password"]
    db_appl_1, db_appl_2, webui_appl = configure_appliances_ha(
        unconfigured_appliances, pwd)

    # Add infra/cloud providers and create db backup
    provider_app_crud(VMwareProvider, webui_appl).setup()
    provider_app_crud(OpenStackProvider, webui_appl).setup()
    db_appl_1.db.backup()

    providers_before_restore = set(webui_appl.managed_provider_names)
    # Restore the DB on the primary database node
    webui_appl.evmserverd.stop()
    db_appl_1.repmgr.stop()
    db_appl_2.repmgr.stop()
    db_appl_1.db.drop()
    db_appl_1.db.create()
    fetch_v2key(webui_appl, db_appl_1)
    restore_db(db_appl_1)

    db_appl_1.appliance_console.reconfigure_primary_replication_node(pwd)
    db_appl_2.appliance_console.reconfigure_standby_replication_node(
        pwd, db_appl_1.hostname)

    # Reboot the standby node to make sure the repmgr10 starts automatically and is ready to
    # take-over (BZ 1856470).
    db_appl_2.reboot(wait_for_miq_ready=False)

    webui_appl.appliance_console.configure_automatic_failover(
        primary_ip=db_appl_1.hostname)
    webui_appl.evm_failover_monitor.restart()

    webui_appl.evmserverd.start()
    webui_appl.wait_for_miq_ready()
    # Assert providers still exist after restore
    assert providers_before_restore == set(
        webui_appl.managed_provider_names), (
            'Restored DB is missing some providers')

    with LogValidator(evm_log,
                      matched_patterns=['Starting to execute failover'],
                      hostname=webui_appl.hostname).waiting(timeout=450):
        # Cause failover to occur. Note that reboot of the active node may not cause failover to
        # occur as the node can come back again soon enough.
        db_appl_1.db_service.stop()

    webui_appl.evmserverd.wait_for_running()
    webui_appl.wait_for_miq_ready()
    # Assert providers still exist after ha failover
    assert providers_before_restore == set(
        webui_appl.managed_provider_names), (
            'Restored DB is missing some providers')
    # Verify that existing provider can detect new VMs after restore/failover
    virtual_crud = provider_app_crud(VMwareProvider, webui_appl)
    vm = provision_vm(request, virtual_crud)
    assert vm.mgmt.is_running, "vm not running"
Example #31
def test_appliance_console_restore_db_samba(request,
                                            two_appliances_one_with_providers,
                                            utility_vm, utility_vm_samba_ip):
    """ Test single appliance backup and restore through smb, configures appliance with providers,
        backs up database, restores it to fresh appliance and checks for matching providers.

    Polarion:
        assignee: jhenner
        casecomponent: Configuration
        caseimportance: critical
        initialEstimate: 1h
    """
    appl1, appl2 = two_appliances_one_with_providers
    _, _, data = utility_vm
    host = utility_vm_samba_ip
    loc = data['network_share']['smb']['path']
    smb_dump_file_name = f'/tmp/backup.{fauxfactory.gen_alphanumeric()}.dump'
    smb_restore_dir_path = f'smb://{host}{loc}'
    smb_restore_file_path = f'{smb_restore_dir_path}/db_backup/{smb_dump_file_name}'

    creds_key = data['network_share']['smb']['credentials']
    pwd = credentials[creds_key]['password']
    usr = credentials[creds_key]['username']
    # Transfer v2_key and db backup from first appliance to second appliance
    fetch_v2key(appl1, appl2)

    appl1_provider_names = set(appl1.managed_provider_names)

    # Do the backup
    with SSHExpect(appl1) as interaction:
        appl1.evmserverd.stop()
        interaction.send('ap')
        interaction.answer('Press any key to continue.', '', timeout=40)
        interaction.answer('Choose the advanced setting: ',
                           VersionPicker({
                               LOWEST: '4',
                               '5.11.2.1': 2
                           }))
        interaction.answer(
            r'Choose the backup output file destination: \|1\| ', '3')
        interaction.answer(
            r'Enter the location to save the backup file to: \|.*\| ',
            smb_dump_file_name)
        # Enter the location to save the remote backup file to
        interaction.answer(
            re.escape(
                'Example: smb://host.mydomain.com/my_share/daily_backup/db.backup: '
            ), smb_restore_dir_path)
        # Enter the username with access to this file.
        interaction.answer(re.escape("Example: 'mydomain.com/user': "******"Example: 'mydomain.com/user': "******"vm not running"
def test_replication_global_region_dashboard(request, setup_replication):
    """
    Global dashboard show remote data

    Polarion:
        assignee: dgaikwad
        casecomponent: Replication
        initialEstimate: 1/4h
        testSteps:
            1. Have a VM created in the provider in the Remote region which is
               subscribed to Global.
            2. Check the dashboard on the Global shows data from the Remote region.
        expectedResults:
            1.
            2. Dashboard on the Global displays data from the Remote region
    """
    remote_app, global_app = setup_replication
    remote_provider = provider_app_crud(InfraProvider, remote_app)
    remote_provider.setup()
    assert remote_provider.name in remote_app.managed_provider_names, "Provider is not available."

    new_vm_name = fauxfactory.gen_alphanumeric(start="test_rep_dashboard",
                                               length=25).lower()
    vm = create_vm(provider=remote_provider, vm_name=new_vm_name)
    request.addfinalizer(vm.cleanup_on_provider)
    data_items = ('EVM: Recently Discovered Hosts',
                  'EVM: Recently Discovered VMs', 'Top Storage Consumers')
    remote_app_data, global_app_data = {}, {}

    def get_table_data(widget):
        ret = [row.name.text for row in widget.contents]
        logger.info("Widget text data:{%s}" % ret)
        return ret

    def data_check(view, table):
        return bool(
            get_table_data(
                view.dashboards("Default Dashboard").widgets(table)))

    view = navigate_to(remote_app.server, "Dashboard")
    for table_name in data_items:
        logger.info("Table name:{%s}" % table_name)
        wait_for(
            data_check,
            func_args=[view, table_name],
            delay=20,
            num_sec=600,
            fail_func=view.dashboards("Default Dashboard").browser.refresh,
            message=f"Waiting for table data item: {table_name} ")
        remote_app_data[table_name] = get_table_data(
            view.dashboards("Default Dashboard").widgets(table_name))

    view = navigate_to(global_app.server, "Dashboard")
    for table_name in data_items:
        logger.info("Table name:{%s}" % table_name)
        wait_for(
            data_check,
            func_args=[view, table_name],
            delay=20,
            num_sec=600,
            fail_func=view.dashboards("Default Dashboard").browser.refresh,
            message=f"Waiting for table data item: {table_name}")

        global_app_data[table_name] = get_table_data(
            view.dashboards("Default Dashboard").widgets(table_name))

    # TODO(ndhandre): Some widgets are not checked in this test case because they are not
    #  implemented yet: 'Vendor and Guest OS Chart', 'Top Memory Consumers (weekly)',
    #  'Top CPU Consumers (weekly)', 'Virtual Infrastructure Platforms', 'Guest OS Information'

    assert are_dicts_same(
        remote_app_data, global_app_data), "Dashboards on the two appliances do not match."
Example #33
def get_ha_appliances_with_providers(unconfigured_appliances, app_creds):
    """Configure HA environment

    Appliance one configuring dedicated database, 'ap' launch appliance_console,
    '' clear info screen, '7' setup db, '1' Creates v2_key, '1' selects internal db,
    '2' use partition, 'y' create dedicated db, 'pwd' db password, 'pwd' confirm db password + wait
    360 secs and '' finish.

    Appliance two creating region in dedicated database, 'ap' launch appliance_console, '' clear
    info screen, '7' setup db, '2' fetch v2_key, 'app0_ip' appliance ip address, '' default user,
    'pwd' appliance password, '' default v2_key location, '2' create region in external db, '0' db
    region number, 'y' confirm create region in external db 'app0_ip', '' ip and default port for
    dedicated db, '' use default db name, '' default username, 'pwd' db password, 'pwd' confirm db
    password + wait 360 seconds and '' finish.

    Appliance one configuring primary node for replication, 'ap' launch appliance_console, '' clear
    info screen, '8' configure db replication, '1' configure node as primary, '1' cluster node
    number set to 1, '' default dbname, '' default user, 'pwd' password, 'pwd' confirm password,
    'app0_ip' primary appliance ip, confirm settings and wait 360 seconds to configure, '' finish.


    Appliance three configuring standby node for replication, 'ap' launch appliance_console, ''
    clear info screen, '8' configure db replication, '2' configure node as standby, '2' cluster node
    number set to 2, '' default dbname, '' default user, 'pwd' password, 'pwd' confirm password,
    'app0_ip' primary appliance ip, confirm settings and wait 360 seconds to configure, '' finish.


    Appliance two configuring automatic failover of database nodes, 'ap' launch appliance_console,
    '' clear info screen '10' configure application database failover monitor, '1' start failover
    monitor. wait 30 seconds for service to start '' finish.

    """
    appl1, appl2, appl3 = unconfigured_appliances
    app0_ip = appl1.hostname
    app1_ip = appl2.hostname
    pwd = app_creds['password']
    # Configure first appliance as dedicated database
    command_set = ('ap', '', '7', '1', '1', '2', 'y', pwd,
                   TimedCommand(pwd, 360), '')
    appl1.appliance_console.run_commands(command_set)
    wait_for(lambda: appl1.db.is_dedicated_active)
    # Configure the EVM web UI appliance, creating a region in the dedicated database
    command_set = ('ap', '', '7', '2', app0_ip, '', pwd, '', '2', '0', 'y',
                   app0_ip, '', '', '', TimedCommand(pwd, 360), '')
    appl3.appliance_console.run_commands(command_set)
    appl3.evmserverd.wait_for_running()
    appl3.wait_for_web_ui()
    # Configure primary replication node
    command_set = ('ap', '', '8', '1', '1', '', '', pwd, pwd, app0_ip,
                   TimedCommand('y', 60), '')
    appl1.appliance_console.run_commands(command_set)

    # Configure secondary replication node
    command_set = ('ap', '', '8', '2', '2', app0_ip, '', pwd, '', '2', '2', '',
                   '', pwd, pwd, app0_ip, app1_ip, 'y', TimedCommand('y',
                                                                     60), '')
    appl2.appliance_console.run_commands(command_set)
    # Configure automatic failover on the EVM appliance
    with waiting_for_ha_monitor_started(appl3, app1_ip, timeout=300):
        command_set = ('ap', '', '10', TimedCommand('1', 30), '')
        appl3.appliance_console.run_commands(command_set)

    # Add infra/cloud providers and create db backup
    provider_app_crud(VMwareProvider, appl3).setup()
    provider_app_crud(OpenStackProvider, appl3).setup()
    appl1.db.backup()

    return unconfigured_appliances