def ansible_tower_dialog(request, appliance):
    """Yield a UI service-dialog object backed by a REST-created dialog.

    The dialog itself is created through the REST fixture
    ``ansible_tower_dialog_rest``; this fixture merely instantiates the
    matching UI collection entity and cleans it up afterwards.

    Teardown deletes the dialog unless the appliance is 5.11+ while GH
    issue 8836 is still blocking (deletion is known-broken there).
    """
    created = ansible_tower_dialog_rest(request, appliance)
    dialog = appliance.collections.service_dialogs.instantiate(
        label=created.label,
        description=created.description,
    )
    yield dialog
    # Same short-circuit as the original condition: GH(8836) is only
    # consulted when the appliance version is >= 5.11.
    deletion_blocked = appliance.version >= '5.11' and GH(8836).blocks
    if not deletion_blocked:
        dialog.delete_if_exists()
def msg_from_dict(msg_dict) -> EmailMessage:
    """Build an :class:`EmailMessage` from the address lists in *msg_dict*.

    The single ``to`` address is taken from ``default_recipient`` while GH
    issue ManageIQ/manageiq#20260 blocks, otherwise from ``approver``.
    Either list is expected to hold exactly one address; the single-element
    unpacking below enforces that.
    """
    recipient_key = (
        'default_recipient'
        if GH(('ManageIQ/manageiq', 20260)).blocks
        else 'approver'
    )
    [to] = msg_dict[recipient_key]

    message = EmailMessage()
    message.add_header('from', ', '.join(msg_dict['from']))
    message.add_header('cc', ', '.join(msg_dict['cc']))
    message.add_header('to', to)
    # rcpttos mirrors what an SMTP server would see: cc + bcc + the to address.
    all_recipients = chain(msg_dict['cc'], msg_dict['bcc'], [to])
    message.add_header('rcpttos', ', '.join(all_recipients))
    return message
'host_name': {'name': host}}, 'customize': {'root_password': pxe_root_password, 'custom_template': {'name': pxe_kickstart}}, 'network': {'vlan': partial_match(pxe_vlan)}, } item_name = fauxfactory.gen_alphanumeric() return appliance.collections.catalog_items.create( provider.catalog_item_type, name=item_name, description="my catalog", display_in=True, catalog=catalog, dialog=dialog, prov_data=provisioning_data) @pytest.mark.rhv1 @pytest.mark.meta(blockers=[GH('ManageIQ/integration_tests:7965'), BZ(1633516, forced_streams=['5.10'], unblock=lambda provider: not provider.one_of(RHEVMProvider))]) @pytest.mark.usefixtures('setup_pxe_servers_vm_prov') def test_pxe_servicecatalog(appliance, setup_provider, provider, catalog_item, request): """Tests RHEV PXE service catalog Metadata: test_flag: pxe, provision Polarion: assignee: nansari initialEstimate: 1/4h """ vm_name = catalog_item.prov_data['catalog']["vm_name"] request.addfinalizer(
from cfme.cloud.provider.ec2 import EC2Provider from cfme.control.explorer.policies import VMControlPolicy from cfme.utils.appliance.implementations.ui import navigate_to from cfme.utils.blockers import BZ from cfme.utils.blockers import GH from cfme.utils.log import logger from cfme.utils.wait import TimedOutError from cfme.utils.wait import wait_for pytestmark = [ pytest.mark.tier(2), # Only one prov out of the 2 is taken, if not supplying --use-provider=complete pytest.mark.provider([AzureProvider, EC2Provider], required_flags=['timelines', 'events']), pytest.mark.usefixtures('setup_provider'), pytest.mark.meta(blockers=[ GH("ManageIQ/manageiq-providers-amazon:620", unblock=lambda provider: not provider.one_of(EC2Provider)) ]), test_requirements.timelines, test_requirements.events, ] @pytest.fixture(scope="function") def mark_vm_as_appliance(create_vm, appliance): # set diagnostics vm relations_view = navigate_to(create_vm, 'EditManagementEngineRelationship', wait_for_view=0) relations_view.form.server.select_by_visible_text( "{name} ({sid})".format( name=appliance.server.name, sid=appliance.server.sid )
message="Waiting for successful SSH connection after revert") try: result = ssh_client.run_command('test -e snapshot1.txt') assert not result.rc result = ssh_client.run_command('test -e snapshot2.txt') assert result.rc logger.info('Revert to snapshot %s successful', snapshot1.name) except: logger.exception('Revert to snapshot %s Failed', snapshot1.name) ssh_client.close() @pytest.mark.rhv1 @pytest.mark.uncollectif(lambda provider: (provider.one_of(RHEVMProvider) and provider.version < 4), 'Must be RHEVM provider version >= 4') @pytest.mark.meta(blockers=[GH('ManageIQ/integration_tests:6744', unblock=lambda provider: provider.name != 'rhv_cfme_integration')]) def test_verify_revert_snapshot(full_test_vm, provider, soft_assert, register_event, request): """Tests revert snapshot Metadata: test_flag: snapshot, provision """ verify_revert_snapshot(full_test_vm, provider, soft_assert, register_event, request) @pytest.mark.uncollectif(lambda provider: provider.one_of(RHEVMProvider), 'Must NOT be RHEVM provider') def test_revert_active_snapshot(full_test_vm, provider, soft_assert, register_event, request): """Tests revert active snapshot Metadata:
soft_assert( vm_system_type in quadicon_os_icon.lower(), "quad icon: '{}' not in '{}'".format(vm_system_type, quadicon_os_icon)) if ssa_vm.system_type != WINDOWS: compare_linux_vm_data(ssa_vm) else: # Make sure windows-specific data is not empty compare_windows_vm_data(ssa_vm) @pytest.mark.rhv3 @pytest.mark.tier(2) @pytest.mark.long_running @pytest.mark.meta(blockers=[ GH('ManageIQ/integration_tests:8157', unblock=lambda provider: not provider.one_of(RHEVMProvider)) ]) def test_ssa_schedule(ssa_vm, schedule_ssa, soft_assert, vm_system_type): """ Tests SSA can be performed and returns sane results Metadata: test_flag: vm_analysis Polarion: assignee: sbulage casecomponent: SmartState caseimportance: critical initialEstimate: 1/2h """ # Check release and quadicon quadicon_os_icon = ssa_vm.find_quadicon().data['os']
from cfme.base.credential import Credential from cfme.utils.appliance.implementations.ui import navigate_to from cfme.utils.auth import (OpenLDAPAuthProvider, OpenLDAPSAuthProvider, ActiveDirectoryAuthProvider, FreeIPAAuthProvider, AmazonAuthProvider) from cfme.utils.blockers import GH from cfme.utils.conf import auth_data from cfme.utils.log import logger from cfme.utils.wait import wait_for pytestmark = [ pytest.mark.uncollectif(lambda appliance: appliance.is_pod), pytest.mark.meta(blockers=[ GH( 'ManageIQ/integration_tests:6465', # need SSL openldap server unblock=lambda auth_mode, prov_key: not (auth_mode in [ 'external', 'ldaps' ] and auth_data.auth_providers[prov_key].type == 'openldaps')) ]) ] # map auth provider types, auth_modes, and user_types for test matrix # first key level is auth mode # second key level is provider type (auth_provider key in parametrization) # finally, user_types valid for testing on the above combination of provider+mode test_param_maps = { 'amazon': { AmazonAuthProvider.auth_type: { 'user_types': ['username'] } },
network_provider = collection.instantiate(prov_class=NetworkProvider, name=net_manager) collection = appliance.collections.cloud_networks ovn_network = collection.create(test_name, 'tenant', network_provider, net_manager, 'None') yield ovn_network if ovn_network.exists: ovn_network.delete() @pytest.mark.rhv1 @test_requirements.rhev @pytest.mark.meta(blockers=[ GH('ManageIQ/integration_tests:8128'), BZ(1649886, unblock=lambda provider: not provider.one_of(RHEVMProvider)) ]) def test_provision_vm_to_virtual_network(appliance, setup_provider, provider, request, provisioning, network): """ Tests provisioning a vm from a template to a virtual network Metadata: test_flag: provision Polarion: assignee: anikifor casecomponent: Provisioning initialEstimate: 1/4h """ vm_name = random_vm_name('provd')
# Provider filters used to build this module's chargeback test matrix.
# Only cloud/infra providers that declare C&U chargeback test data qualify.
cloud_and_infra = ProviderFilter(
    classes=[CloudProvider, InfraProvider],
    required_fields=[(['cap_and_util', 'test_chargeback'], True)])
not_scvmm = ProviderFilter(classes=[SCVMMProvider], inverted=True)  # SCVMM doesn't support C&U
not_cloud = ProviderFilter(classes=[CloudProvider], inverted=True)
not_ec2_gce = ProviderFilter(classes=[GCEProvider, EC2Provider], inverted=True)

pytestmark = [
    pytest.mark.tier(2),
    pytest.mark.provider(gen_func=providers,
                         filters=[cloud_and_infra, not_scvmm],
                         scope='module'),
    pytest.mark.usefixtures('has_no_providers_modscope', 'setup_provider_modscope'),
    test_requirements.chargeback,
    # Azure runs are blocked by ManageIQ/manageiq#20237; other providers
    # are unblocked and still collected.
    pytest.mark.meta(blockers=[
        GH('ManageIQ/manageiq:20237',
           unblock=lambda provider: not provider.one_of(AzureProvider))
    ])
]

# Allowed deviation between the reported value in the Chargeback report and the estimated value.
DEV = 1


def cost_comparison(estimate, expected):
    """Return True when *expected* is within ``DEV`` of *estimate*.

    *expected* is a currency string from the Chargeback report (it may
    contain '$' and thousands separators); *estimate* is numeric.
    """
    # Strip currency formatting ('$' and ',') so the string parses as a float.
    subbed = re.sub(r'[$,]', r'', expected)
    return float(estimate - DEV) <= float(subbed) <= float(estimate + DEV)


@pytest.fixture(scope="module")
def vm_ownership(enable_candu, provider, appliance):
    # In these tests, chargeback reports are filtered on VM owner. So, VMs have to be
from collections import namedtuple from copy import copy from fauxfactory import gen_alphanumeric, gen_integer import pytest from cfme.containers.provider import ContainersProvider from cfme.utils.version import current_version from cfme.common.provider_views import ContainerProvidersView from cfme.utils.blockers import GH pytestmark = [ pytest.mark.meta( blockers=[GH('ManageIQ/integration_tests:6409', upstream_only=False)]), pytest.mark.uncollectif(lambda: current_version() < "5.8.0.3"), pytest.mark.provider([ContainersProvider], scope='module') ] alphanumeric_name = gen_alphanumeric(10) long_alphanumeric_name = gen_alphanumeric(100) integer_name = str(gen_integer(0, 100000000)) provider_names = alphanumeric_name, integer_name, long_alphanumeric_name DEFAULT_SEC_PROTOCOLS = ( pytest.mark.polarion('CMP-10598')('SSL trusting custom CA'), pytest.mark.polarion('CMP-10597')('SSL without validation'), pytest.mark.polarion('CMP-10599')('SSL')) checked_item = namedtuple('TestItem', ['default_sec_protocol', 'hawkular_sec_protocol']) TEST_ITEMS = ( pytest.mark.polarion('CMP-10593')(checked_item('SSL trusting custom CA',
with update(email_configuration): email_configuration.fields = { k: { 'value': ', '.join(v) } for k, v in test_data.items() } request.addfinalizer(email_configuration.delete_if_exists) yield test_data @pytest.mark.meta(automates=[ 1472844, 1676910, 1818172, 1380197, 1688500, 1702304, 1783511, GH(('ManageIQ/manageiq', 20260)) ]) @pytest.mark.parametrize("action", ["edit", "approve", "deny"]) def test_provision_approval(appliance, provider, vm_name, smtp_test, request, action, soft_assert, email_addresses_configuration): """ Tests provisioning approval. Tests couple of things. * Approve manually * Approve by editing the request to conform Prerequisites: * A provider that can provision. * Automate role enabled * User with e-mail set so you can receive and view them
# Ensure vm is running vm.mgmt.ensure_state(VmState.RUNNING) # Wait for VM be suspended by CFME try: vm.mgmt.wait_for_state(VmState.SUSPENDED, timeout=600, delay=5) except TimedOutError: pytest.fail(f"CFME did not suspend the VM {vm.name}") @pytest.mark.provider( [VMwareProvider, RHEVMProvider, OpenStackProvider, AzureProvider], scope="module", selector=ONE_PER_TYPE) @pytest.mark.meta(blockers=[ GH( "ManageIQ/manageiq:20247", unblock=lambda provider: not provider.one_of(OpenStackProvider), ) ]) def test_action_prevent_event(request, vm, vm_off, policy_for_testing): """ This test tests action 'Prevent current event from proceeding' This test sets the policy that it prevents powering the VM up. Then the vm is powered up and then it waits that VM does not come alive. Metadata: test_flag: actions, provision Polarion: assignee: dgaikwad initialEstimate: 1/6h casecomponent: Control
cells=cells, partial_check=True) order_request.wait_for_request(method='ui') msg = f"Request failed with the message {order_request.row.last_message.text}" assert order_request.is_succeeded(method='ui'), msg myservice = MyService(appliance, catalog_item.name) myservice.retire() @pytest.mark.uncollectif(versioncheck, reason='API V1 not supported since Tower 3.6.') @pytest.mark.ignore_stream('5.10') @pytest.mark.customer_scenario @pytest.mark.meta(automates=[1740814]) @pytest.mark.parametrize('job_type', ['template'], ids=['template_job']) @pytest.mark.meta(blockers=[ GH(10294, unblock=lambda provider: provider.version != Version("3.5")) ]) def test_change_ansible_tower_job_template(catalog_item, job_type, ansible_api_version_change): """ Bugzilla: 1740814 Polarion: assignee: jhenner casecomponent: Services initialEstimate: 1/16h startsin: 5.11 testSteps: 1. Add a Ansible Tower provider 2. Add an Ansible Tower Catalog Item with 'Display in Catalog' Checked
import pytest from widgetastic.utils import partial_match from cfme import test_requirements from cfme.infrastructure.provider import InfraProvider from cfme.infrastructure.pxe import get_pxe_server_from_config, get_template_from_config from cfme.services.service_catalogs import ServiceCatalogs from cfme.utils import testgen from cfme.utils.blockers import GH from cfme.utils.conf import cfme_data from cfme.utils.generators import random_vm_name from cfme.utils.log import logger pytestmark = [ pytest.mark.meta(server_roles="+automate", blockers=[GH('ManageIQ/integration_tests:7479')]), pytest.mark.usefixtures('uses_infra_providers'), test_requirements.service, pytest.mark.tier(2) ] def pytest_generate_tests(metafunc): # Filter out providers without provisioning data or hosts defined argnames, argvalues, idlist = testgen.providers_by_class( metafunc, [InfraProvider], required_fields=[['provisioning', 'pxe_server'], ['provisioning', 'pxe_image'], ['provisioning', 'pxe_image_type'], ['provisioning', 'pxe_kickstart'], ['provisioning', 'pxe_template'], ['provisioning', 'datastore'],
from cfme.cloud.provider.openstack import OpenStackProvider from cfme.infrastructure.provider import InfraProvider from cfme.infrastructure.provider.virtualcenter import VMwareProvider from cfme.markers.env_markers.provider import providers from cfme.services.myservice import MyService from cfme.services.myservice.ssui import DetailsMyServiceView from cfme.utils import ssh from cfme.utils.appliance import ViaSSUI from cfme.utils.blockers import BZ, GH from cfme.utils.conf import credentials from cfme.utils.log import logger from cfme.utils.providers import ProviderFilter from cfme.utils.wait import wait_for pytestmark = [ pytest.mark.meta(server_roles="+automate", blockers=[GH('ManageIQ/integration_tests:7479')]), test_requirements.ssui, pytest.mark.long_running, pytest.mark.provider(gen_func=providers, filters=[ProviderFilter(classes=[InfraProvider, CloudProvider], required_fields=['provisioning'])]) ] @pytest.mark.rhv1 @pytest.mark.parametrize('context', [ViaSSUI]) def test_myservice_crud(appliance, setup_provider, context, order_service): """Test Myservice crud in SSUI. Metadata: test_flag: ssui, services
if not pxe_server.exists(): pxe_server.create() pxe_server.set_pxe_image_type(provisioning['pxe_image'], provisioning['pxe_image_type']) @pytest.fixture(scope="function") def vm_name(): vm_name = 'test_pxe_prov_{}'.format(fauxfactory.gen_alphanumeric()) return vm_name @pytest.mark.rhv1 @pytest.mark.meta(blockers=[BZ(1633516, forced_streams=['5.10'], unblock=lambda provider: not provider.one_of(RHEVMProvider)), GH('ManageIQ/integration_tests:7965')]) def test_pxe_provision_from_template(appliance, provider, vm_name, smtp_test, setup_provider, request, setup_pxe_servers_vm_prov): """Tests provisioning via PXE Metadata: test_flag: pxe, provision suite: infra_provisioning """ # generate_tests makes sure these have values ( pxe_template, host, datastore, pxe_server, pxe_image, pxe_kickstart, pxe_root_password, pxe_image_type, pxe_vlan ) = map(
password=conf.credentials[reg_method]['password'], repo_name=repo, organization=reg_data.get('organization'), use_proxy=use_proxy, proxy_url=proxy_url, proxy_username=proxy_username, proxy_password=proxy_password, set_default_repository=set_default_repo ) red_hat_updates.update_registration(cancel=True) @pytest.mark.rhel_testing @pytest.mark.ignore_stream("upstream") @pytest.mark.meta(automates=[1532201, 1673136]) @pytest.mark.meta(blockers=[GH('ManageIQ/manageiq:20390', unblock=lambda reg_data: not reg_data['url'])]) def test_rh_registration( temp_appliance_preconfig_funcscope, request, reg_method, reg_data, proxy_url, proxy_creds): """ Tests whether an appliance can be registered against RHSM and SAT6 Polarion: assignee: jhenner caseimportance: high casecomponent: Configuration initialEstimate: 1/12h Bugzilla: 1532201 """ repo = reg_data.get('enable_repo') if not repo:
verify_secret=password, ) user_obj = appliance.collections.users.instantiate(name=data['fullname'], credential=credentials) try: request.addfinalizer(user_obj.delete) except CandidateNotFound: logger.warning('User was not found during deletion') return user_obj @pytest.mark.tier(1) @pytest.mark.parametrize( "add_group", ['create_group', 'retrieve_group', 'evm_default_group']) @pytest.mark.meta(blockers=[ GH('ManageIQ/integration_tests:6465', unblock=lambda auth_mode: auth_mode != 'ext_openldap') ]) def test_auth_configure(appliance, request, configure_auth, group, user, data): """This test checks whether different cfme auth modes are working correctly. authmodes tested as part of this test: ext_ipa, ext_openldap, miq_openldap e.g. test_auth[ext-ipa_create-group] Prerequisities: * ``auth_data.yaml`` file Steps: * Make sure corresponding auth_modes data is updated to ``auth_data.yaml`` * this test fetches the auth_modes from yaml and generates tests per auth_mode. """ request.addfinalizer(appliance.server.login_admin) with user: appliance.server.login(user)
collection = appliance.collections.users user = collection.create( name='user_{}'.format(fauxfactory.gen_alphanumeric()), credential=new_credential, email=fauxfactory.gen_email(), groups=new_group, cost_center='Workload', value_assign='Database') yield user if user.exists: user.delete() @pytest.mark.rhv3 @pytest.mark.meta(blockers=[GH('ManageIQ/integration_tests:7385', unblock=lambda provider, appliance_version: not provider.one_of(RHEVMProvider))]) # first arg of parametrize is the list of fixtures or parameters, # second arg is a list of lists, with each one a test is to be generated # sequence is important here # indirect is the list where we define which fixtures are to be passed values indirectly. @pytest.mark.parametrize( ['set_child_tenant_quota', 'custom_prov_data', 'extra_msg', 'approve'], [ [('cpu', '2'), {'hardware': {'num_sockets': '8'}}, '', False], [('storage', '0.01'), {}, '', False], [('memory', '2'), {'hardware': {'memory': '4096'}}, '', False], [('vm', '1'), {'catalog': {'num_vms': '4'}}, '###', True] ], indirect=['set_child_tenant_quota'], ids=['max_cpu', 'max_storage', 'max_memory', 'max_vms']
fauxfactory.gen_alphanumeric()), credential=new_credential, email='*****@*****.**', groups=new_group, cost_center='Workload', value_assign='Database') yield user if user.exists: user.delete() @pytest.mark.rhel_testing @pytest.mark.rhv3 @pytest.mark.meta(blockers=[ GH('ManageIQ/integration_tests:7385', unblock=lambda provider, appliance_version: not provider.one_of( RHEVMProvider) or appliance_version < '5.9') ]) # first arg of parametrize is the list of fixtures or parameters, # second arg is a list of lists, with each one a test is to be generated # sequence is important here # indirect is the list where we define which fixtures are to be passed values indirectly. @pytest.mark.parametrize( ['set_child_tenant_quota', 'custom_prov_data', 'extra_msg', 'approve'], [[('cpu', '2'), { 'hardware': { 'num_sockets': '8' } }, '', False], [('storage', '0.01'), {}, '', False], [('memory', '2'), { 'hardware': {
if not iso_datastore.exists(): iso_datastore.create() # Fails on upstream, BZ1109256 iso_datastore.set_iso_image_type(provisioning['iso_file'], provisioning['iso_image_type']) if not iso_cust_template.exists(): iso_cust_template.create() @pytest.fixture(scope="function") def vm_name(): vm_name = 'test_iso_prov_{}'.format(fauxfactory.gen_alphanumeric(8)) return vm_name @pytest.mark.tier(2) @pytest.mark.meta(blockers=[GH('ManageIQ/integration_tests:6692', unblock=lambda provider: not provider.one_of(RHEVMProvider))]) def test_iso_provision_from_template(appliance, provider, vm_name, smtp_test, datastore_init, request, setup_provider): """Tests ISO provisioning Metadata: test_flag: iso, provision suite: infra_provisioning """ # generate_tests makes sure these have values iso_template, host, datastore, iso_file, iso_kickstart,\ iso_root_password, iso_image_type, vlan = map(provider.data['provisioning'].get, ('pxe_template', 'host', 'datastore', 'iso_file', 'iso_kickstart', 'iso_root_password', 'iso_image_type', 'vlan')) request.addfinalizer(lambda: cleanup_vm(vm_name, provider))
service_catalogs.order() logger.info('Waiting for cfme provision request for service %s', catalog_item.name) cells = {'Description': catalog_item.name} order_request = appliance.collections.requests.instantiate( cells=cells, partial_check=True) order_request.wait_for_request(method='ui') msg = 'Request failed with the message {}'.format( order_request.row.last_message.text) assert order_request.is_succeeded(method='ui'), msg appliance.user.my_settings.default_views.set_default_view( 'Configuration Management Providers', 'List View') @pytest.mark.meta(blockers=[GH('ManageIQ/integration_tests:8610')]) def test_retire_ansible_service(appliance, catalog_item, request, job_type): """Tests retiring of catalog items for Ansible Template and Workflow jobs Metadata: test_flag: provision Polarion: assignee: nachandr casecomponent: Services caseimportance: medium initialEstimate: 1/4h """ service_catalogs = ServiceCatalogs(appliance, catalog_item.catalog, catalog_item.name) service_catalogs.order() logger.info('Waiting for cfme provision request for service %s',
from cfme.infrastructure.provider import InfraProvider from cfme.infrastructure.provider.virtualcenter import VMwareProvider from cfme.markers.env_markers.provider import providers from cfme.services.myservice import MyService from cfme.services.myservice.ssui import DetailsMyServiceView from cfme.utils import ssh from cfme.utils.appliance import ViaSSUI from cfme.utils.blockers import BZ, GH from cfme.utils.conf import credentials from cfme.utils.log import logger from cfme.utils.providers import ProviderFilter from cfme.utils.wait import wait_for pytestmark = [ pytest.mark.meta(server_roles="+automate", blockers=[GH('ManageIQ/integration_tests:7297')]), test_requirements.ssui, pytest.mark.long_running, pytest.mark.provider(gen_func=providers, filters=[ ProviderFilter( classes=[InfraProvider, CloudProvider], required_fields=['provisioning']) ]) ] @pytest.mark.rhv1 @pytest.mark.meta(blockers=[ BZ(1544535, forced_streams=['5.9']), GH('ManageIQ/integration_tests:7297') ])
import cfme.intelligence.chargeback.assignments as cb import cfme.intelligence.chargeback.rates as rates from cfme.infrastructure.provider import InfraProvider from cfme.infrastructure.provider.scvmm import SCVMMProvider from cfme.services.dashboard import Dashboard from cfme import test_requirements from cfme.utils.appliance import ViaSSUI from cfme.utils.blockers import GH from cfme.utils.log import logger from cfme.utils.version import current_version from cfme.utils.wait import wait_for pytestmark = [ pytest.mark.meta(server_roles="+automate", blockers=[GH('ManageIQ/integration_tests:7479')]), pytest.mark.usefixtures('uses_infra_providers'), test_requirements.ssui, pytest.mark.long_running, pytest.mark.ignore_stream("upstream"), pytest.mark.provider([InfraProvider], required_fields=[['provisioning', 'template'], ['provisioning', 'host'], ['provisioning', 'datastore']], scope="module"), ] @pytest.fixture(scope="module") def new_compute_rate(enable_candu): # Create a new Compute Chargeback rate
""" Polarion: assignee: dgaikwad caseimportance: high casecomponent: Appliance initialEstimate: 1/6h """ unconfigured_appliance.appliance_console_cli.configure_appliance_dedicated_db( app_creds['username'], app_creds['password'], 'vmdb_production', unconfigured_appliance.unpartitioned_disks[0]) wait_for(lambda: unconfigured_appliance.db.is_dedicated_active) @test_requirements.ha_proxy @pytest.mark.tier(2) @pytest.mark.meta(blockers=[GH('ManageIQ/manageiq:20455')]) def test_appliance_console_cli_ha_crud(unconfigured_appliances, app_creds): """Tests the configuration of HA with three appliances including failover to standby node Polarion: assignee: jhenner caseimportance: high casecomponent: Appliance initialEstimate: 1h """ apps = unconfigured_appliances app0_ip = apps[0].hostname app1_ip = apps[1].hostname # Configure primary database apps[0].appliance_console_cli.configure_appliance_dedicated_db( app_creds['username'], app_creds['password'], 'vmdb_production',