def test_db_restore(request, soft_assert, virtualcenter_provider_crud, ec2_provider_crud):
    appl1, appl2 = get_appliances()

    def finalize():
        appl1.destroy()
        appl2.destroy()
    request.addfinalizer(finalize)

    appl1.ipapp.browser_steal = True
    with appl1.ipapp:
        # Manage infra and cloud providers and set some roles before taking a DB backup
        config.set_server_roles(automate=True)
        roles = config.get_server_roles()
        virtualcenter_provider_crud.setup()
        wait_for_a_provider()
        ec2_provider_crud.setup()
        cloud_provider.wait_for_a_provider()
        providers_appl1 = appl1.ipapp.managed_known_providers
        appl1.ipapp.db.backup()

    # Fetch v2_key and DB backup from the first appliance
    with appl1.ipapp.ssh_client as ssh:
        rand_filename = "/tmp/v2_key_{}".format(fauxfactory.gen_alphanumeric())
        ssh.get_file("/var/www/miq/vmdb/certs/v2_key", rand_filename)
        dump_filename = "/tmp/db_dump_{}".format(fauxfactory.gen_alphanumeric())
        ssh.get_file("/tmp/evm_db.backup", dump_filename)

    with appl2.ipapp.ssh_client as ssh:
        ssh.put_file(rand_filename, "/var/www/miq/vmdb/certs/v2_key")
        ssh.put_file(dump_filename, "/tmp/evm_db.backup")

    appl2.ipapp.browser_steal = True
    with appl2.ipapp:
        # Restore DB on the second appliance
        appl2.ipapp.evmserverd.stop()
        appl2.ipapp.db.drop()
        appl2.ipapp.db.restore()
        appl2.ipapp.start_evm_service()
        appl2.ipapp.wait_for_web_ui()
        wait_for_a_provider()
        cloud_provider.wait_for_a_provider()

        # Assert providers on the second appliance
        providers_appl2 = appl2.ipapp.managed_known_providers
        assert set(providers_appl2).issubset(providers_appl1), \
            'Restored DB is missing some providers'

        # Verify that the existing provider can detect new VMs on the second appliance
        vm = provision_vm(request, virtualcenter_provider_crud)
        soft_assert(vm.find_quadicon().data['state'] == 'currentstate-on')
        soft_assert(vm.provider.mgmt.is_vm_running(vm.name), "vm running")

        # Assert server roles on the second appliance
        for role, is_enabled in config.get_server_roles(db=False).iteritems():
            if is_enabled:
                assert roles[role], "Role '{}' is selected but should not be".format(role)
            else:
                assert not roles[role], "Role '{}' is not selected but should be".format(role)

def test_db_restore(request, soft_assert):
    appl1, appl2 = get_appliances()

    def finalize():
        appl1.destroy()
        appl2.destroy()
    request.addfinalizer(finalize)

    appl1.ipapp.browser_steal = True
    with appl1.ipapp:
        # Manage infra and cloud providers and set some roles before taking a DB backup
        config.set_server_roles(automate=True)
        roles = config.get_server_roles()
        provider_crud = setup_a_provider('infra', 'virtualcenter', validate=True)
        provider_mgmt = provider_crud.get_mgmt_system()
        wait_for_a_provider()
        setup_a_provider('cloud', 'ec2', validate=True)
        cloud_provider.wait_for_a_provider()
        providers_appl1 = appl1.ipapp.managed_providers
        appl1.ipapp.backup_database()

    # Fetch v2_key and DB backup from the first appliance
    with appl1.ipapp.ssh_client as ssh:
        rand_filename = "/tmp/v2_key_{}".format(fauxfactory.gen_alphanumeric())
        ssh.get_file("/var/www/miq/vmdb/certs/v2_key", rand_filename)
        dump_filename = "/tmp/db_dump_{}".format(fauxfactory.gen_alphanumeric())
        ssh.get_file("/tmp/evm_db.backup", dump_filename)

    with appl2.ipapp.ssh_client as ssh:
        ssh.put_file(rand_filename, "/var/www/miq/vmdb/certs/v2_key")
        ssh.put_file(dump_filename, "/tmp/evm_db.backup")

    appl2.ipapp.browser_steal = True
    with appl2.ipapp:
        # Restore DB on the second appliance
        appl2.ipapp.restore_database()
        appl2.ipapp.wait_for_web_ui()
        wait_for_a_provider()
        cloud_provider.wait_for_a_provider()

        # Assert providers on the second appliance
        providers_appl2 = appl2.ipapp.managed_providers
        assert set(providers_appl2).issubset(providers_appl1), \
            'Restored DB is missing some providers'

        # Verify that the existing provider can detect new VMs on the second appliance
        vm = provision_vm(request, provider_crud, provider_mgmt)
        soft_assert(vm.find_quadicon().state == 'currentstate-on')
        soft_assert(vm.provider_crud.get_mgmt_system().is_vm_running(vm.name), "vm running")

        # Assert server roles on the second appliance
        for role, is_enabled in config.get_server_roles(db=False).iteritems():
            if is_enabled:
                assert roles[role], "Role '%s' is selected but should not be" % role
            else:
                assert not roles[role], "Role '%s' is not selected but should be" % role

def ensure_websocket_role_disabled():
    # TODO: This is a temporary solution until we find something better.
    roles = configuration.get_server_roles()
    if 'websocket' in roles and roles['websocket']:
        logger.info('Disabling the websocket role to ensure we get no intrusive popups')
        roles['websocket'] = False
        configuration.set_server_roles(**roles)

def local_setup_provider(request, setup_provider_modscope, provider, vm_analysis_new):
    if provider.type == 'rhevm' and version.current_version() < "5.5":
        # See https://bugzilla.redhat.com/show_bug.cgi?id=1300030
        pytest.skip(
            "SSA is not supported on RHEVM for appliances earlier than 5.5 and upstream"
        )
    if GH("ManageIQ/manageiq:6506").blocks:
        pytest.skip("Upstream provisioning is blocked by "
                    "https://github.com/ManageIQ/manageiq/issues/6506")
    if provider.type == 'virtualcenter':
        store.current_appliance.install_vddk(reboot=True)
        store.current_appliance.wait_for_web_ui()
        try:
            sel.refresh()
        except AttributeError:
            # In case no browser is started
            pass
    set_host_credentials(request, vm_analysis_new, provider)

    # Make sure all roles are set
    roles = configuration.get_server_roles(db=False)
    roles["automate"] = True
    roles["smartproxy"] = True
    roles["smartstate"] = True
    configuration.set_server_roles(**roles)

def set_roles_for_sm():
    roles = get_server_roles()
    roles["storage_metrics_processor"] = True
    roles["storage_metrics_collector"] = True
    roles["storage_metrics_coordinator"] = True
    roles["storage_inventory"] = True
    return set_server_roles(**roles)

def test_server_roles_changing(request, roles):
    """ Test that sets and verifies the server roles in configuration.

    If there is no forced interrupt, it cleans after, so the roles are
    intact after the testing.

    Todo:
        - Use for parametrization on more roles set?
        - Change the yaml role list to dict.
    """
    request.addfinalizer(partial(configuration.set_server_roles,
                                 **configuration.get_server_roles()))  # For reverting back
    # Set roles
    configuration.set_server_roles(**roles)
    flash.assert_no_errors()
    # Get roles and check
    for role, is_enabled in configuration.get_server_roles().iteritems():
        if is_enabled:
            assert roles[role], "Role '%s' is selected but should not be" % role
        else:
            assert not roles[role], "Role '%s' is not selected but should be" % role

def test_server_roles_changing(request, roles):
    """ Test that sets and verifies the server roles in configuration.

    If there is no forced interrupt, it cleans after, so the roles are
    intact after the testing.

    Todo:
        - Use for parametrization on more roles set?
        - Change the yaml role list to dict.
    """
    request.addfinalizer(partial(configuration.set_server_roles,
                                 **configuration.get_server_roles()))  # For reverting back
    # Set roles
    configuration.set_server_roles(**roles)
    flash.assert_no_errors()
    # Get roles and check; use UI because the changes take a while to propagate to DB
    for role, is_enabled in configuration.get_server_roles(db=False).iteritems():
        if is_enabled:
            assert roles[role], "Role '%s' is selected but should not be" % role
        else:
            assert not roles[role], "Role '%s' is not selected but should be" % role

def automate_role_set(request):
    """ Sets the Automate role so that the VM can be provisioned.

    Sets the Automate role state back when the module tests finish.
    """
    from cfme.configure import configuration
    roles = configuration.get_server_roles()
    old_roles = dict(roles)
    roles["automate"] = True
    configuration.set_server_roles(**roles)
    yield
    configuration.set_server_roles(**old_roles)

def local_setup_provider(request, setup_provider_modscope, provider, vm_analysis_data, appliance):
    # TODO: allow for vddk parameterization
    if provider.one_of(VMwareProvider):
        appliance.install_vddk(reboot=True, wait_for_web_ui_after_reboot=True)
        appliance.browser.quit_browser()
        appliance.browser.open_browser()
    set_host_credentials(request, provider, vm_analysis_data)

    # Make sure all roles are set
    roles = configuration.get_server_roles(db=False)
    roles["automate"] = True
    roles["smartproxy"] = True
    roles["smartstate"] = True
    configuration.set_server_roles(**roles)

def test_server_roles_changing(request, roles):
    """ Test that sets and verifies the server roles in configuration.

    If there is no forced interrupt, it cleans after, so the roles are
    intact after the testing.

    Note:
        TODO:
        - Use for parametrization on more roles set?
        - Change the yaml role list to dict.
    """
    request.addfinalizer(
        partial(configuration.set_server_roles,
                **configuration.get_server_roles()))  # For reverting back
    # Set roles
    configuration.set_server_roles(db=False, **roles)
    flash.assert_no_errors()
    # Get roles and check; use UI because the changes take a while to propagate to DB
    for role, is_enabled in configuration.get_server_roles(db=False).iteritems():
        if is_enabled:
            assert roles[role], "Role '{}' is selected but should not be".format(role)
        else:
            assert not roles[role], "Role '{}' is not selected but should be".format(role)

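# Side note, as a minimal standalone sketch: the addfinalizer(partial(...))
# pattern used above snapshots the roles *eagerly*, because
# configuration.get_server_roles() is evaluated when partial() is built, not
# when the finalizer fires. Illustrated with plain functools on a stand-in
# dict (not project API):
from functools import partial

state = {'automate': False}
restore = partial(state.update, dict(state))  # captures a copy of the current state now
state['automate'] = True                      # later mutation does not affect the snapshot
restore()
assert state['automate'] is False
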
def configure_websocket():
    """ Enable the websocket role if it is disabled.

    Currently the fixture cfme/fixtures/base.py disables the websocket role
    to avoid intrusive popups.
    """
    roles = configuration.get_server_roles()
    if 'websocket' in roles and not roles['websocket']:
        logger.info('Enabling the websocket role to allow console connections')
        roles['websocket'] = True
        configuration.set_server_roles(**roles)
    yield
    roles['websocket'] = False
    logger.info('Disabling the websocket role to avoid intrusive popups')
    configuration.set_server_roles(**roles)

def enable_candu():
    try:
        original_roles = get_server_roles()
        new_roles = original_roles.copy()
        new_roles.update({
            'ems_metrics_coordinator': True,
            'ems_metrics_collector': True,
            'ems_metrics_processor': True,
            'automate': False,
            'smartstate': False})
        set_server_roles(**new_roles)
        candu.enable_all()
        yield
    finally:
        candu.disable_all()
        set_server_roles(**original_roles)

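# Design note, as a minimal standalone sketch: enable_candu above wraps the
# yield in try/finally, so the original roles are restored even if
# candu.enable_all() or the test body raises. The fixture below is purely
# illustrative (stand-in state dict, not project API).
import pytest

@pytest.fixture
def restored_roles():
    saved = {'automate': True, 'smartstate': True}
    live = dict(saved)
    try:
        live.update({'automate': False, 'smartstate': False})  # setup mutation
        yield live
    finally:
        live.update(saved)  # teardown runs even on setup/test failure
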
def local_setup_provider(request, setup_provider_modscope, provider, vm_analysis_data):
    if provider.type == 'rhevm' and version.current_version() < "5.5":
        # See https://bugzilla.redhat.com/show_bug.cgi?id=1300030
        pytest.skip("SSA is not supported on RHEVM for appliances earlier than 5.5 and upstream")
    if GH("ManageIQ/manageiq:6506").blocks:
        pytest.skip("Upstream provisioning is blocked by "
                    "https://github.com/ManageIQ/manageiq/issues/6506")
    if provider.type == 'virtualcenter':
        store.current_appliance.install_vddk(reboot=True, wait_for_web_ui_after_reboot=True)
        ensure_browser_open()
    set_host_credentials(request, provider, vm_analysis_data)

    # Make sure all roles are set
    roles = configuration.get_server_roles(db=False)
    roles["automate"] = True
    roles["smartproxy"] = True
    roles["smartstate"] = True
    configuration.set_server_roles(**roles)

def enable_candu():
    # C&U data collection consumes a lot of memory and CPU, so we disable some
    # server roles that are not needed for Chargeback reporting.
    original_roles = get_server_roles()
    new_roles = original_roles.copy()
    new_roles.update({
        'ems_metrics_coordinator': True,
        'ems_metrics_collector': True,
        'ems_metrics_processor': True,
        'automate': False,
        'smartstate': False})
    set_server_roles(**new_roles)
    candu.enable_all()
    yield
    set_server_roles(**original_roles)
    candu.disable_all()

def server_roles(fixtureconf):
    """The fixture that does the work. See usage in :py:mod:`fixtures.server_roles`"""

    # Disable all server roles
    # and then figure out which ones should be enabled
    roles_with_vals = {k: False for k in available_roles}
    if 'clear_roles' in fixtureconf:
        # Only user interface
        roles_with_vals['user_interface'] = True
    elif 'set_default_roles' in fixtureconf:
        # The ones specified in YAML
        roles_list = cfme_data["server_roles"]["sets"]["default"]
        roles_with_vals.update({k: True for k in roles_list})
    elif 'server_roles' in fixtureconf:
        # The ones that are already enabled and enable/disable the ones specified
        # -server_role, +server_role or server_role
        roles_with_vals = get_server_roles()
        fixture_roles = fixtureconf['server_roles']
        if isinstance(fixture_roles, basestring):
            fixture_roles = fixture_roles.split(' ')
        for role in fixture_roles:
            if role.startswith('-'):
                roles_with_vals[role[1:]] = False
            elif role.startswith('+'):
                roles_with_vals[role[1:]] = True
            else:
                roles_with_vals[role] = True
    elif 'server_roles_cfmedata' in fixtureconf:
        roles_list = cfme_data
        # Drills down into cfme_data YAML by selector, expecting a list
        # of roles at the end. A KeyError here probably means the YAML
        # selector is wrong
        for selector in fixtureconf['server_roles_cfmedata']:
            roles_list = roles_list[selector]
        roles_with_vals.update({k: True for k in roles_list})
    else:
        raise Exception('No server role changes defined.')

    if not available_roles.issuperset(set(roles_with_vals)):
        unknown_roles = ', '.join(set(roles_with_vals) - available_roles)
        raise Exception('Unknown server role(s): {}'.format(unknown_roles))

    set_server_roles(**roles_with_vals)

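# Hedged usage sketch for the fixture above. The exact mark wiring is an
# assumption inferred from the 'fixtureconf' keys the fixture inspects
# ('clear_roles', 'set_default_roles', 'server_roles', 'server_roles_cfmedata');
# see :py:mod:`fixtures.server_roles` for the authoritative usage.
import pytest

@pytest.mark.fixtureconf(server_roles='+automate -smartproxy database_operations')
def test_with_adjusted_roles(server_roles):
    # '+automate' enables, '-smartproxy' disables, and a bare role name enables.
    pass
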
def enable_candu(db):
    # C&U data collection consumes a lot of memory and CPU, so we disable some
    # server roles that are not needed for Chargeback reporting.
    original_roles = get_server_roles()
    new_roles = original_roles.copy()
    new_roles.update({
        'ems_metrics_coordinator': True,
        'ems_metrics_collector': True,
        'ems_metrics_processor': True,
        'automate': False,
        'smartstate': False})
    set_server_roles(**new_roles)
    candu.enable_all()
    yield
    candu.disable_all()
    set_server_roles(**original_roles)

def add_server_roles(server_roles, server_roles_mode="add"):
    # Disable all server roles
    # and then figure out which ones should be enabled
    roles_with_vals = {k: False for k in available_roles}
    if server_roles is None:
        # Only user interface
        roles_with_vals['user_interface'] = True
    elif server_roles == "default":
        # The ones specified in YAML
        roles_list = cfme_data["server_roles"]["sets"]["default"]
        roles_with_vals.update({k: True for k in roles_list})
    elif server_roles_mode == "add":
        # The ones that are already enabled and enable/disable the ones specified
        # -server_role, +server_role or server_role
        roles_with_vals = get_server_roles()
        if isinstance(server_roles, basestring):
            server_roles = server_roles.split(' ')
        for role in server_roles:
            if role.startswith('-'):
                roles_with_vals[role[1:]] = False
            elif role.startswith('+'):
                roles_with_vals[role[1:]] = True
            else:
                roles_with_vals[role] = True
    elif server_roles_mode == "cfmedata":
        roles_list = cfme_data
        # Drills down into cfme_data YAML by selector, expecting a list
        # of roles at the end. A KeyError here probably means the YAML
        # selector is wrong
        for selector in server_roles:
            roles_list = roles_list[selector]
        roles_with_vals.update({k: True for k in roles_list})
    else:
        raise Exception('No server role changes defined.')

    if not available_roles.issuperset(set(roles_with_vals)):
        unknown_roles = ', '.join(set(roles_with_vals) - available_roles)
        raise Exception('Unknown server role(s): {}'.format(unknown_roles))

    set_server_roles(**roles_with_vals)

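# Standalone sketch of the '+role'/'-role'/'role' token rule shared by the two
# functions above, so the add/remove semantics can be verified in isolation.
# parse_role_tokens is a hypothetical helper, not project API.
def parse_role_tokens(current_roles, spec):
    roles = dict(current_roles)
    for token in spec.split():
        if token.startswith('-'):
            roles[token[1:]] = False          # '-role' disables
        else:
            roles[token.lstrip('+')] = True   # '+role' and bare 'role' enable
    return roles

assert parse_role_tokens(
    {'automate': False, 'smartproxy': True},
    '+automate -smartproxy websocket'
) == {'automate': True, 'smartproxy': False, 'websocket': True}
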
def resource_usage(vm_ownership, appliance, provider):
    # Retrieve resource usage values from the metric_rollups table.
    average_cpu_used_in_mhz = 0
    average_memory_used_in_mb = 0
    average_network_io = 0
    average_disk_io = 0
    average_storage_used = 0
    consumed_hours = 0
    vm_name = provider.data['cap_and_util']['chargeback_vm']

    metrics = appliance.db.client['metrics']
    rollups = appliance.db.client['metric_rollups']
    ems = appliance.db.client['ext_management_systems']
    logger.info('Deleting METRICS DATA from metrics and metric_rollups tables')

    appliance.db.client.session.query(metrics).delete()
    appliance.db.client.session.query(rollups).delete()

    provider_id = appliance.db.client.session.query(ems).filter(
        ems.name == provider.name).first().id

    # Chargeback reporting is done on hourly and daily rollup values and not real-time
    # values. So, we are capturing C&U data and forcing hourly rollups by running these
    # commands through the Rails console.

    def verify_records_metrics_table(appliance, provider):
        # Verify that real-time records are present in the metrics table.
        vm_name = provider.data['cap_and_util']['chargeback_vm']

        ems = appliance.db.client['ext_management_systems']
        metrics = appliance.db.client['metrics']

        rc, out = appliance.ssh_client.run_rails_command(
            "\"vm = Vm.where(:ems_id => {}).where(:name => {})[0];\
            vm.perf_capture('realtime', 1.hour.ago.utc, Time.now.utc)\"".format(
                provider_id, repr(vm_name)))
        assert rc == 0, "Failed to capture VM C&U data: {}".format(out)

        with appliance.db.client.transaction:
            result = (appliance.db.client.session.query(metrics.id).join(
                ems, metrics.parent_ems_id == ems.id).filter(
                metrics.capture_interval_name == 'realtime',
                metrics.resource_name == vm_name,
                ems.name == provider.name,
                metrics.timestamp >= date.today()))

        for record in appliance.db.client.session.query(metrics).filter(
                metrics.id.in_(result.subquery())):
            if record.cpu_usagemhz_rate_average:
                return True
        return False

    wait_for(verify_records_metrics_table, [appliance, provider], timeout=600,
             fail_condition=False, message='Waiting for VM real-time data')

    # New C&U data may sneak in since 1) C&U server roles are running and 2) collection
    # for clusters and hosts is on. This would mess up our Chargeback calculations, so
    # we are disabling C&U collection after data has been fetched for the last hour.
    original_roles = get_server_roles()
    new_roles = original_roles.copy()
    new_roles.update({
        'ems_metrics_coordinator': False,
        'ems_metrics_collector': False})
    set_server_roles(**new_roles)

    rc, out = appliance.ssh_client.run_rails_command(
        "\"vm = Vm.where(:ems_id => {}).where(:name => {})[0];\
        vm.perf_rollup_range(1.hour.ago.utc, Time.now.utc, 'realtime')\"".format(
            provider_id, repr(vm_name)))
    assert rc == 0, "Failed to rollup VM C&U data: {}".format(out)

    wait_for(verify_records_rollups_table, [appliance, provider], timeout=600,
             fail_condition=False, message='Waiting for hourly rollups')

    # Since we are collecting C&U data for > 1 hour, there will be multiple hourly
    # records per VM in the metric_rollups DB table. The values from these hourly
    # records are summed up.
    with appliance.db.client.transaction:
        result = (appliance.db.client.session.query(rollups.id).join(
            ems, rollups.parent_ems_id == ems.id).filter(
            rollups.capture_interval_name == 'hourly',
            rollups.resource_name == vm_name,
            ems.name == provider.name,
            rollups.timestamp >= date.today()))

    for record in appliance.db.client.session.query(rollups).filter(
            rollups.id.in_(result.subquery())):
        consumed_hours = consumed_hours + 1
        if (record.cpu_usagemhz_rate_average or
                record.cpu_usage_rate_average or
                record.derived_memory_used or
                record.net_usage_rate_average or
                record.disk_usage_rate_average):
            average_cpu_used_in_mhz = average_cpu_used_in_mhz + record.cpu_usagemhz_rate_average
            average_memory_used_in_mb = average_memory_used_in_mb + record.derived_memory_used
            average_network_io = average_network_io + record.net_usage_rate_average
            average_disk_io = average_disk_io + record.disk_usage_rate_average

    for record in appliance.db.client.session.query(rollups).filter(
            rollups.id.in_(result.subquery())):
        if record.derived_vm_used_disk_storage:
            average_storage_used = average_storage_used + record.derived_vm_used_disk_storage

    # Convert storage used in Bytes to GB
    average_storage_used = average_storage_used * math.pow(2, -30)

    return {"average_cpu_used_in_mhz": average_cpu_used_in_mhz,
            "average_memory_used_in_mb": average_memory_used_in_mb,
            "average_network_io": average_network_io,
            "average_disk_io": average_disk_io,
            "average_storage_used": average_storage_used,
            "consumed_hours": consumed_hours}

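# Follow-up sketch on the return value above (assumption: the 'average_*' keys
# hold sums over the hourly rollup records, per the comment in the loop, so a
# caller wanting a true hourly mean would divide by consumed_hours; 'usage' is
# illustrative data only, not output from a real appliance).
import math

usage = {'average_cpu_used_in_mhz': 2400.0, 'consumed_hours': 3}
if usage['consumed_hours']:
    hourly_cpu_mhz = usage['average_cpu_used_in_mhz'] / usage['consumed_hours']
    assert hourly_cpu_mhz == 800.0

# The bytes-to-GB step uses math.pow(2, -30), i.e. division by 2**30, which is
# the binary (GiB) definition of a gigabyte:
assert 1073741824 * math.pow(2, -30) == 1.0
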