def test_workload_capacity_and_utilization(request, scenario, appliance):
    """Runs through provider based scenarios enabling C&U and running for a set period of time.
    Memory Monitor creates graphs and summary at the end of each scenario.

    Polarion:
        assignee: rhcf3_machine
        initialEstimate: 1/4h
    """
    from_ts = int(time.time() * 1000)
    logger.debug('Scenario: {}'.format(scenario['name']))

    appliance.clean_appliance()

    quantifiers = {}
    scenario_data = {'appliance_ip': appliance.hostname,
        'appliance_name': conf.cfme_performance['appliance']['appliance_name'],
        'test_dir': 'workload-cap-and-util',
        'test_name': 'Capacity and Utilization',
        'appliance_roles': ','.join(roles_cap_and_util),
        'scenario': scenario}
    monitor_thread = SmemMemoryMonitor(appliance.ssh_client, scenario_data)

    def cleanup_workload(scenario, from_ts, quantifiers, scenario_data):
        starttime = time.time()
        to_ts = int(starttime * 1000)
        g_urls = get_scenario_dashboard_urls(scenario, from_ts, to_ts)
        logger.debug('Started cleaning up monitoring thread.')
        monitor_thread.grafana_urls = g_urls
        monitor_thread.signal = False
        monitor_thread.join()
        add_workload_quantifiers(quantifiers, scenario_data)
        timediff = time.time() - starttime
        logger.info('Finished cleaning up monitoring thread in {}'.format(timediff))
    request.addfinalizer(lambda: cleanup_workload(scenario, from_ts, quantifiers, scenario_data))

    monitor_thread.start()

    appliance.wait_for_miq_server_workers_started(poll_interval=2)
    appliance.update_server_roles({role: True for role in roles_cap_and_util})
    for provider in scenario['providers']:
        get_crud(provider).create_rest()
    logger.info('Sleeping for Refresh: {}s'.format(scenario['refresh_sleep_time']))
    time.sleep(scenario['refresh_sleep_time'])
    appliance.set_cap_and_util_all_via_rails()

    # Variable amount of time for C&U collections/processing
    total_time = scenario['total_time']
    starttime = time.time()
    elapsed_time = 0
    while (elapsed_time < total_time):
        elapsed_time = time.time() - starttime
        time_left = total_time - elapsed_time
        logger.info('Time elapsed: {}/{}'.format(round(elapsed_time, 2), total_time))
        if (time_left > 0 and time_left < 300):
            time.sleep(time_left)
        elif time_left > 0:
            time.sleep(300)

    quantifiers['Elapsed_Time'] = round(elapsed_time, 2)
    logger.info('Test Ending...')
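
# NOTE: Illustrative sketch only. The 'scenario' fixture for the C&U workload comes from the
# framework's performance configuration (not shown in this section); the keys below are the ones
# this test actually reads (scenario['name'], ['providers'], ['refresh_sleep_time'],
# ['total_time']), while the name and values are hypothetical placeholders.
EXAMPLE_CAP_AND_UTIL_SCENARIO = {
    'name': 'cap-and-util-example',    # used for logging and Grafana dashboard URL lookups
    'providers': ['example-provider'],  # provider keys passed to get_crud(provider).create_rest()
    'refresh_sleep_time': 600,          # seconds to wait after adding providers for the refresh
    'total_time': 3600,                 # seconds the C&U collection/processing loop runs
}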
def test_workload_memory_leak(request, scenario, appliance, provider):
    """Runs through provider based scenarios setting one worker instance and maximum threshold and
    running for a set period of time. Memory Monitor creates graphs and summary info.

    Polarion:
        assignee: rhcf3_machine
        casecomponent: CandU
        initialEstimate: 1/4h
    """
    from_ts = int(time.time() * 1000)
    logger.debug('Scenario: {}'.format(scenario['name']))

    appliance.clean_appliance()

    quantifiers = {}
    scenario_data = {'appliance_ip': appliance.hostname,
        'appliance_name': conf.cfme_performance['appliance']['appliance_name'],
        'test_dir': 'workload-memory-leak',
        'test_name': 'Memory Leak',
        'appliance_roles': ','.join(roles_memory_leak),
        'scenario': scenario}
    monitor_thread = SmemMemoryMonitor(appliance.ssh_client, scenario_data)

    def cleanup_workload(scenario, from_ts, quantifiers, scenario_data):
        starttime = time.time()
        to_ts = int(starttime * 1000)
        g_urls = get_scenario_dashboard_urls(scenario, from_ts, to_ts)
        logger.debug('Started cleaning up monitoring thread.')
        monitor_thread.grafana_urls = g_urls
        monitor_thread.signal = False
        monitor_thread.join()
        add_workload_quantifiers(quantifiers, scenario_data)
        timediff = time.time() - starttime
        logger.info('Finished cleaning up monitoring thread in {}'.format(timediff))
    request.addfinalizer(lambda: cleanup_workload(scenario, from_ts, quantifiers, scenario_data))

    monitor_thread.start()

    appliance.wait_for_miq_server_workers_started(poll_interval=2)
    appliance.update_server_roles({role: True for role in roles_memory_leak})
    prepare_workers(appliance)
    provider.create()

    total_time = scenario['total_time']
    starttime = time.time()
    elapsed_time = 0
    while (elapsed_time < total_time):
        elapsed_time = time.time() - starttime
        time_left = total_time - elapsed_time
        logger.info('Time elapsed: {}/{}'.format(round(elapsed_time, 2), total_time))
        if (time_left > 0 and time_left < 300):
            time.sleep(time_left)
        elif time_left > 0:
            time.sleep(300)

    quantifiers['Elapsed_Time'] = round(elapsed_time, 2)
    logger.info('Test Ending...')
def test_idle(appliance, request, scenario):
    """Runs an appliance at idle with specific roles turned on for specific amount of time.
    Memory Monitor creates graphs and summary at the end of the scenario.

    Polarion:
        assignee: rhcf3_machine
        casecomponent: CandU
        initialEstimate: 1/4h
    """
    from_ts = int(time.time() * 1000)
    logger.debug('Scenario: {}'.format(scenario['name']))

    appliance.clean_appliance()

    quantifiers = {}
    scenario_data = {'appliance_ip': appliance.hostname,
        'appliance_name': cfme_performance['appliance']['appliance_name'],
        'test_dir': 'workload-idle',
        'test_name': 'Idle with {} Roles'.format(scenario['name']),
        'appliance_roles': ', '.join(scenario['roles']),
        'scenario': scenario}
    monitor_thread = SmemMemoryMonitor(appliance.ssh_client(), scenario_data)

    def cleanup_workload(from_ts, quantifiers, scenario_data):
        starttime = time.time()
        to_ts = int(starttime * 1000)
        g_urls = get_scenario_dashboard_urls(scenario, from_ts, to_ts)
        logger.debug('Started cleaning up monitoring thread.')
        monitor_thread.grafana_urls = g_urls
        monitor_thread.signal = False
        monitor_thread.join()
        add_workload_quantifiers(quantifiers, scenario_data)
        timediff = time.time() - starttime
        logger.info('Finished cleaning up monitoring thread in {}'.format(timediff))
    request.addfinalizer(lambda: cleanup_workload(from_ts, quantifiers, scenario_data))

    monitor_thread.start()

    appliance.wait_for_miq_server_workers_started(poll_interval=2)
    appliance.update_server_roles({role: True for role in scenario['roles']})

    s_time = scenario['total_time']
    logger.info('Idling appliance for {}s'.format(s_time))
    time.sleep(s_time)

    quantifiers['Elapsed_Time'] = s_time
    logger.info('Test Ending...')
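
# NOTE: Hypothetical example of an idle-workload scenario entry, based only on the keys read by
# test_idle above (scenario['name'], ['roles'], ['total_time']). The role names and timing values
# are placeholders, not values taken from any real configuration.
EXAMPLE_IDLE_SCENARIO = {
    'name': 'idle-default-roles',
    'roles': ['automate', 'database_operations', 'event', 'ems_inventory',
              'ems_operations', 'reporting', 'scheduler', 'user_interface', 'web_services'],
    'total_time': 3600,  # seconds the appliance sits idle while SmemMemoryMonitor samples memory
}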
def test_workload_capacity_and_utilization_rep(appliance, request, scenario, setup_perf_provider):
    """Runs through provider based scenarios enabling C&U and replication, run for a set period of
    time. Memory Monitor creates graphs and summary at the end of each scenario.

    Polarion:
        assignee: rhcf3_machine
        casecomponent: CandU
        initialEstimate: 1/4h
    """
    from_ts = int(time.time() * 1000)
    ssh_client = appliance.ssh_client()

    ssh_master_args = {
        'hostname': scenario['replication_master']['ip_address'],
        'username': scenario['replication_master']['ssh']['username'],
        'password': scenario['replication_master']['ssh']['password']}
    master_appliance = IPAppliance(hostname=scenario['replication_master']['ip_address'],
                                   openshift_creds=ssh_master_args)

    ssh_client_master = SSHClient(**ssh_master_args)
    logger.debug('Scenario: {}'.format(scenario['name']))

    is_pglogical = True if scenario['replication'] == 'pglogical' else False

    # Turn off master pglogical replication in case a rubyrep scenario follows a pglogical scenario
    appliance.set_pglogical_replication(replication_type=':none')

    # Spawn tail beforehand to prevent unnecessary waiting on MiqServer starting, since the
    # appliance under test is cleaned first, followed by the master appliance
    sshtail_evm = SSHTail('/var/www/miq/vmdb/log/evm.log')
    sshtail_evm.set_initial_file_end()

    logger.info(f'Clean appliance under test ({ssh_client})')
    appliance.clean_appliance()
    logger.info(f'Clean master appliance ({ssh_client_master})')
    master_appliance.clean_appliance()  # Clean Replication master appliance

    if is_pglogical:
        scenario_data = {'appliance_ip': appliance.hostname,
            'appliance_name': cfme_performance['appliance']['appliance_name'],
            'test_dir': 'workload-cap-and-util-rep',
            'test_name': 'Capacity and Utilization Replication (pgLogical)',
            'appliance_roles': ', '.join(roles_cap_and_util_rep),
            'scenario': scenario}
    else:
        scenario_data = {'appliance_ip': cfme_performance['appliance']['ip_address'],
            'appliance_name': cfme_performance['appliance']['appliance_name'],
            'test_dir': 'workload-cap-and-util-rep',
            'test_name': 'Capacity and Utilization Replication (RubyRep)',
            'appliance_roles': ', '.join(roles_cap_and_util_rep),
            'scenario': scenario}
    quantifiers = {}
    monitor_thread = SmemMemoryMonitor(appliance.ssh_client(), scenario_data)

    def cleanup_workload(scenario, from_ts, quantifiers, scenario_data):
        starttime = time.time()
        to_ts = int(starttime * 1000)
        g_urls = get_scenario_dashboard_urls(scenario, from_ts, to_ts)
        logger.debug('Started cleaning up monitoring thread.')
        monitor_thread.grafana_urls = g_urls
        monitor_thread.signal = False
        monitor_thread.join()
        add_workload_quantifiers(quantifiers, scenario_data)
        timediff = time.time() - starttime
        logger.info(f'Finished cleaning up monitoring thread in {timediff}')
    request.addfinalizer(lambda: cleanup_workload(scenario, from_ts, quantifiers, scenario_data))

    monitor_thread.start()

    appliance.wait_for_miq_server_workers_started(evm_tail=sshtail_evm, poll_interval=2)
    appliance.update_server_roles({role: True for role in roles_cap_and_util_rep})
    for provider in scenario['providers']:
        get_crud(provider).create_rest()
    logger.info('Sleeping for Refresh: {}s'.format(scenario['refresh_sleep_time']))
    time.sleep(scenario['refresh_sleep_time'])
    appliance.set_cap_and_util_all_via_rails()

    # Configure Replication
    if is_pglogical:
        # Setup appliance under test to :remote
        appliance.set_pglogical_replication(replication_type=':remote')
        # Setup master appliance to :global
        master_appliance.set_pglogical_replication(replication_type=':global')
        # Setup master to subscribe:
        master_appliance.add_pglogical_replication_subscription(ssh_client_master,
            appliance.hostname)
    else:
        # Setup local towards Master
        appliance.set_rubyrep_replication(scenario['replication_master']['ip_address'])
        # Force uninstall rubyrep for this region from master (Unsure if still needed)
        # ssh_client.run_rake_command('evm:dbsync:uninstall')
        # time.sleep(30)  # Wait to quiesce
        # Turn on DB Sync role
        appliance.update_server_roles({role: True for role in roles_cap_and_util_rep})

    # Variable amount of time for C&U collections/processing
    total_time = scenario['total_time']
    starttime = time.time()
    elapsed_time = 0
    while (elapsed_time < total_time):
        elapsed_time = time.time() - starttime
        time_left = total_time - elapsed_time
        logger.info('Time elapsed: {}/{}'.format(round(elapsed_time, 2), total_time))
        if (time_left > 0 and time_left < 300):
            time.sleep(time_left)
        elif time_left > 0:
            time.sleep(300)

    # Turn off replication:
    if is_pglogical:
        appliance.set_pglogical_replication(replication_type=':none')
    else:
        appliance.update_server_roles({role: True for role in roles_cap_and_util_rep})

    quantifiers['Elapsed_Time'] = round(elapsed_time, 2)
    logger.info('Test Ending...')
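
# NOTE: Illustrative sketch of the replication-specific scenario keys the test above consumes
# (scenario['replication'] and scenario['replication_master']). The hostname and credentials are
# hypothetical placeholders, not values from any real test configuration.
EXAMPLE_REPLICATION_SCENARIO_FRAGMENT = {
    'replication': 'pglogical',  # or 'rubyrep'; selects the :remote/:global path vs the DB Sync path
    'replication_master': {
        'ip_address': '10.0.0.1',  # master (global region) appliance
        'ssh': {'username': 'root', 'password': 'example-password'},
    },
}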
def test_refresh_providers(appliance, request, scenario):
    """Refreshes providers then waits for a specific amount of time.
    Memory Monitor creates graphs and summary at the end of the scenario.

    Polarion:
        assignee: rhcf3_machine
        initialEstimate: 1/4h
    """
    from_ts = int(time.time() * 1000)
    logger.debug('Scenario: {}'.format(scenario['name']))

    appliance.clean_appliance()

    quantifiers = {}
    scenario_data = {'appliance_ip': appliance.hostname,
        'appliance_name': cfme_performance['appliance']['appliance_name'],
        'test_dir': 'workload-refresh-providers',
        'test_name': 'Refresh Providers',
        'appliance_roles': ', '.join(roles_refresh_providers),
        'scenario': scenario}
    monitor_thread = SmemMemoryMonitor(appliance.ssh_client(), scenario_data)

    def cleanup_workload(scenario, from_ts, quantifiers, scenario_data):
        starttime = time.time()
        to_ts = int(starttime * 1000)
        g_urls = get_scenario_dashboard_urls(scenario, from_ts, to_ts)
        logger.debug('Started cleaning up monitoring thread.')
        monitor_thread.grafana_urls = g_urls
        monitor_thread.signal = False
        monitor_thread.join()
        add_workload_quantifiers(quantifiers, scenario_data)
        timediff = time.time() - starttime
        logger.info('Finished cleaning up monitoring thread in {}'.format(timediff))
    request.addfinalizer(lambda: cleanup_workload(scenario, from_ts, quantifiers, scenario_data))

    monitor_thread.start()

    appliance.wait_for_miq_server_workers_started(poll_interval=2)
    appliance.update_server_roles({role: True for role in roles_refresh_providers})
    for prov in scenario['providers']:
        get_crud(prov).create_rest()

    # Variable amount of time for refresh workload
    total_time = scenario['total_time']
    starttime = time.time()
    time_between_refresh = scenario['time_between_refresh']
    total_refreshed_providers = 0

    while ((time.time() - starttime) < total_time):
        start_refresh_time = time.time()
        appliance.rest_api.collections.providers.reload()
        for prov in appliance.rest_api.collections.providers.all:
            prov.action.reload()
            total_refreshed_providers += 1
        iteration_time = time.time()

        refresh_time = round(iteration_time - start_refresh_time, 2)
        elapsed_time = iteration_time - starttime
        logger.debug('Time to Queue Refreshes: {}'.format(refresh_time))
        logger.info('Time elapsed: {}/{}'.format(round(elapsed_time, 2), total_time))

        if refresh_time < time_between_refresh:
            wait_diff = time_between_refresh - refresh_time
            time_remaining = total_time - elapsed_time
            if (time_remaining > 0 and time_remaining < time_between_refresh):
                time.sleep(time_remaining)
            elif time_remaining > 0:
                time.sleep(wait_diff)
        else:
            logger.warn('Time to Queue Refreshes ({}) exceeded time between '
                        '({})'.format(refresh_time, time_between_refresh))

    quantifiers['Elapsed_Time'] = round(time.time() - starttime, 2)
    quantifiers['Queued_Provider_Refreshes'] = total_refreshed_providers
    logger.info('Test Ending...')
def test_provisioning(appliance, request, scenario):
    """Runs through provisioning scenarios using the REST API to continuously provision a VM for a
    specified period of time. Memory Monitor creates graphs and summary at the end of each scenario.

    Polarion:
        assignee: rhcf3_machine
        casecomponent: Provisioning
        initialEstimate: 1/4h
    """
    from_ts = int(time.time() * 1000)
    logger.debug('Scenario: {}'.format(scenario['name']))

    appliance.clean_appliance()

    quantifiers = {}
    scenario_data = {'appliance_ip': appliance.hostname,
        'appliance_name': cfme_performance['appliance']['appliance_name'],
        'test_dir': 'workload-provisioning',
        'test_name': 'Provisioning',
        'appliance_roles': ', '.join(roles_provisioning),
        'scenario': scenario}
    monitor_thread = SmemMemoryMonitor(appliance.ssh_client(), scenario_data)

    provision_order = []

    def cleanup_workload(scenario, from_ts, vms_to_cleanup, quantifiers, scenario_data):
        starttime = time.time()
        to_ts = int(starttime * 1000)
        g_urls = get_scenario_dashboard_urls(scenario, from_ts, to_ts)
        logger.debug('Started cleaning up monitoring thread.')
        appliance.update_server_roles({role: True for role in roles_provisioning_cleanup})
        monitor_thread.grafana_urls = g_urls
        monitor_thread.signal = False
        final_vm_size = len(vms_to_cleanup)
        appliance.rest_api.collections.vms.action.delete(vms_to_cleanup)
        monitor_thread.join()
        logger.info('{} VMs were left over, and {} VMs were deleted in the finalizer.'
            .format(final_vm_size, final_vm_size - len(vms_to_cleanup)))
        logger.info('The following VMs were left over after the test: {}'.format(vms_to_cleanup))
        quantifiers['VMs_To_Delete_In_Finalizer'] = final_vm_size
        quantifiers['VMs_Deleted_In_Finalizer'] = final_vm_size - len(vms_to_cleanup)
        quantifiers['Leftover_VMs'] = vms_to_cleanup
        add_workload_quantifiers(quantifiers, scenario_data)
        timediff = time.time() - starttime
        logger.info('Finished cleaning up monitoring thread in {}'.format(timediff))
    request.addfinalizer(lambda: cleanup_workload(scenario, from_ts, vm_name, quantifiers,
        scenario_data))

    monitor_thread.start()

    appliance.wait_for_miq_server_workers_started(poll_interval=2)
    appliance.update_server_roles({role: True for role in roles_provisioning})
    prov = get_crud(scenario['providers'][0])
    prov.create_rest()
    logger.info('Sleeping for Refresh: {}s'.format(scenario['refresh_sleep_time']))
    time.sleep(scenario['refresh_sleep_time'])

    guid_list = prov.get_template_guids(scenario['templates'])
    guid_cycle = cycle(guid_list)
    cleanup_size = scenario['cleanup_size']
    number_of_vms = scenario['number_of_vms']
    total_time = scenario['total_time']
    time_between_provision = scenario['time_between_provision']
    total_provisioned_vms = 0
    total_deleted_vms = 0
    provisioned_vms = 0

    starttime = time.time()
    while ((time.time() - starttime) < total_time):
        start_iteration_time = time.time()
        provision_list = []
        for i in range(number_of_vms):
            total_provisioned_vms += 1
            provisioned_vms += 1
            vm_to_provision = 'test-{}-prov-{}'.format(
                test_ts, str(total_provisioned_vms).zfill(4))
            guid_to_provision, provider_name = next(guid_cycle)
            provision_order.append((vm_to_provision, provider_name))
            provision_list.append((vm_to_provision, guid_to_provision,
                prov.data['provisioning']['vlan']))

        template = prov.data.templates.get('small_template')
        provision_data = get_provision_data(appliance.rest_api, prov, template.name)
        vm_name = provision_data["vm_fields"]["vm_name"]
        response = appliance.rest_api.collections.provision_requests.action.create(**provision_data)
        assert_response(appliance)
        provision_request = response[0]

        def _finished():
            provision_request.reload()
            if "error" in provision_request.status.lower():
                pytest.fail("Error when provisioning: `{}`".format(provision_request.message))
            return provision_request.request_state.lower() in ("finished", "provisioned")

        wait_for(_finished, num_sec=800, delay=5, message="REST provisioning finishes")

        vm = appliance.rest_api.collections.vms.get(name=vm_name)
        creation_time = time.time()
        provision_time = round(creation_time - start_iteration_time, 2)
        logger.debug('Time to initiate provisioning: {}'.format(provision_time))
        logger.info('{} VMs provisioned so far'.format(total_provisioned_vms))

        if provisioned_vms > cleanup_size * len(scenario['providers']):
            start_remove_time = time.time()
            if appliance.rest_api.collections.vms.action.delete(vm):
                provision_order.pop(0)
                provisioned_vms -= 1
                total_deleted_vms += 1
            deletion_time = round(time.time() - start_remove_time, 2)
            logger.debug('Time to initiate deleting: {}'.format(deletion_time))
            logger.info('{} VMs deleted so far'.format(total_deleted_vms))

        end_iteration_time = time.time()
        iteration_time = round(end_iteration_time - start_iteration_time, 2)
        elapsed_time = end_iteration_time - starttime
        logger.debug('Time to initiate provisioning and deletion: {}'.format(iteration_time))
        logger.info('Time elapsed: {}/{}'.format(round(elapsed_time, 2), total_time))

        if iteration_time < time_between_provision:
            wait_diff = time_between_provision - iteration_time
            time_remaining = total_time - elapsed_time
            if (time_remaining > 0 and time_remaining < time_between_provision):
                time.sleep(time_remaining)
            elif time_remaining > 0:
                time.sleep(wait_diff)
        else:
            logger.warning('Time to initiate provisioning ({}) exceeded time between '
                           '({})'.format(iteration_time, time_between_provision))

    quantifiers['Elapsed_Time'] = round(time.time() - starttime, 2)
    quantifiers['Queued_VM_Provisionings'] = total_provisioned_vms
    quantifiers['Deleted_VMs'] = total_deleted_vms
    logger.info('Provisioned {} VMs and deleted {} VMs during the scenario.'
        .format(total_provisioned_vms, total_deleted_vms))
    logger.info('Test Ending...')
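
# NOTE: Hypothetical provisioning scenario sketch showing only the keys read by test_provisioning
# above ('providers', 'templates', 'refresh_sleep_time', 'number_of_vms', 'cleanup_size',
# 'time_between_provision', 'total_time'); the name and all values are placeholders.
EXAMPLE_PROVISIONING_SCENARIO = {
    'name': 'provisioning-example',
    'providers': ['example-provider'],
    'templates': ['small_template'],  # resolved to template GUIDs via prov.get_template_guids()
    'refresh_sleep_time': 600,
    'number_of_vms': 1,               # VMs queued per iteration
    'cleanup_size': 4,                # oldest VM is deleted once this backlog per provider is exceeded
    'time_between_provision': 60,     # target seconds between provisioning iterations
    'total_time': 3600,
}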
def test_refresh_vms(appliance, request, scenario):
    """Refreshes all VMs, then waits for a specific amount of time.
    Memory Monitor creates graphs and summary at the end of the scenario.

    Polarion:
        assignee: rhcf3_machine
        casecomponent: Appliance
        initialEstimate: 1/4h
    """
    from_ts = int(time.time() * 1000)
    logger.debug('Scenario: {}'.format(scenario['name']))

    appliance.clean_appliance()

    quantifiers = {}
    scenario_data = {'appliance_ip': appliance.hostname,
        'appliance_name': cfme_performance['appliance']['appliance_name'],
        'test_dir': 'workload-refresh-vm',
        'test_name': 'Refresh VMs',
        'appliance_roles': ', '.join(roles_refresh_vms),
        'scenario': scenario}
    monitor_thread = SmemMemoryMonitor(appliance.ssh_client(), scenario_data)

    def cleanup_workload(scenario, from_ts, quantifiers, scenario_data):
        starttime = time.time()
        to_ts = int(starttime * 1000)
        g_urls = get_scenario_dashboard_urls(scenario, from_ts, to_ts)
        logger.debug('Started cleaning up monitoring thread.')
        monitor_thread.grafana_urls = g_urls
        monitor_thread.signal = False
        monitor_thread.join()
        add_workload_quantifiers(quantifiers, scenario_data)
        timediff = time.time() - starttime
        logger.info('Finished cleaning up monitoring thread in {}'.format(timediff))
    request.addfinalizer(lambda: cleanup_workload(scenario, from_ts, quantifiers, scenario_data))

    monitor_thread.start()

    appliance.wait_for_miq_server_workers_started(poll_interval=2)
    appliance.update_server_roles({role: True for role in roles_refresh_vms})
    for prov in scenario['providers']:
        get_crud(prov).create_rest()
    logger.info('Sleeping for refresh: {}s'.format(scenario['refresh_sleep_time']))
    time.sleep(scenario['refresh_sleep_time'])

    full_refresh_threshold_set = False
    if 'full_refresh_threshold' in scenario:
        if scenario['full_refresh_threshold'] != FULL_REFRESH_THRESHOLD_DEFAULT:
            appliance.set_full_refresh_threshold(scenario['full_refresh_threshold'])
            full_refresh_threshold_set = True
    if not full_refresh_threshold_set:
        logger.debug('Keeping full_refresh_threshold at default ({}).'.format(
            FULL_REFRESH_THRESHOLD_DEFAULT))

    refresh_size = scenario['refresh_size']
    vms = appliance.rest_api.collections.vms.all
    vms_iter = cycle(vms)
    logger.debug('Number of VM IDs: {}'.format(len(vms)))

    # Variable amount of time for refresh workload
    total_time = scenario['total_time']
    starttime = time.time()
    time_between_refresh = scenario['time_between_refresh']
    total_refreshed_vms = 0

    while ((time.time() - starttime) < total_time):
        start_refresh_time = time.time()
        refresh_list = [next(vms_iter) for x in range(refresh_size)]
        for vm in refresh_list:
            vm.action.reload()
        total_refreshed_vms += len(refresh_list)
        iteration_time = time.time()

        refresh_time = round(iteration_time - start_refresh_time, 2)
        elapsed_time = iteration_time - starttime
        logger.debug('Time to Queue VM Refreshes: {}'.format(refresh_time))
        logger.info('Time elapsed: {}/{}'.format(round(elapsed_time, 2), total_time))

        if refresh_time < time_between_refresh:
            wait_diff = time_between_refresh - refresh_time
            time_remaining = total_time - elapsed_time
            if (time_remaining > 0 and time_remaining < time_between_refresh):
                time.sleep(time_remaining)
            elif time_remaining > 0:
                time.sleep(wait_diff)
        else:
            logger.warn('Time to Queue VM Refreshes ({}) exceeded time between '
                        '({})'.format(refresh_time, time_between_refresh))

    quantifiers['Elapsed_Time'] = round(time.time() - starttime, 2)
    quantifiers['Queued_VM_Refreshes'] = total_refreshed_vms
    logger.info('Test Ending...')
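
# NOTE: Hypothetical refresh-VMs scenario sketch; only the keys read by test_refresh_vms above are
# shown ('providers', 'refresh_sleep_time', 'refresh_size', 'time_between_refresh',
# 'full_refresh_threshold', 'total_time'), all with placeholder values.
EXAMPLE_REFRESH_VMS_SCENARIO = {
    'name': 'refresh-vms-example',
    'providers': ['example-provider'],
    'refresh_sleep_time': 600,
    'refresh_size': 5,              # VMs queued for refresh per iteration
    'time_between_refresh': 60,     # target seconds between refresh iterations
    'full_refresh_threshold': 100,  # only applied when it differs from FULL_REFRESH_THRESHOLD_DEFAULT
    'total_time': 3600,
}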
def test_workload_smartstate_analysis(appliance, request, scenario):
    """Runs through provider based scenarios initiating SmartState Analysis against VMs, Hosts,
    and Datastores.

    Polarion:
        assignee: rhcf3_machine
        initialEstimate: 1/4h
    """
    from_ts = int(time.time() * 1000)
    logger.debug('Scenario: {}'.format(scenario['name']))
    appliance.install_vddk()

    appliance.clean_appliance()

    quantifiers = {}
    scenario_data = {'appliance_ip': appliance.hostname,
        'appliance_name': cfme_performance['appliance']['appliance_name'],
        'test_dir': 'workload-ssa',
        'test_name': 'SmartState Analysis',
        'appliance_roles': ', '.join(roles_smartstate),
        'scenario': scenario}
    monitor_thread = SmemMemoryMonitor(appliance.ssh_client(), scenario_data)

    def cleanup_workload(scenario, from_ts, quantifiers, scenario_data):
        starttime = time.time()
        to_ts = int(starttime * 1000)
        g_urls = get_scenario_dashboard_urls(scenario, from_ts, to_ts)
        logger.debug('Started cleaning up monitoring thread.')
        monitor_thread.grafana_urls = g_urls
        monitor_thread.signal = False
        monitor_thread.join()
        add_workload_quantifiers(quantifiers, scenario_data)
        timediff = time.time() - starttime
        logger.info('Finished cleaning up monitoring thread in {}'.format(timediff))
    request.addfinalizer(lambda: cleanup_workload(scenario, from_ts, quantifiers, scenario_data))

    monitor_thread.start()

    appliance.wait_for_miq_server_workers_started(poll_interval=2)
    appliance.update_server_roles({role: True for role in roles_smartstate})
    for provider in scenario['providers']:
        get_crud(provider).create_rest()
    logger.info('Sleeping for Refresh: {}s'.format(scenario['refresh_sleep_time']))
    time.sleep(scenario['refresh_sleep_time'])

    # Add host credentials and set CFME relationship for RHEVM SSA
    for provider in scenario['providers']:
        for api_host in appliance.rest_api.collections.hosts.all:
            host_collection = appliance.collections.hosts
            test_host = host_collection.instantiate(name=api_host.name, provider=provider)
            host_data = get_host_data_by_name(get_crud(provider), api_host.name)
            credentials = host.get_credentials_from_config(host_data['credentials'])
            test_host.update_credentials_rest(credentials)
    appliance.set_cfme_server_relationship(cfme_performance['appliance']['appliance_name'])

    # Variable amount of time for SmartState Analysis workload
    total_time = scenario['total_time']
    starttime = time.time()
    time_between_analyses = scenario['time_between_analyses']
    total_scanned_vms = 0

    while ((time.time() - starttime) < total_time):
        start_ssa_time = time.time()
        for vm in list(scenario['vms_to_scan'].values())[0]:
            vm_api = appliance.rest_api.collections.vms.get(name=vm)
            vm_api.action.scan()
            total_scanned_vms += 1
        iteration_time = time.time()

        ssa_time = round(iteration_time - start_ssa_time, 2)
        elapsed_time = iteration_time - starttime
        logger.debug('Time to Queue SmartState Analyses: {}'.format(ssa_time))
        logger.info('Time elapsed: {}/{}'.format(round(elapsed_time, 2), total_time))

        if ssa_time < time_between_analyses:
            wait_diff = time_between_analyses - ssa_time
            time_remaining = total_time - elapsed_time
            if (time_remaining > 0 and time_remaining < time_between_analyses):
                time.sleep(time_remaining)
            elif time_remaining > 0:
                time.sleep(wait_diff)
        else:
            logger.warn('Time to Queue SmartState Analyses ({}) exceeded time between '
                        '({})'.format(ssa_time, time_between_analyses))

    quantifiers['Elapsed_Time'] = round(time.time() - starttime, 2)
    quantifiers['Queued_VM_Scans'] = total_scanned_vms
    logger.info('Test Ending...')
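
# NOTE: Hypothetical SmartState Analysis scenario sketch. 'vms_to_scan' maps a provider key to the
# VM names scanned each iteration, matching the access pattern in the test above; all names and
# values here are placeholders, not real configuration.
EXAMPLE_SSA_SCENARIO = {
    'name': 'ssa-example',
    'providers': ['example-provider'],
    'refresh_sleep_time': 600,
    'vms_to_scan': {'example-provider': ['ssa-test-vm-01', 'ssa-test-vm-02']},
    'time_between_analyses': 45,  # target seconds between scan iterations
    'total_time': 3600,
}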