def test_idle_default(request):
    """Runs an appliance at idle for a specific amount of time. Memory Monitor creates graphs
    and summary at the end of the scenario."""
    from_ts = int(time.time() * 1000)
    ssh_client = SSHClient()
    clean_appliance(ssh_client)

    monitor_thread = SmemMemoryMonitor(SSHClient(), 'workload-idle', 'default',
        'Idle with Default Roles', get_server_roles_workload_idle_default(separator=', '),
        'No Providers')

    def cleanup_workload(from_ts):
        starttime = time.time()
        to_ts = int(starttime * 1000)
        g_url = get_default_dashboard_url(from_ts, to_ts)
        logger.debug('Started cleaning up monitoring thread.')
        monitor_thread.grafana_url = g_url
        monitor_thread.signal = False
        monitor_thread.join()
        timediff = time.time() - starttime
        logger.info('Finished cleaning up monitoring thread in {}'.format(timediff))
    request.addfinalizer(lambda: cleanup_workload(from_ts))

    monitor_thread.start()
    wait_for_miq_server_ready(poll_interval=2)

    # No need to set server roles as we are using the default set of roles
    s_time = cfme_performance['workloads']['test_idle_default']['total_time']
    logger.info('Idling appliance for {}s'.format(s_time))
    time.sleep(s_time)

    logger.info('Test Ending...')

def test_refresh_providers(request, scenario):
    """Refreshes providers then waits for a specific amount of time. Memory Monitor creates
    graphs and summary at the end of the scenario."""
    from_ts = int(time.time() * 1000)
    ssh_client = SSHClient()
    logger.debug('Scenario: {}'.format(scenario['name']))
    clean_appliance(ssh_client)

    monitor_thread = SmemMemoryMonitor(SSHClient(), 'workload-refresh-providers',
        scenario['name'], 'refresh-providers',
        get_server_roles_workload_refresh_providers(separator=','),
        ', '.join(scenario['providers']))

    def cleanup_workload(scenario, from_ts):
        starttime = time.time()
        to_ts = int(starttime * 1000)
        g_url = get_scenario_dashboard_url(scenario, from_ts, to_ts)
        logger.debug('Started cleaning up monitoring thread.')
        monitor_thread.grafana_url = g_url
        monitor_thread.signal = False
        monitor_thread.join()
        timediff = time.time() - starttime
        logger.info('Finished cleaning up monitoring thread in {}'.format(timediff))
    request.addfinalizer(lambda: cleanup_workload(scenario, from_ts))

    monitor_thread.start()
    wait_for_miq_server_ready(poll_interval=2)
    set_server_roles_workload_refresh_providers(ssh_client)
    add_providers(scenario['providers'])
    id_list = get_all_provider_ids()

    # Variable amount of time for refresh workload
    total_time = scenario['total_time']
    starttime = time.time()
    time_between_refresh = scenario['time_between_refresh']

    while ((time.time() - starttime) < total_time):
        start_refresh_time = time.time()
        refresh_providers(id_list)
        iteration_time = time.time()

        refresh_time = round(iteration_time - start_refresh_time, 2)
        elapsed_time = iteration_time - starttime
        logger.debug('Time to Queue Refreshes: {}'.format(refresh_time))
        logger.info('Time elapsed: {}/{}'.format(round(elapsed_time, 2), total_time))

        if refresh_time < time_between_refresh:
            wait_diff = time_between_refresh - refresh_time
            time_remaining = total_time - elapsed_time
            if (time_remaining > 0 and time_remaining < time_between_refresh):
                time.sleep(time_remaining)
            elif time_remaining > 0:
                time.sleep(wait_diff)
        else:
            logger.warn('Time to Queue Refreshes ({}) exceeded time between Refreshes ({})'.format(
                refresh_time, time_between_refresh))

    logger.info('Test Ending...')

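
# --- Illustrative sketch (not part of the original suite) ------------------------------------
# The refresh loop above queues work, then sleeps for the remainder of the configured interval,
# or for whatever time is left in the scenario, whichever applies. The same pacing pattern
# repeats in the provisioning, SmartState, and VM-refresh workloads below. `pace_interval` is a
# hypothetical helper name used only to show the idea in isolation.
def pace_interval(work_time, interval, total_time, elapsed_time):
    """Return how long to sleep so iterations start roughly `interval` seconds apart
    without overshooting the scenario's `total_time`."""
    if work_time >= interval:
        return 0  # queueing took longer than the interval; start the next batch immediately
    time_remaining = total_time - elapsed_time
    if 0 < time_remaining < interval:
        return time_remaining  # final partial slice of the scenario
    if time_remaining > 0:
        return interval - work_time
    return 0
# Example: queueing refreshes took 12.5s of a 600s interval with 90s left in a 3600s scenario,
# so pace_interval(12.5, 600, 3600, 3510) returns 90; the loop sleeps 90s and exits on the
# next while check.
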
def test_workload_capacity_and_utilization(request, scenario):
    """Runs through provider based scenarios enabling C&U and running for a set period of time.
    Memory Monitor creates graphs and summary at the end of each scenario."""
    from_ts = int(time.time() * 1000)
    ssh_client = SSHClient()
    logger.debug('Scenario: {}'.format(scenario['name']))
    clean_appliance(ssh_client)

    quantifiers = {}
    scenario_data = {'appliance_ip': cfme_performance['appliance']['ip_address'],
        'appliance_name': cfme_performance['appliance']['appliance_name'],
        'test_dir': 'workload-cap-and-util',
        'test_name': 'Capacity and Utilization',
        'appliance_roles': get_server_roles_workload_cap_and_util(separator=', '),
        'scenario': scenario}
    monitor_thread = SmemMemoryMonitor(SSHClient(), scenario_data)

    def cleanup_workload(scenario, from_ts, quantifiers, scenario_data):
        starttime = time.time()
        to_ts = int(starttime * 1000)
        g_url = get_scenario_dashboard_url(scenario, from_ts, to_ts)
        logger.debug('Started cleaning up monitoring thread.')
        monitor_thread.grafana_url = g_url
        monitor_thread.signal = False
        monitor_thread.join()
        add_workload_quantifiers(quantifiers, scenario_data)
        timediff = time.time() - starttime
        logger.info('Finished cleaning up monitoring thread in {}'.format(timediff))
    request.addfinalizer(lambda: cleanup_workload(scenario, from_ts, quantifiers, scenario_data))

    monitor_thread.start()
    wait_for_miq_server_workers_started(poll_interval=2)
    set_server_roles_workload_cap_and_util(ssh_client)
    add_providers(scenario['providers'])
    logger.info('Sleeping for Refresh: {}s'.format(scenario['refresh_sleep_time']))
    time.sleep(scenario['refresh_sleep_time'])
    set_cap_and_util_all_via_rails(ssh_client)

    # Variable amount of time for C&U collections/processing
    total_time = scenario['total_time']
    starttime = time.time()
    elapsed_time = 0
    while (elapsed_time < total_time):
        elapsed_time = time.time() - starttime
        time_left = total_time - elapsed_time
        logger.info('Time elapsed: {}/{}'.format(round(elapsed_time, 2), total_time))
        if (time_left > 0 and time_left < 300):
            time.sleep(time_left)
        elif time_left > 0:
            time.sleep(300)

    quantifiers['Elapsed_Time'] = round(elapsed_time, 2)
    logger.info('Test Ending...')

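
# --- Illustrative sketch (not part of the original suite) ------------------------------------
# The C&U scenarios above (and the replication variant further down) idle in 300-second slices
# until `total_time` expires, waking early for the final partial slice. A hypothetical
# stand-alone version of that wait loop, with the sleep/clock callables injectable so the
# behaviour can be exercised without actually sleeping:
import time


def idle_for(total_time, slice_seconds=300, sleep=time.sleep, clock=time.time):
    starttime = clock()
    elapsed_time = 0
    while elapsed_time < total_time:
        elapsed_time = clock() - starttime
        time_left = total_time - elapsed_time
        if 0 < time_left < slice_seconds:
            sleep(time_left)  # last partial slice
        elif time_left > 0:
            sleep(slice_seconds)
    return round(elapsed_time, 2)
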
def test_idle_default(request):
    """Runs an appliance at idle for a specific amount of time. Memory Monitor creates graphs
    and summary at the end of the scenario."""
    from_ts = int(time.time() * 1000)
    ssh_client = SSHClient()
    clean_appliance(ssh_client)

    quantifiers = {}
    scenario_data = {'appliance_ip': cfme_performance['appliance']['ip_address'],
        'appliance_name': cfme_performance['appliance']['appliance_name'],
        'test_dir': 'workload-idle',
        'test_name': 'Idle with Default Roles',
        'appliance_roles': get_server_roles_workload_idle_default(separator=', '),
        'scenario': {'name': 'default'}}
    monitor_thread = SmemMemoryMonitor(SSHClient(), scenario_data)

    def cleanup_workload(from_ts, quantifiers, scenario_data):
        starttime = time.time()
        to_ts = int(starttime * 1000)
        g_url = get_default_dashboard_url(from_ts, to_ts)
        logger.debug('Started cleaning up monitoring thread.')
        monitor_thread.grafana_url = g_url
        monitor_thread.signal = False
        monitor_thread.join()
        add_workload_quantifiers(quantifiers, scenario_data)
        timediff = time.time() - starttime
        logger.info('Finished cleaning up monitoring thread in {}'.format(timediff))
    request.addfinalizer(lambda: cleanup_workload(from_ts, quantifiers, scenario_data))

    monitor_thread.start()
    wait_for_miq_server_workers_started(poll_interval=2)

    # No need to set server roles as we are using the default set of roles
    s_time = cfme_performance['workloads']['test_idle_default']['total_time']
    logger.info('Idling appliance for {}s'.format(s_time))
    time.sleep(s_time)

    quantifiers['Elapsed_Time'] = s_time
    logger.info('Test Ending...')

def test_idle(request, scenario):
    """Runs an appliance at idle with specific roles turned on for a specific amount of time.
    Memory Monitor creates graphs and summary at the end of the scenario."""
    from_ts = int(time.time() * 1000)
    ssh_client = SSHClient()
    logger.debug('Scenario: {}'.format(scenario['name']))
    clean_appliance(ssh_client)

    quantifiers = {}
    scenario_data = {'appliance_ip': cfme_performance['appliance']['ip_address'],
        'appliance_name': cfme_performance['appliance']['appliance_name'],
        'test_dir': 'workload-idle',
        'test_name': 'Idle with {} Roles'.format(scenario['name']),
        'appliance_roles': ', '.join(scenario['roles']),
        'scenario': scenario}
    monitor_thread = SmemMemoryMonitor(SSHClient(), scenario_data)

    def cleanup_workload(from_ts, quantifiers, scenario_data):
        starttime = time.time()
        to_ts = int(starttime * 1000)
        g_urls = get_scenario_dashboard_urls(scenario, from_ts, to_ts)
        logger.debug('Started cleaning up monitoring thread.')
        monitor_thread.grafana_urls = g_urls
        monitor_thread.signal = False
        monitor_thread.join()
        add_workload_quantifiers(quantifiers, scenario_data)
        timediff = time.time() - starttime
        logger.info('Finished cleaning up monitoring thread in {}'.format(timediff))
    request.addfinalizer(lambda: cleanup_workload(from_ts, quantifiers, scenario_data))

    monitor_thread.start()
    wait_for_miq_server_workers_started(poll_interval=2)
    set_server_roles(ssh_client, scenario['roles'])

    s_time = scenario['total_time']
    logger.info('Idling appliance for {}s'.format(s_time))
    time.sleep(s_time)

    quantifiers['Elapsed_Time'] = s_time
    logger.info('Test Ending...')

def test_provisioning(appliance, request, scenario):
    """Runs through provisioning scenarios using the REST API to continuously provision a VM
    for a specified period of time. Memory Monitor creates graphs and summary at the end of
    each scenario."""
    from_ts = int(time.time() * 1000)
    logger.debug('Scenario: {}'.format(scenario['name']))
    appliance.clean_appliance()

    quantifiers = {}
    scenario_data = {'appliance_ip': appliance.hostname,
        'appliance_name': cfme_performance['appliance']['appliance_name'],
        'test_dir': 'workload-provisioning',
        'test_name': 'Provisioning',
        'appliance_roles': ', '.join(roles_provisioning),
        'scenario': scenario}
    monitor_thread = SmemMemoryMonitor(appliance.ssh_client(), scenario_data)

    provision_order = []

    def cleanup_workload(scenario, from_ts, vms_to_cleanup, quantifiers, scenario_data):
        starttime = time.time()
        to_ts = int(starttime * 1000)
        g_urls = get_scenario_dashboard_urls(scenario, from_ts, to_ts)
        logger.debug('Started cleaning up monitoring thread.')
        appliance.update_server_roles({role: True for role in roles_provisioning_cleanup})
        monitor_thread.grafana_urls = g_urls
        monitor_thread.signal = False
        final_vm_size = len(vms_to_cleanup)
        appliance.rest_api.collections.vms.action.delete(vms_to_cleanup)
        monitor_thread.join()
        logger.info('{} VMs were left over, and {} VMs were deleted in the finalizer.'
            .format(final_vm_size, final_vm_size - len(vms_to_cleanup)))
        logger.info('The following VMs were left over after the test: {}'
            .format(vms_to_cleanup))
        quantifiers['VMs_To_Delete_In_Finalizer'] = final_vm_size
        quantifiers['VMs_Deleted_In_Finalizer'] = final_vm_size - len(vms_to_cleanup)
        quantifiers['Leftover_VMs'] = vms_to_cleanup
        add_workload_quantifiers(quantifiers, scenario_data)
        timediff = time.time() - starttime
        logger.info('Finished cleaning up monitoring thread in {}'.format(timediff))
    request.addfinalizer(lambda: cleanup_workload(scenario, from_ts, vm_name, quantifiers,
        scenario_data))

    monitor_thread.start()
    appliance.wait_for_miq_server_workers_started(poll_interval=2)
    appliance.update_server_roles({role: True for role in roles_provisioning})
    prov = get_crud(scenario['providers'][0])
    prov.create_rest()
    logger.info('Sleeping for Refresh: {}s'.format(scenario['refresh_sleep_time']))
    time.sleep(scenario['refresh_sleep_time'])

    guid_list = prov.get_template_guids(scenario['templates'])
    guid_cycle = cycle(guid_list)
    cleanup_size = scenario['cleanup_size']
    number_of_vms = scenario['number_of_vms']
    total_time = scenario['total_time']
    time_between_provision = scenario['time_between_provision']
    total_provisioned_vms = 0
    total_deleted_vms = 0
    provisioned_vms = 0
    starttime = time.time()

    while ((time.time() - starttime) < total_time):
        start_iteration_time = time.time()
        provision_list = []
        for i in range(number_of_vms):
            total_provisioned_vms += 1
            provisioned_vms += 1
            vm_to_provision = '{}-provision-{}'.format(
                test_ts, str(total_provisioned_vms).zfill(4))
            guid_to_provision, provider_name = next(guid_cycle)
            provision_order.append((vm_to_provision, provider_name))
            provision_list.append((vm_to_provision, guid_to_provision,
                prov.data['provisioning']['vlan']))

        template = prov.data.get('small_template')
        provision_data = get_provision_data(appliance.rest_api, prov, template)
        vm_name = provision_data["vm_fields"]["vm_name"]
        response = appliance.rest_api.collections.provision_requests.action.create(
            **provision_data)
        assert appliance.rest_api.response.status_code == 200
        provision_request = response[0]

        def _finished():
            provision_request.reload()
            if "error" in provision_request.status.lower():
                pytest.fail("Error when provisioning: `{}`".format(provision_request.message))
            return provision_request.request_state.lower() in ("finished", "provisioned")

        wait_for(_finished, num_sec=800, delay=5, message="REST provisioning finishes")

        vm = appliance.rest_api.collections.vms.get(name=vm_name)
        creation_time = time.time()
        provision_time = round(creation_time - start_iteration_time, 2)
        logger.debug('Time to initiate provisioning: {}'.format(provision_time))
        logger.info('{} VMs provisioned so far'.format(total_provisioned_vms))

        if provisioned_vms > cleanup_size * len(scenario['providers']):
            start_remove_time = time.time()
            if appliance.rest_api.collections.vms.action.delete(vm):
                provision_order.pop(0)
                provisioned_vms -= 1
                total_deleted_vms += 1
            deletion_time = round(time.time() - start_remove_time, 2)
            logger.debug('Time to initiate deleting: {}'.format(deletion_time))
            logger.info('{} VMs deleted so far'.format(total_deleted_vms))

        end_iteration_time = time.time()
        iteration_time = round(end_iteration_time - start_iteration_time, 2)
        elapsed_time = end_iteration_time - starttime
        logger.debug('Time to initiate provisioning and deletion: {}'.format(iteration_time))
        logger.info('Time elapsed: {}/{}'.format(round(elapsed_time, 2), total_time))

        if iteration_time < time_between_provision:
            wait_diff = time_between_provision - iteration_time
            time_remaining = total_time - elapsed_time
            if (time_remaining > 0 and time_remaining < time_between_provision):
                time.sleep(time_remaining)
            elif time_remaining > 0:
                time.sleep(wait_diff)
        else:
            logger.warn('Time to initiate provisioning ({}) exceeded time between '
                '({})'.format(iteration_time, time_between_provision))

    quantifiers['Elapsed_Time'] = round(time.time() - starttime, 2)
    quantifiers['Queued_VM_Provisionings'] = total_provisioned_vms
    quantifiers['Deleted_VMs'] = total_deleted_vms
    logger.info('Provisioned {} VMs and deleted {} VMs during the scenario.'
        .format(total_provisioned_vms, total_deleted_vms))
    logger.info('Test Ending...')

def test_workload_capacity_and_utilization(request, scenario, appliance):
    """Runs through provider based scenarios enabling C&U and running for a set period of time.
    Memory Monitor creates graphs and summary at the end of each scenario."""
    from_ts = int(time.time() * 1000)
    logger.debug('Scenario: {}'.format(scenario['name']))
    appliance.clean_appliance()

    quantifiers = {}
    scenario_data = {'appliance_ip': appliance.hostname,
        'appliance_name': conf.cfme_performance['appliance']['appliance_name'],
        'test_dir': 'workload-cap-and-util',
        'test_name': 'Capacity and Utilization',
        'appliance_roles': ','.join(roles_cap_and_util),
        'scenario': scenario}
    monitor_thread = SmemMemoryMonitor(appliance.ssh_client, scenario_data)

    def cleanup_workload(scenario, from_ts, quantifiers, scenario_data):
        starttime = time.time()
        to_ts = int(starttime * 1000)
        g_urls = get_scenario_dashboard_urls(scenario, from_ts, to_ts)
        logger.debug('Started cleaning up monitoring thread.')
        monitor_thread.grafana_urls = g_urls
        monitor_thread.signal = False
        monitor_thread.join()
        add_workload_quantifiers(quantifiers, scenario_data)
        timediff = time.time() - starttime
        logger.info('Finished cleaning up monitoring thread in {}'.format(timediff))
    request.addfinalizer(lambda: cleanup_workload(scenario, from_ts, quantifiers, scenario_data))

    monitor_thread.start()
    appliance.wait_for_miq_server_workers_started(poll_interval=2)
    appliance.update_server_roles({role: True for role in roles_cap_and_util})
    for provider in scenario['providers']:
        get_crud(provider).create_rest()
    logger.info('Sleeping for Refresh: {}s'.format(scenario['refresh_sleep_time']))
    time.sleep(scenario['refresh_sleep_time'])
    appliance.set_cap_and_util_all_via_rails()

    # Variable amount of time for C&U collections/processing
    total_time = scenario['total_time']
    starttime = time.time()
    elapsed_time = 0
    while (elapsed_time < total_time):
        elapsed_time = time.time() - starttime
        time_left = total_time - elapsed_time
        logger.info('Time elapsed: {}/{}'.format(round(elapsed_time, 2), total_time))
        if (time_left > 0 and time_left < 300):
            time.sleep(time_left)
        elif time_left > 0:
            time.sleep(300)

    quantifiers['Elapsed_Time'] = round(elapsed_time, 2)
    logger.info('Test Ending...')

def test_ui_single_page(request, scenario):
    """UI Workload to initiate navigations on the WebUI to the dashboard and to various major
    pages."""
    from_ts = int(time.time() * 1000)
    logger.debug('Scenario: {}'.format(scenario['name']))

    scenario_data = {'appliance_ip': cfme_performance['appliance']['ip_address'],
        'appliance_name': cfme_performance['appliance']['appliance_name'],
        'test_dir': 'ui-workload-single-page',
        'test_name': 'UI Workload {}'.format(scenario['name']),
        'appliance_roles': get_server_roles_ui_workload(),
        'scenario': scenario}
    quantifiers = {}
    monitor_thread = SmemMemoryMonitor(SSHClient(), scenario_data)

    def cleanup_workload(scenario, from_ts, quantifiers):
        starttime = time.time()
        to_ts = int(starttime * 1000)
        g_urls = get_scenario_dashboard_urls(scenario, from_ts, to_ts)
        logger.debug('Started cleaning up monitoring thread.')
        monitor_thread.grafana_urls = g_urls
        monitor_thread.signal = False
        monitor_thread.join()
        add_workload_quantifiers(quantifiers, scenario_data)
        timediff = round(time.time() - starttime, 2)
        logger.info('Finished cleaning up monitoring thread in {}'.format(timediff))
    request.addfinalizer(lambda: cleanup_workload(scenario, from_ts, quantifiers))

    monitor_thread.start()

    cfme_ip = cfme_performance['appliance']['ip_address']
    ui_user = cfme_performance['appliance']['web_ui']['username']
    ui_password = cfme_performance['appliance']['web_ui']['password']
    request_number = scenario['requests']
    quantifiers['number of requests'] = request_number
    quantifiers['pages'] = OrderedDict()
    url = 'https://{}/'.format(cfme_ip)
    credentials = {'user_name': ui_user, 'user_password': ui_password}
    headers = {'Accept': 'text/html'}

    with requests.Session() as session:
        response = session.get(url, verify=False, allow_redirects=False, headers=headers)
        found = re.findall(
            r'\<meta\s*content\=\"([0-9a-zA-Z+\/]*\=\=)\"\s*name\=\"csrf\-token\"\s*\/\>',
            response.text)
        if found:
            headers['X-CSRF-Token'] = found[0]
        else:
            logger.error('CSRF Token not found.')

        response = session.post('{}{}'.format(url, 'dashboard/authenticate'), params=credentials,
            verify=False, allow_redirects=False, headers=headers)

        # Get a protected page now:
        for page in scenario['pages']:
            logger.info('Producing Navigations to: {}'.format(page))
            requests_start = time.time()
            for i in range(request_number):
                navigation_start = time.time()
                response = session.get('{}{}'.format(url, page), verify=False, headers=headers)
                navigation_time = round(time.time() - navigation_start, 2)
                if page not in quantifiers['pages']:
                    quantifiers['pages'][page] = OrderedDict()
                    quantifiers['pages'][page]['navigations'] = 1
                    quantifiers['pages'][page][response.status_code] = 1
                    quantifiers['pages'][page]['timings'] = []
                    quantifiers['pages'][page]['timings'].append(
                        {response.status_code: navigation_time})
                else:
                    quantifiers['pages'][page]['navigations'] += 1
                    quantifiers['pages'][page]['timings'].append(
                        {response.status_code: navigation_time})
                    if response.status_code not in quantifiers['pages'][page]:
                        quantifiers['pages'][page][response.status_code] = 1
                    else:
                        quantifiers['pages'][page][response.status_code] += 1
                if response.status_code == 503:
                    # TODO: Better handling of this, typically 503 means the UIWorker has restarted
                    logger.error('Status code 503 received, waiting 5s before next request')
                    time.sleep(5)
                elif response.status_code != 200:
                    logger.error('Non-200 HTTP status code: {} on {}'.format(
                        response.status_code, page))
            requests_time = round(time.time() - requests_start, 2)
            logger.info('Created {} Requests in {}s'.format(request_number, requests_time))
    logger.info('Test Ending...')

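
# --- Illustrative note (not from the original suite) ------------------------------------------
# The per-page bookkeeping in test_ui_single_page above can be written without the first-visit
# branch by seeding the nested dict up front. `record_navigation` is a hypothetical helper name;
# `response_code` and `elapsed` stand in for the values recorded inside the loop.
from collections import OrderedDict


def record_navigation(quantifiers, page, response_code, elapsed):
    stats = quantifiers['pages'].setdefault(page, OrderedDict(navigations=0, timings=[]))
    stats['navigations'] += 1
    stats['timings'].append({response_code: elapsed})
    stats[response_code] = stats.get(response_code, 0) + 1
# Example, mirroring two navigations to the same page:
#   quantifiers = {'pages': OrderedDict()}
#   record_navigation(quantifiers, 'dashboard/show', 200, 1.42)
#   record_navigation(quantifiers, 'dashboard/show', 200, 1.37)
#   quantifiers['pages']['dashboard/show']['navigations']  -> 2
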
def test_workload_smartstate_analysis(request, scenario):
    """Runs through provider based scenarios initiating smart state analysis against VMs, Hosts,
    and Datastores"""
    from_ts = int(time.time() * 1000)
    ssh_client = SSHClient()
    logger.debug('Scenario: {}'.format(scenario['name']))
    install_vddk(ssh_client)
    clean_appliance(ssh_client)

    quantifiers = {}
    scenario_data = {'appliance_ip': cfme_performance['appliance']['ip_address'],
        'appliance_name': cfme_performance['appliance']['appliance_name'],
        'test_dir': 'workload-ssa',
        'test_name': 'SmartState Analysis',
        'appliance_roles': get_server_roles_workload_smartstate(separator=', '),
        'scenario': scenario}
    monitor_thread = SmemMemoryMonitor(SSHClient(), scenario_data)

    def cleanup_workload(scenario, from_ts, quantifiers, scenario_data):
        starttime = time.time()
        to_ts = int(starttime * 1000)
        g_url = get_scenario_dashboard_url(scenario, from_ts, to_ts)
        logger.debug('Started cleaning up monitoring thread.')
        monitor_thread.grafana_url = g_url
        monitor_thread.signal = False
        monitor_thread.join()
        add_workload_quantifiers(quantifiers, scenario_data)
        timediff = time.time() - starttime
        logger.info('Finished cleaning up monitoring thread in {}'.format(timediff))
    request.addfinalizer(lambda: cleanup_workload(scenario, from_ts, quantifiers, scenario_data))

    monitor_thread.start()
    wait_for_miq_server_workers_started(poll_interval=2)
    set_server_roles_workload_smartstate(ssh_client)
    add_providers(scenario['providers'])
    logger.info('Sleeping for Refresh: {}s'.format(scenario['refresh_sleep_time']))
    time.sleep(scenario['refresh_sleep_time'])

    # Add host credentials and set CFME relationship for RHEVM SSA
    for provider in scenario['providers']:
        add_host_credentials(cfme_performance['providers'][provider], ssh_client)
        if (cfme_performance['providers'][provider]['type'] ==
                "ManageIQ::Providers::Redhat::InfraManager"):
            set_cfme_server_relationship(ssh_client,
                cfme_performance['appliance']['appliance_name'])

    # Get list of VM ids by mapping provider name + vm name to the vm id
    vm_ids_to_scan = map_vms_to_ids(scenario['vms_to_scan'])

    # Variable amount of time for SmartState Analysis workload
    total_time = scenario['total_time']
    starttime = time.time()
    time_between_analyses = scenario['time_between_analyses']
    total_scanned_VMs = 0

    while ((time.time() - starttime) < total_time):
        start_ssa_time = time.time()
        scan_provider_vms_bulk(vm_ids_to_scan)
        total_scanned_VMs += len(vm_ids_to_scan)
        iteration_time = time.time()

        ssa_time = round(iteration_time - start_ssa_time, 2)
        elapsed_time = iteration_time - starttime
        logger.debug('Time to Queue SmartState Analyses: {}'.format(ssa_time))
        logger.info('Time elapsed: {}/{}'.format(round(elapsed_time, 2), total_time))

        if ssa_time < time_between_analyses:
            wait_diff = time_between_analyses - ssa_time
            time_remaining = total_time - elapsed_time
            if (time_remaining > 0 and time_remaining < time_between_analyses):
                time.sleep(time_remaining)
            elif time_remaining > 0:
                time.sleep(wait_diff)
        else:
            logger.warn('Time to Queue SmartState Analyses ({}) exceeded time between '
                '({})'.format(ssa_time, time_between_analyses))

    quantifiers['Elapsed_Time'] = round(time.time() - starttime, 2)
    quantifiers['Queued_VM_Scans'] = total_scanned_VMs
    logger.info('Test Ending...')

def test_workload_capacity_and_utilization_rep(appliance, request, scenario,
        setup_perf_provider):
    """Runs through provider based scenarios enabling C&U and replication, run for a set period
    of time. Memory Monitor creates graphs and summary at the end of each scenario."""
    from_ts = int(time.time() * 1000)
    ssh_client = appliance.ssh_client()
    ssh_master_args = {
        'hostname': scenario['replication_master']['ip_address'],
        'username': scenario['replication_master']['ssh']['username'],
        'password': scenario['replication_master']['ssh']['password']}
    master_appliance = IPAppliance(address=scenario['replication_master']['ip_address'],
        openshift_creds=ssh_master_args)

    ssh_client_master = SSHClient(**ssh_master_args)
    logger.debug('Scenario: {}'.format(scenario['name']))

    is_pglogical = True if scenario['replication'] == 'pglogical' else False

    # Turn off master pglogical replication in case a rubyrep scenario follows a pglogical one
    appliance.set_pglogical_replication(replication_type=':none')

    # Spawn tail beforehand to prevent unnecessary waiting on MiqServer starting since the
    # appliance under test is cleaned first, followed by the master appliance
    sshtail_evm = SSHTail('/var/www/miq/vmdb/log/evm.log')
    sshtail_evm.set_initial_file_end()
    logger.info('Clean appliance under test ({})'.format(ssh_client))
    appliance.clean_appliance()
    logger.info('Clean master appliance ({})'.format(ssh_client_master))
    master_appliance.clean_appliance()  # Clean Replication master appliance

    if is_pglogical:
        scenario_data = {'appliance_ip': appliance.hostname,
            'appliance_name': cfme_performance['appliance']['appliance_name'],
            'test_dir': 'workload-cap-and-util-rep',
            'test_name': 'Capacity and Utilization Replication (pgLogical)',
            'appliance_roles': ', '.join(roles_cap_and_util_rep),
            'scenario': scenario}
    else:
        scenario_data = {'appliance_ip': cfme_performance['appliance']['ip_address'],
            'appliance_name': cfme_performance['appliance']['appliance_name'],
            'test_dir': 'workload-cap-and-util-rep',
            'test_name': 'Capacity and Utilization Replication (RubyRep)',
            'appliance_roles': ', '.join(roles_cap_and_util_rep),
            'scenario': scenario}
    quantifiers = {}
    monitor_thread = SmemMemoryMonitor(appliance.ssh_client(), scenario_data)

    def cleanup_workload(scenario, from_ts, quantifiers, scenario_data):
        starttime = time.time()
        to_ts = int(starttime * 1000)
        g_urls = get_scenario_dashboard_urls(scenario, from_ts, to_ts)
        logger.debug('Started cleaning up monitoring thread.')
        monitor_thread.grafana_urls = g_urls
        monitor_thread.signal = False
        monitor_thread.join()
        add_workload_quantifiers(quantifiers, scenario_data)
        timediff = time.time() - starttime
        logger.info('Finished cleaning up monitoring thread in {}'.format(timediff))
    request.addfinalizer(lambda: cleanup_workload(scenario, from_ts, quantifiers, scenario_data))

    monitor_thread.start()
    appliance.wait_for_miq_server_workers_started(evm_tail=sshtail_evm, poll_interval=2)
    appliance.update_server_roles({role: True for role in roles_cap_and_util_rep})
    for provider in scenario['providers']:
        get_crud(provider).create_rest()
    logger.info('Sleeping for Refresh: {}s'.format(scenario['refresh_sleep_time']))
    time.sleep(scenario['refresh_sleep_time'])
    appliance.set_cap_and_util_all_via_rails()

    # Configure Replication
    if is_pglogical:
        # Setup appliance under test to :remote
        appliance.set_pglogical_replication(replication_type=':remote')
        # Setup master appliance to :global
        master_appliance.set_pglogical_replication(replication_type=':global')
        # Setup master to subscribe:
        master_appliance.add_pglogical_replication_subscription(ssh_client_master,
            appliance.address)
    else:
        # Setup local towards Master
        appliance.set_rubyrep_replication(scenario['replication_master']['ip_address'])
        # Force uninstall rubyrep for this region from master (Unsure if still needed)
        # ssh_client.run_rake_command('evm:dbsync:uninstall')
        # time.sleep(30)  # Wait to quiesce
        # Turn on DB Sync role
        appliance.update_server_roles({role: True for role in roles_cap_and_util_rep})

    # Variable amount of time for C&U collections/processing
    total_time = scenario['total_time']
    starttime = time.time()
    elapsed_time = 0
    while (elapsed_time < total_time):
        elapsed_time = time.time() - starttime
        time_left = total_time - elapsed_time
        logger.info('Time elapsed: {}/{}'.format(round(elapsed_time, 2), total_time))
        if (time_left > 0 and time_left < 300):
            time.sleep(time_left)
        elif time_left > 0:
            time.sleep(300)

    # Turn off replication:
    if is_pglogical:
        appliance.set_pglogical_replication(replication_type=':none')
    else:
        appliance.update_server_roles({role: True for role in roles_cap_and_util_rep})

    quantifiers['Elapsed_Time'] = round(elapsed_time, 2)
    logger.info('Test Ending...')

def test_provisioning(request, scenario):
    """Runs through provisioning scenarios using the REST API to continuously provision a VM
    for a specified period of time. Memory Monitor creates graphs and summary at the end of
    each scenario."""
    from_ts = int(time.time() * 1000)
    ssh_client = SSHClient()
    logger.debug('Scenario: {}'.format(scenario['name']))
    clean_appliance(ssh_client)

    quantifiers = {}
    scenario_data = {'appliance_ip': cfme_performance['appliance']['ip_address'],
        'appliance_name': cfme_performance['appliance']['appliance_name'],
        'test_dir': 'workload-provisioning',
        'test_name': 'Provisioning',
        'appliance_roles': get_server_roles_workload_provisioning(separator=', '),
        'scenario': scenario}
    monitor_thread = SmemMemoryMonitor(SSHClient(), scenario_data)

    provision_order = []

    def cleanup_workload(scenario, from_ts, vms_to_cleanup, quantifiers, scenario_data):
        starttime = time.time()
        to_ts = int(starttime * 1000)
        g_urls = get_scenario_dashboard_urls(scenario, from_ts, to_ts)
        logger.debug('Started cleaning up monitoring thread.')
        set_server_roles_workload_provisioning_cleanup(ssh_client)
        monitor_thread.grafana_urls = g_urls
        monitor_thread.signal = False
        final_vm_size = len(vms_to_cleanup)
        delete_provisioned_vms(vms_to_cleanup)
        monitor_thread.join()
        logger.info('{} VMs were left over, and {} VMs were deleted in the finalizer.'
            .format(final_vm_size, final_vm_size - len(vms_to_cleanup)))
        logger.info('The following VMs were left over after the test: {}'
            .format(vms_to_cleanup))
        quantifiers['VMs_To_Delete_In_Finalizer'] = final_vm_size
        quantifiers['VMs_Deleted_In_Finalizer'] = final_vm_size - len(vms_to_cleanup)
        quantifiers['Leftover_VMs'] = vms_to_cleanup
        add_workload_quantifiers(quantifiers, scenario_data)
        timediff = time.time() - starttime
        logger.info('Finished cleaning up monitoring thread in {}'.format(timediff))
    request.addfinalizer(lambda: cleanup_workload(scenario, from_ts, provision_order,
        quantifiers, scenario_data))

    monitor_thread.start()
    wait_for_miq_server_workers_started(poll_interval=2)
    set_server_roles_workload_provisioning(ssh_client)
    add_providers(scenario['providers'])
    logger.info('Sleeping for Refresh: {}s'.format(scenario['refresh_sleep_time']))
    time.sleep(scenario['refresh_sleep_time'])

    guid_list = get_template_guids(scenario['templates'])
    guid_cycle = cycle(guid_list)
    cleanup_size = scenario['cleanup_size']
    number_of_vms = scenario['number_of_vms']
    total_time = scenario['total_time']
    time_between_provision = scenario['time_between_provision']
    total_provisioned_vms = 0
    total_deleted_vms = 0
    provisioned_vms = 0
    starttime = time.time()

    while ((time.time() - starttime) < total_time):
        start_iteration_time = time.time()
        provision_list = []
        for i in range(number_of_vms):
            total_provisioned_vms += 1
            provisioned_vms += 1
            vm_to_provision = '{}-provision-{}'.format(
                test_ts, str(total_provisioned_vms).zfill(4))
            guid_to_provision, provider_name = next(guid_cycle)
            provider_to_provision = cfme_performance['providers'][provider_name]
            provision_order.append((vm_to_provision, provider_name))
            provision_list.append((vm_to_provision, guid_to_provision,
                provider_to_provision['vlan_network']))

        provision_vm(provision_list)
        creation_time = time.time()
        provision_time = round(creation_time - start_iteration_time, 2)
        logger.debug('Time to initiate provisioning: {}'.format(provision_time))
        logger.info('{} VMs provisioned so far'.format(total_provisioned_vms))

        if provisioned_vms > cleanup_size * len(scenario['providers']):
            start_remove_time = time.time()
            if delete_provisioned_vm(provision_order[0]):
                provision_order.pop(0)
                provisioned_vms -= 1
                total_deleted_vms += 1
            deletion_time = round(time.time() - start_remove_time, 2)
            logger.debug('Time to initiate deleting: {}'.format(deletion_time))
            logger.info('{} VMs deleted so far'.format(total_deleted_vms))

        end_iteration_time = time.time()
        iteration_time = round(end_iteration_time - start_iteration_time, 2)
        elapsed_time = end_iteration_time - starttime
        logger.debug('Time to initiate provisioning and deletion: {}'.format(iteration_time))
        logger.info('Time elapsed: {}/{}'.format(round(elapsed_time, 2), total_time))

        if iteration_time < time_between_provision:
            wait_diff = time_between_provision - iteration_time
            time_remaining = total_time - elapsed_time
            if (time_remaining > 0 and time_remaining < time_between_provision):
                time.sleep(time_remaining)
            elif time_remaining > 0:
                time.sleep(wait_diff)
        else:
            logger.warn('Time to initiate provisioning ({}) exceeded time between '
                '({})'.format(iteration_time, time_between_provision))

    quantifiers['Elapsed_Time'] = round(time.time() - starttime, 2)
    quantifiers['Queued_VM_Provisionings'] = total_provisioned_vms
    quantifiers['Deleted_VMs'] = total_deleted_vms
    logger.info('Provisioned {} VMs and deleted {} VMs during the scenario.'
        .format(total_provisioned_vms, total_deleted_vms))
    logger.info('Test Ending...')

def test_workload_smartstate_analysis(appliance, request, scenario):
    """Runs through provider based scenarios initiating smart state analysis against VMs, Hosts,
    and Datastores"""
    from_ts = int(time.time() * 1000)
    logger.debug('Scenario: {}'.format(scenario['name']))
    appliance.install_vddk()
    appliance.clean_appliance()

    quantifiers = {}
    scenario_data = {'appliance_ip': appliance.hostname,
        'appliance_name': cfme_performance['appliance']['appliance_name'],
        'test_dir': 'workload-ssa',
        'test_name': 'SmartState Analysis',
        'appliance_roles': ', '.join(roles_smartstate),
        'scenario': scenario}
    monitor_thread = SmemMemoryMonitor(appliance.ssh_client(), scenario_data)

    def cleanup_workload(scenario, from_ts, quantifiers, scenario_data):
        starttime = time.time()
        to_ts = int(starttime * 1000)
        g_urls = get_scenario_dashboard_urls(scenario, from_ts, to_ts)
        logger.debug('Started cleaning up monitoring thread.')
        monitor_thread.grafana_urls = g_urls
        monitor_thread.signal = False
        monitor_thread.join()
        add_workload_quantifiers(quantifiers, scenario_data)
        timediff = time.time() - starttime
        logger.info('Finished cleaning up monitoring thread in {}'.format(timediff))
    request.addfinalizer(lambda: cleanup_workload(scenario, from_ts, quantifiers, scenario_data))

    monitor_thread.start()
    appliance.wait_for_miq_server_workers_started(poll_interval=2)
    appliance.update_server_roles({role: True for role in roles_smartstate})
    for provider in scenario['providers']:
        get_crud(provider).create_rest()
    logger.info('Sleeping for Refresh: {}s'.format(scenario['refresh_sleep_time']))
    time.sleep(scenario['refresh_sleep_time'])

    # Add host credentials and set CFME relationship for RHEVM SSA
    for provider in scenario['providers']:
        for api_host in appliance.rest_api.collections.hosts.all:
            test_host = host.Host(name=api_host.name, provider=provider)
            host_data = get_host_data_by_name(get_crud(provider), api_host.name)
            credentials = host.get_credentials_from_config(host_data['credentials'])
            test_host.update_credentials_rest(credentials)
        appliance.set_cfme_server_relationship(cfme_performance['appliance']['appliance_name'])

    # Variable amount of time for SmartState Analysis workload
    total_time = scenario['total_time']
    starttime = time.time()
    time_between_analyses = scenario['time_between_analyses']
    total_scanned_vms = 0

    while ((time.time() - starttime) < total_time):
        start_ssa_time = time.time()
        for vm in scenario['vms_to_scan'].values()[0]:
            vm_api = appliance.rest_api.collections.vms.get(name=vm)
            vm_api.action.scan()
            total_scanned_vms += 1
        iteration_time = time.time()

        ssa_time = round(iteration_time - start_ssa_time, 2)
        elapsed_time = iteration_time - starttime
        logger.debug('Time to Queue SmartState Analyses: {}'.format(ssa_time))
        logger.info('Time elapsed: {}/{}'.format(round(elapsed_time, 2), total_time))

        if ssa_time < time_between_analyses:
            wait_diff = time_between_analyses - ssa_time
            time_remaining = total_time - elapsed_time
            if (time_remaining > 0 and time_remaining < time_between_analyses):
                time.sleep(time_remaining)
            elif time_remaining > 0:
                time.sleep(wait_diff)
        else:
            logger.warn('Time to Queue SmartState Analyses ({}) exceeded time between '
                '({})'.format(ssa_time, time_between_analyses))

    quantifiers['Elapsed_Time'] = round(time.time() - starttime, 2)
    quantifiers['Queued_VM_Scans'] = total_scanned_vms
    logger.info('Test Ending...')

def test_refresh_providers(request, scenario):
    """Refreshes providers then waits for a specific amount of time. Memory Monitor creates
    graphs and summary at the end of the scenario."""
    from_ts = int(time.time() * 1000)
    ssh_client = SSHClient()
    logger.debug('Scenario: {}'.format(scenario['name']))
    clean_appliance(ssh_client)

    quantifiers = {}
    scenario_data = {'appliance_ip': cfme_performance['appliance']['ip_address'],
        'appliance_name': cfme_performance['appliance']['appliance_name'],
        'test_dir': 'workload-refresh-providers',
        'test_name': 'Refresh Providers',
        'appliance_roles': get_server_roles_workload_refresh_providers(separator=', '),
        'scenario': scenario}
    monitor_thread = SmemMemoryMonitor(SSHClient(), scenario_data)

    def cleanup_workload(scenario, from_ts, quantifiers, scenario_data):
        starttime = time.time()
        to_ts = int(starttime * 1000)
        g_urls = get_scenario_dashboard_urls(scenario, from_ts, to_ts)
        logger.debug('Started cleaning up monitoring thread.')
        monitor_thread.grafana_urls = g_urls
        monitor_thread.signal = False
        monitor_thread.join()
        add_workload_quantifiers(quantifiers, scenario_data)
        timediff = time.time() - starttime
        logger.info('Finished cleaning up monitoring thread in {}'.format(timediff))
    request.addfinalizer(lambda: cleanup_workload(scenario, from_ts, quantifiers, scenario_data))

    monitor_thread.start()
    wait_for_miq_server_workers_started(poll_interval=2)
    set_server_roles_workload_refresh_providers(ssh_client)
    add_providers(scenario['providers'])
    id_list = get_all_provider_ids()

    # Variable amount of time for refresh workload
    total_time = scenario['total_time']
    starttime = time.time()
    time_between_refresh = scenario['time_between_refresh']
    total_refreshed_providers = 0

    while ((time.time() - starttime) < total_time):
        start_refresh_time = time.time()
        refresh_providers_bulk(id_list)
        total_refreshed_providers += len(id_list)
        iteration_time = time.time()

        refresh_time = round(iteration_time - start_refresh_time, 2)
        elapsed_time = iteration_time - starttime
        logger.debug('Time to Queue Refreshes: {}'.format(refresh_time))
        logger.info('Time elapsed: {}/{}'.format(round(elapsed_time, 2), total_time))

        if refresh_time < time_between_refresh:
            wait_diff = time_between_refresh - refresh_time
            time_remaining = total_time - elapsed_time
            if (time_remaining > 0 and time_remaining < time_between_refresh):
                time.sleep(time_remaining)
            elif time_remaining > 0:
                time.sleep(wait_diff)
        else:
            logger.warn('Time to Queue Refreshes ({}) exceeded time between '
                '({})'.format(refresh_time, time_between_refresh))

    quantifiers['Elapsed_Time'] = round(time.time() - starttime, 2)
    quantifiers['Queued_Provider_Refreshes'] = total_refreshed_providers
    logger.info('Test Ending...')

def test_refresh_vms(request, scenario):
    """Refreshes all VMs then waits for a specific amount of time. Memory Monitor creates
    graphs and summary at the end of the scenario."""
    from_ts = int(time.time() * 1000)
    ssh_client = SSHClient()
    logger.debug('Scenario: {}'.format(scenario['name']))
    clean_appliance(ssh_client)

    quantifiers = {}
    scenario_data = {'appliance_ip': cfme_performance['appliance']['ip_address'],
        'appliance_name': cfme_performance['appliance']['appliance_name'],
        'test_dir': 'workload-refresh-vm',
        'test_name': 'Refresh VMs',
        'appliance_roles': get_server_roles_workload_refresh_vms(separator=', '),
        'scenario': scenario}
    monitor_thread = SmemMemoryMonitor(SSHClient(), scenario_data)

    def cleanup_workload(scenario, from_ts, quantifiers, scenario_data):
        starttime = time.time()
        to_ts = int(starttime * 1000)
        g_url = get_scenario_dashboard_url(scenario, from_ts, to_ts)
        logger.debug('Started cleaning up monitoring thread.')
        monitor_thread.grafana_url = g_url
        monitor_thread.signal = False
        monitor_thread.join()
        add_workload_quantifiers(quantifiers, scenario_data)
        timediff = time.time() - starttime
        logger.info('Finished cleaning up monitoring thread in {}'.format(timediff))
    request.addfinalizer(lambda: cleanup_workload(scenario, from_ts, quantifiers, scenario_data))

    monitor_thread.start()
    wait_for_miq_server_workers_started(poll_interval=2)
    set_server_roles_workload_refresh_vms(ssh_client)
    add_providers(scenario['providers'])
    logger.info('Sleeping for refresh: {}s'.format(scenario['refresh_sleep_time']))
    time.sleep(scenario['refresh_sleep_time'])

    full_refresh_threshold_set = False
    if 'full_refresh_threshold' in scenario:
        if scenario['full_refresh_threshold'] != FULL_REFRESH_THRESHOLD_DEFAULT:
            set_full_refresh_threshold(ssh_client, scenario['full_refresh_threshold'])
            full_refresh_threshold_set = True
    if not full_refresh_threshold_set:
        logger.debug('Keeping full_refresh_threshold at default ({}).'.format(
            FULL_REFRESH_THRESHOLD_DEFAULT))

    refresh_size = scenario['refresh_size']
    vm_ids = get_all_vm_ids()
    vm_ids_iter = cycle(vm_ids)
    logger.debug('Number of VM IDs: {}'.format(len(vm_ids)))

    # Variable amount of time for refresh workload
    total_time = scenario['total_time']
    starttime = time.time()
    time_between_refresh = scenario['time_between_refresh']
    total_refreshed_vms = 0

    while ((time.time() - starttime) < total_time):
        start_refresh_time = time.time()
        refresh_list = [next(vm_ids_iter) for x in range(refresh_size)]
        refresh_provider_vms_bulk(refresh_list)
        total_refreshed_vms += len(refresh_list)
        iteration_time = time.time()

        refresh_time = round(iteration_time - start_refresh_time, 2)
        elapsed_time = iteration_time - starttime
        logger.debug('Time to Queue VM Refreshes: {}'.format(refresh_time))
        logger.info('Time elapsed: {}/{}'.format(round(elapsed_time, 2), total_time))

        if refresh_time < time_between_refresh:
            wait_diff = time_between_refresh - refresh_time
            time_remaining = total_time - elapsed_time
            if (time_remaining > 0 and time_remaining < time_between_refresh):
                time.sleep(time_remaining)
            elif time_remaining > 0:
                time.sleep(wait_diff)
        else:
            logger.warn('Time to Queue VM Refreshes ({}) exceeded time between '
                '({})'.format(refresh_time, time_between_refresh))

    quantifiers['Elapsed_Time'] = round(time.time() - starttime, 2)
    quantifiers['Queued_VM_Refreshes'] = total_refreshed_vms
    logger.info('Test Ending...')

def test_refresh_vms(request, scenario):
    """Refreshes all VMs, then waits for a specific amount of time. Memory Monitor creates
    graphs and a summary at the end of the scenario."""
    from_ts = int(time.time() * 1000)
    ssh_client = SSHClient()
    logger.debug('Scenario: {}'.format(scenario['name']))

    clean_appliance(ssh_client)

    quantifiers = {}
    scenario_data = {
        'appliance_ip': cfme_performance['appliance']['ip_address'],
        'appliance_name': cfme_performance['appliance']['appliance_name'],
        'test_dir': 'workload-refresh-vm',
        'test_name': 'Refresh VMs',
        'appliance_roles': get_server_roles_workload_refresh_vms(separator=', '),
        'scenario': scenario
    }
    monitor_thread = SmemMemoryMonitor(SSHClient(), scenario_data)

    def cleanup_workload(scenario, from_ts, quantifiers, scenario_data):
        starttime = time.time()
        to_ts = int(starttime * 1000)
        g_urls = get_scenario_dashboard_urls(scenario, from_ts, to_ts)
        logger.debug('Started cleaning up monitoring thread.')
        monitor_thread.grafana_urls = g_urls
        monitor_thread.signal = False
        monitor_thread.join()
        add_workload_quantifiers(quantifiers, scenario_data)
        timediff = time.time() - starttime
        logger.info('Finished cleaning up monitoring thread in {}'.format(timediff))
    request.addfinalizer(lambda: cleanup_workload(scenario, from_ts, quantifiers, scenario_data))

    monitor_thread.start()
    wait_for_miq_server_workers_started(poll_interval=2)
    set_server_roles_workload_refresh_vms(ssh_client)
    add_providers(scenario['providers'])
    logger.info('Sleeping for refresh: {}s'.format(scenario['refresh_sleep_time']))
    time.sleep(scenario['refresh_sleep_time'])

    full_refresh_threshold_set = False
    if 'full_refresh_threshold' in scenario:
        if scenario['full_refresh_threshold'] != FULL_REFRESH_THRESHOLD_DEFAULT:
            set_full_refresh_threshold(ssh_client, scenario['full_refresh_threshold'])
            full_refresh_threshold_set = True
    if not full_refresh_threshold_set:
        logger.debug('Keeping full_refresh_threshold at default ({}).'.format(
            FULL_REFRESH_THRESHOLD_DEFAULT))

    refresh_size = scenario['refresh_size']
    vm_ids = get_all_vm_ids()
    vm_ids_iter = cycle(vm_ids)
    logger.debug('Number of VM IDs: {}'.format(len(vm_ids)))

    # Variable amount of time for refresh workload
    total_time = scenario['total_time']
    starttime = time.time()
    time_between_refresh = scenario['time_between_refresh']
    total_refreshed_vms = 0

    while ((time.time() - starttime) < total_time):
        start_refresh_time = time.time()
        refresh_list = [next(vm_ids_iter) for x in range(refresh_size)]
        refresh_provider_vms_bulk(refresh_list)
        total_refreshed_vms += len(refresh_list)
        iteration_time = time.time()

        refresh_time = round(iteration_time - start_refresh_time, 2)
        elapsed_time = iteration_time - starttime
        logger.debug('Time to Queue VM Refreshes: {}'.format(refresh_time))
        logger.info('Time elapsed: {}/{}'.format(round(elapsed_time, 2), total_time))

        if refresh_time < time_between_refresh:
            wait_diff = time_between_refresh - refresh_time
            time_remaining = total_time - elapsed_time
            if (time_remaining > 0 and time_remaining < time_between_refresh):
                time.sleep(time_remaining)
            elif time_remaining > 0:
                time.sleep(wait_diff)
        else:
            logger.warn('Time to Queue VM Refreshes ({}) exceeded time between '
                '({})'.format(refresh_time, time_between_refresh))

    quantifiers['Elapsed_Time'] = round(time.time() - starttime, 2)
    quantifiers['Queued_VM_Refreshes'] = total_refreshed_vms
    logger.info('Test Ending...')
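# For reference, the scenario passed to test_refresh_vms is expected to carry the keys read in
# the body above (name, providers, refresh_sleep_time, optional full_refresh_threshold,
# refresh_size, total_time, time_between_refresh). The values and the provider key below are
# illustrative assumptions, not defaults shipped with the framework:
EXAMPLE_REFRESH_VMS_SCENARIO = {
    'name': 'refresh-vms-1hr',            # Used for the test directory and Grafana URL lookup
    'providers': ['vmware-small'],        # Keys into cfme_performance['providers']
    'refresh_sleep_time': 600,            # Seconds to wait for the initial full refresh
    'full_refresh_threshold': 100,        # Only applied if it differs from the default
    'refresh_size': 10,                   # VMs queued per iteration (cycled over all VM IDs)
    'total_time': 3600,                   # Overall workload duration in seconds
    'time_between_refresh': 60,           # Pacing interval between queued batches
}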
def test_workload_smartstate_analysis(request, scenario):
    """Runs through provider based scenarios initiating smart state analysis against VMs, Hosts,
    and Datastores"""
    from_ts = int(time.time() * 1000)
    ssh_client = SSHClient()
    logger.debug('Scenario: {}'.format(scenario['name']))
    install_vddk(ssh_client)

    clean_appliance(ssh_client)

    monitor_thread = SmemMemoryMonitor(SSHClient(), 'workload-ssa', scenario['name'],
        'SmartState Analysis', get_server_roles_workload_smartstate(separator=', '),
        ', '.join(scenario['providers']))

    def cleanup_workload(scenario, from_ts):
        starttime = time.time()
        to_ts = int(starttime * 1000)
        g_url = get_scenario_dashboard_url(scenario, from_ts, to_ts)
        logger.debug('Started cleaning up monitoring thread.')
        monitor_thread.grafana_url = g_url
        monitor_thread.signal = False
        monitor_thread.join()
        timediff = time.time() - starttime
        logger.info('Finished cleaning up monitoring thread in {}'.format(timediff))
    request.addfinalizer(lambda: cleanup_workload(scenario, from_ts))

    monitor_thread.start()
    wait_for_miq_server_ready(poll_interval=2)
    set_server_roles_workload_smartstate(ssh_client)
    add_providers(scenario['providers'])
    logger.info('Sleeping for Refresh: {}s'.format(scenario['refresh_sleep_time']))
    time.sleep(scenario['refresh_sleep_time'])

    for provider in scenario['providers']:
        add_host_credentials(cfme_performance['providers'][provider])
        if (cfme_performance['providers'][provider]['type'] ==
                "ManageIQ::Providers::Redhat::InfraManager"):
            set_cfme_server_relationship(ssh_client,
                cfme_performance['appliance']['appliance_name'])

    # Variable amount of time for SmartState Analysis workload
    total_time = scenario['total_time']
    starttime = time.time()
    time_between_analyses = scenario['time_between_analyses']

    vm_ids_to_scan = []
    for vm_name in scenario['vms_to_scan']:
        vm_ids_to_scan.append(get_vm_id(vm_name))

    while ((time.time() - starttime) < total_time):
        start_ssa_time = time.time()
        for vm_id in vm_ids_to_scan:
            scan_provider_vm(vm_id)
        iteration_time = time.time()

        ssa_time = round(iteration_time - start_ssa_time, 2)
        elapsed_time = iteration_time - starttime
        logger.debug('Time to initiate SmartState Analyses: {}'.format(ssa_time))
        logger.info('Time elapsed: {}/{}'.format(round(elapsed_time, 2), total_time))

        if ssa_time < time_between_analyses:
            wait_diff = time_between_analyses - ssa_time
            time_remaining = total_time - elapsed_time
            if (time_remaining > 0 and time_remaining < time_between_analyses):
                time.sleep(time_remaining)
            elif time_remaining > 0:
                time.sleep(wait_diff)
        else:
            logger.warn('Time to initiate SmartState Analyses ({}) exceeded time between '
                '({})'.format(ssa_time, time_between_analyses))

    logger.info('Test Ending...')
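# The SmartState scenario is a plain mapping like the one shown after test_refresh_vms above,
# but with two workload-specific keys read by the body: vms_to_scan (names resolved to IDs via
# get_vm_id) and time_between_analyses. Values and VM names below are illustrative assumptions
# only, not a shipped configuration:
EXAMPLE_SSA_SCENARIO = {
    'name': 'ssa-5vms-30min',
    'providers': ['vmware-small'],
    'refresh_sleep_time': 600,
    'vms_to_scan': ['ssa-test-vm-1', 'ssa-test-vm-2'],  # Hypothetical VM names on the provider
    'total_time': 1800,
    'time_between_analyses': 300,
}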
def test_refresh_providers(request, scenario):
    """Refreshes providers then waits for a specific amount of time. Memory Monitor creates
    graphs and summary at the end of the scenario."""
    from_ts = int(time.time() * 1000)
    ssh_client = SSHClient()
    logger.debug('Scenario: {}'.format(scenario['name']))

    clean_appliance(ssh_client)

    quantifiers = {}
    scenario_data = {
        'appliance_ip': cfme_performance['appliance']['ip_address'],
        'appliance_name': cfme_performance['appliance']['appliance_name'],
        'test_dir': 'workload-refresh-providers',
        'test_name': 'Refresh Providers',
        'appliance_roles': get_server_roles_workload_refresh_providers(separator=', '),
        'scenario': scenario
    }
    monitor_thread = SmemMemoryMonitor(SSHClient(), scenario_data)

    def cleanup_workload(scenario, from_ts, quantifiers, scenario_data):
        starttime = time.time()
        to_ts = int(starttime * 1000)
        g_url = get_scenario_dashboard_url(scenario, from_ts, to_ts)
        logger.debug('Started cleaning up monitoring thread.')
        monitor_thread.grafana_url = g_url
        monitor_thread.signal = False
        monitor_thread.join()
        add_workload_quantifiers(quantifiers, scenario_data)
        timediff = time.time() - starttime
        logger.info('Finished cleaning up monitoring thread in {}'.format(timediff))
    request.addfinalizer(lambda: cleanup_workload(scenario, from_ts, quantifiers, scenario_data))

    monitor_thread.start()
    wait_for_miq_server_workers_started(poll_interval=2)
    set_server_roles_workload_refresh_providers(ssh_client)
    add_providers(scenario['providers'])
    id_list = get_all_provider_ids()

    # Variable amount of time for refresh workload
    total_time = scenario['total_time']
    starttime = time.time()
    time_between_refresh = scenario['time_between_refresh']
    total_refreshed_providers = 0

    while ((time.time() - starttime) < total_time):
        start_refresh_time = time.time()
        refresh_providers_bulk(id_list)
        total_refreshed_providers += len(id_list)
        iteration_time = time.time()

        refresh_time = round(iteration_time - start_refresh_time, 2)
        elapsed_time = iteration_time - starttime
        logger.debug('Time to Queue Refreshes: {}'.format(refresh_time))
        logger.info('Time elapsed: {}/{}'.format(round(elapsed_time, 2), total_time))

        if refresh_time < time_between_refresh:
            wait_diff = time_between_refresh - refresh_time
            time_remaining = total_time - elapsed_time
            if (time_remaining > 0 and time_remaining < time_between_refresh):
                time.sleep(time_remaining)
            elif time_remaining > 0:
                time.sleep(wait_diff)
        else:
            logger.warn('Time to Queue Refreshes ({}) exceeded time between '
                '({})'.format(refresh_time, time_between_refresh))

    quantifiers['Elapsed_Time'] = round(time.time() - starttime, 2)
    quantifiers['Queued_Provider_Refreshes'] = total_refreshed_providers
    logger.info('Test Ending...')
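# A rough sizing note for the loop above: with one bulk refresh queued per pacing interval, the
# number of iterations is roughly total_time / time_between_refresh (fewer if queueing ever
# exceeds the interval), and Queued_Provider_Refreshes is that count multiplied by the number of
# provider IDs. For example, assuming total_time=3600, time_between_refresh=60 and two
# providers, the quantifier would come out near 2 * (3600 / 60) = 120 queued refreshes.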
def test_workload_capacity_and_utilization_rep(request, scenario):
    """Runs through provider-based scenarios enabling C&U and replication, running for a set
    period of time. Memory Monitor creates graphs and a summary at the end of each scenario."""
    from_ts = int(time.time() * 1000)
    ssh_client = SSHClient()
    ssh_master_args = {
        'hostname': scenario['replication_master']['ip_address'],
        'username': scenario['replication_master']['ssh']['username'],
        'password': scenario['replication_master']['ssh']['password']
    }
    ssh_client_master = SSHClient(**ssh_master_args)
    logger.debug('Scenario: {}'.format(scenario['name']))

    is_pglogical = True if scenario['replication'] == 'pglogical' else False

    # Turn off master pglogical replication in case a rubyrep scenario follows a pglogical
    # scenario
    set_pglogical_replication(ssh_client_master, replication_type=':none')

    # Spawn the tail beforehand to prevent unnecessary waiting on MiqServer starting, since the
    # appliance under test is cleaned first, followed by the master appliance
    sshtail_evm = SSHTail('/var/www/miq/vmdb/log/evm.log')
    sshtail_evm.set_initial_file_end()

    logger.info('Clean appliance under test ({})'.format(ssh_client))
    clean_appliance(ssh_client)
    logger.info('Clean master appliance ({})'.format(ssh_client_master))
    clean_appliance(ssh_client_master, False)  # Clean replication master appliance

    if is_pglogical:
        scenario_data = {
            'appliance_ip': cfme_performance['appliance']['ip_address'],
            'appliance_name': cfme_performance['appliance']['appliance_name'],
            'test_dir': 'workload-cap-and-util-rep',
            'test_name': 'Capacity and Utilization Replication (pgLogical)',
            'appliance_roles': get_server_roles_workload_cap_and_util(separator=', '),
            'scenario': scenario
        }
    else:
        scenario_data = {
            'appliance_ip': cfme_performance['appliance']['ip_address'],
            'appliance_name': cfme_performance['appliance']['appliance_name'],
            'test_dir': 'workload-cap-and-util-rep',
            'test_name': 'Capacity and Utilization Replication (RubyRep)',
            'appliance_roles': get_server_roles_workload_cap_and_util_rep(separator=', '),
            'scenario': scenario
        }
    quantifiers = {}
    monitor_thread = SmemMemoryMonitor(SSHClient(), scenario_data)

    def cleanup_workload(scenario, from_ts, quantifiers, scenario_data):
        starttime = time.time()
        to_ts = int(starttime * 1000)
        g_urls = get_scenario_dashboard_urls(scenario, from_ts, to_ts)
        logger.debug('Started cleaning up monitoring thread.')
        monitor_thread.grafana_urls = g_urls
        monitor_thread.signal = False
        monitor_thread.join()
        add_workload_quantifiers(quantifiers, scenario_data)
        timediff = time.time() - starttime
        logger.info('Finished cleaning up monitoring thread in {}'.format(timediff))
    request.addfinalizer(lambda: cleanup_workload(scenario, from_ts, quantifiers, scenario_data))

    monitor_thread.start()
    wait_for_miq_server_workers_started(evm_tail=sshtail_evm, poll_interval=2)
    set_server_roles_workload_cap_and_util(ssh_client)
    add_providers(scenario['providers'])
    logger.info('Sleeping for Refresh: {}s'.format(scenario['refresh_sleep_time']))
    time.sleep(scenario['refresh_sleep_time'])
    set_cap_and_util_all_via_rails(ssh_client)

    # Configure Replication
    if is_pglogical:
        # Setup appliance under test to :remote
        set_pglogical_replication(ssh_client, replication_type=':remote')
        # Setup master appliance to :global
        set_pglogical_replication(ssh_client_master, replication_type=':global')
        # Setup master to subscribe:
        add_pglogical_replication_subscription(ssh_client_master,
            cfme_performance['appliance']['ip_address'])
    else:
        # Setup local towards Master
        set_rubyrep_replication(ssh_client, scenario['replication_master']['ip_address'])
        # Force uninstall rubyrep for this region from master (unsure if still needed)
        # ssh_client.run_rake_command('evm:dbsync:uninstall')
        # time.sleep(30)  # Wait to quiesce
        # Turn on DB Sync role
        set_server_roles_workload_cap_and_util_rep(ssh_client)

    # Variable amount of time for C&U collections/processing
    total_time = scenario['total_time']
    starttime = time.time()
    elapsed_time = 0
    while (elapsed_time < total_time):
        elapsed_time = time.time() - starttime
        time_left = total_time - elapsed_time
        logger.info('Time elapsed: {}/{}'.format(round(elapsed_time, 2), total_time))
        if (time_left > 0 and time_left < 300):
            time.sleep(time_left)
        elif time_left > 0:
            time.sleep(300)

    # Turn off replication:
    if is_pglogical:
        set_pglogical_replication(ssh_client_master, replication_type=':none')
    else:
        set_server_roles_workload_cap_and_util(ssh_client)

    quantifiers['Elapsed_Time'] = round(elapsed_time, 2)
    logger.info('Test Ending...')
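# The replication workload reads two extra pieces of scenario data beyond the usual C&U keys:
# 'replication' selects the pglogical path (any other value falls through to rubyrep), and
# 'replication_master' points at the global-region appliance (IP address plus SSH credentials
# used to build its own SSHClient). A sketch with assumed values, not a shipped configuration:
EXAMPLE_CAP_AND_UTIL_REP_SCENARIO = {
    'name': 'cap-and-util-rep-pglogical',
    'providers': ['vmware-small'],
    'refresh_sleep_time': 600,
    'total_time': 3600,
    'replication': 'pglogical',                # Or 'rubyrep' to exercise the DB Sync role path
    'replication_master': {
        'ip_address': '10.0.0.20',             # Hypothetical master/global region appliance
        'ssh': {'username': 'root', 'password': 'password'},
    },
}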