Code Example #1
def test_idle_default(request):
    """Runs an appliance at idle for specific amount of time. Memory Monitor creates graphs and
    summary at the end of the scenario."""
    from_ts = int(time.time() * 1000)
    ssh_client = SSHClient()

    clean_appliance(ssh_client)

    monitor_thread = SmemMemoryMonitor(SSHClient(), 'workload-idle', 'default',
        'Idle with Default Roles', get_server_roles_workload_idle_default(separator=', '),
        'No Providers')

    def cleanup_workload(from_ts):
        starttime = time.time()
        to_ts = int(starttime * 1000)
        g_url = get_default_dashboard_url(from_ts, to_ts)
        logger.debug('Started cleaning up monitoring thread.')
        monitor_thread.grafana_url = g_url
        monitor_thread.signal = False
        monitor_thread.join()
        timediff = time.time() - starttime
        logger.info('Finished cleaning up monitoring thread in {}'.format(timediff))
    request.addfinalizer(lambda: cleanup_workload(from_ts))

    monitor_thread.start()

    wait_for_miq_server_ready(poll_interval=2)
    # No need to set server roles as we are using the default set of roles

    s_time = cfme_performance['workloads']['test_idle_default']['total_time']
    logger.info('Idling appliance for {}s'.format(s_time))
    time.sleep(s_time)

    logger.info('Test Ending...')
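Every workload on this page tears down its monitoring the same way: the test registers a pytest finalizer that flips the monitor thread's signal flag, attaches the Grafana URL, and joins the thread so the graphs and summary get written even if the test body fails. Below is a minimal, runnable sketch of that pattern; MonitorThread is a hypothetical stand-in for SmemMemoryMonitor, which is defined elsewhere in the project.

import threading
import time


class MonitorThread(threading.Thread):
    """Hypothetical stand-in for SmemMemoryMonitor: samples until signaled to stop."""

    def __init__(self, poll_interval=1):
        super(MonitorThread, self).__init__()
        self.signal = True            # the finalizer sets this to False
        self.poll_interval = poll_interval
        self.samples = []

    def run(self):
        while self.signal:
            self.samples.append(time.time())   # a real monitor would sample smem here
            time.sleep(self.poll_interval)


def test_monitored_workload(request):
    monitor_thread = MonitorThread()

    def cleanup_workload():
        monitor_thread.signal = False   # ask the thread to stop sampling
        monitor_thread.join()           # wait for it to exit before pytest moves on

    request.addfinalizer(cleanup_workload)   # runs even if the workload raises
    monitor_thread.start()
    time.sleep(3)                            # the actual workload would run here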
Code Example #2
def test_refresh_providers(request, scenario):
    """Refreshes providers then waits for a specific amount of time. Memory Monitor creates graphs
    and summary at the end of the scenario."""
    from_ts = int(time.time() * 1000)
    ssh_client = SSHClient()
    logger.debug('Scenario: {}'.format(scenario['name']))

    clean_appliance(ssh_client)

    monitor_thread = SmemMemoryMonitor(SSHClient(), 'workload-refresh-providers', scenario['name'],
        'refresh-providers', get_server_roles_workload_refresh_providers(separator=','),
        ', '.join(scenario['providers']))

    def cleanup_workload(scenario, from_ts):
        starttime = time.time()
        to_ts = int(starttime * 1000)
        g_url = get_scenario_dashboard_url(scenario, from_ts, to_ts)
        logger.debug('Started cleaning up monitoring thread.')
        monitor_thread.grafana_url = g_url
        monitor_thread.signal = False
        monitor_thread.join()
        timediff = time.time() - starttime
        logger.info('Finished cleaning up monitoring thread in {}'.format(timediff))
    request.addfinalizer(lambda: cleanup_workload(scenario, from_ts))

    monitor_thread.start()

    wait_for_miq_server_ready(poll_interval=2)
    set_server_roles_workload_refresh_providers(ssh_client)
    add_providers(scenario['providers'])
    id_list = get_all_provider_ids()

    # Variable amount of time for refresh workload
    total_time = scenario['total_time']
    starttime = time.time()
    time_between_refresh = scenario['time_between_refresh']

    while ((time.time() - starttime) < total_time):
        start_refresh_time = time.time()
        refresh_providers(id_list)
        iteration_time = time.time()

        refresh_time = round(iteration_time - start_refresh_time, 2)
        elapsed_time = iteration_time - starttime
        logger.debug('Time to Queue Refreshes: {}'.format(refresh_time))
        logger.info('Time elapsed: {}/{}'.format(round(elapsed_time, 2), total_time))

        if refresh_time < time_between_refresh:
            wait_diff = time_between_refresh - refresh_time
            time_remaining = total_time - elapsed_time
            if (time_remaining > 0 and time_remaining < time_between_refresh):
                time.sleep(time_remaining)
            elif time_remaining > 0:
                time.sleep(wait_diff)
        else:
            logger.warn('Time to Queue Refreshes ({}) exceeded time between Refreshes ({})'.format(
                refresh_time, time_between_refresh))

    logger.info('Test Ending...')
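The while loop above is a pacing loop: each iteration queues a batch of refreshes, then sleeps only the remainder of time_between_refresh, clamped so the scenario never overshoots total_time. The same shape recurs in the refresh-VM, SmartState, and provisioning workloads below. A generic sketch of the logic (paced_loop is a hypothetical helper, not part of the project):

import time


def paced_loop(work, total_time, interval):
    """Invoke work() roughly every `interval` seconds until `total_time` elapses."""
    starttime = time.time()
    while (time.time() - starttime) < total_time:
        iteration_start = time.time()
        work()                                      # e.g. queue a batch of refreshes
        work_time = time.time() - iteration_start
        time_remaining = total_time - (time.time() - starttime)
        if work_time < interval:
            if 0 < time_remaining < interval:
                time.sleep(time_remaining)          # final partial interval
            elif time_remaining > 0:
                time.sleep(interval - work_time)    # pad out the current interval
        # else: work overran the interval, so the next batch starts immediately


paced_loop(lambda: None, total_time=5, interval=2)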
Code Example #3
def test_workload_capacity_and_utilization(request, scenario):
    """Runs through provider based scenarios enabling C&U and running for a set period of time.
    Memory Monitor creates graphs and summary at the end of each scenario."""
    from_ts = int(time.time() * 1000)
    ssh_client = SSHClient()
    logger.debug('Scenario: {}'.format(scenario['name']))

    clean_appliance(ssh_client)

    quantifiers = {}
    scenario_data = {'appliance_ip': cfme_performance['appliance']['ip_address'],
        'appliance_name': cfme_performance['appliance']['appliance_name'],
        'test_dir': 'workload-cap-and-util',
        'test_name': 'Capacity and Utilization',
        'appliance_roles': get_server_roles_workload_cap_and_util(separator=', '),
        'scenario': scenario}
    monitor_thread = SmemMemoryMonitor(SSHClient(), scenario_data)

    def cleanup_workload(scenario, from_ts, quantifiers, scenario_data):
        starttime = time.time()
        to_ts = int(starttime * 1000)
        g_url = get_scenario_dashboard_url(scenario, from_ts, to_ts)
        logger.debug('Started cleaning up monitoring thread.')
        monitor_thread.grafana_url = g_url
        monitor_thread.signal = False
        monitor_thread.join()
        add_workload_quantifiers(quantifiers, scenario_data)
        timediff = time.time() - starttime
        logger.info('Finished cleaning up monitoring thread in {}'.format(timediff))
    request.addfinalizer(lambda: cleanup_workload(scenario, from_ts, quantifiers, scenario_data))

    monitor_thread.start()

    wait_for_miq_server_workers_started(poll_interval=2)
    set_server_roles_workload_cap_and_util(ssh_client)
    add_providers(scenario['providers'])
    logger.info('Sleeping for Refresh: {}s'.format(scenario['refresh_sleep_time']))
    time.sleep(scenario['refresh_sleep_time'])
    set_cap_and_util_all_via_rails(ssh_client)

    # Variable amount of time for C&U collections/processing
    total_time = scenario['total_time']
    starttime = time.time()
    elapsed_time = 0
    while (elapsed_time < total_time):
        elapsed_time = time.time() - starttime
        time_left = total_time - elapsed_time
        logger.info('Time elapsed: {}/{}'.format(round(elapsed_time, 2), total_time))
        if (time_left > 0 and time_left < 300):
            time.sleep(time_left)
        elif time_left > 0:
            time.sleep(300)

    quantifiers['Elapsed_Time'] = round(elapsed_time, 2)
    logger.info('Test Ending...')
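Unlike the refresh workloads, the C&U scenario has nothing to queue once collection is enabled, so its loop simply waits out total_time in slices of at most 300 seconds, logging progress between sleeps. A compact sketch of that wait (wait_with_progress is a hypothetical helper, with print standing in for logger.info):

import time


def wait_with_progress(total_time, chunk=300):
    """Sleep for total_time seconds in chunk-sized slices, reporting progress."""
    starttime = time.time()
    elapsed_time = 0
    while elapsed_time < total_time:
        elapsed_time = time.time() - starttime
        time_left = total_time - elapsed_time
        print('Time elapsed: {}/{}'.format(round(elapsed_time, 2), total_time))
        if 0 < time_left < chunk:
            time.sleep(time_left)     # final partial slice
        elif time_left > 0:
            time.sleep(chunk)
    return round(elapsed_time, 2)     # what the test stores as Elapsed_Time


wait_with_progress(total_time=2, chunk=300)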
Code Example #4
def test_idle_default(request):
    """Runs an appliance at idle for specific amount of time. Memory Monitor creates graphs and
    summary at the end of the scenario."""
    from_ts = int(time.time() * 1000)
    ssh_client = SSHClient()

    clean_appliance(ssh_client)

    quantifiers = {}
    scenario_data = {
        'appliance_ip': cfme_performance['appliance']['ip_address'],
        'appliance_name': cfme_performance['appliance']['appliance_name'],
        'test_dir': 'workload-idle',
        'test_name': 'Idle with Default Roles',
        'appliance_roles': get_server_roles_workload_idle_default(separator=', '),
        'scenario': {
            'name': 'default'
        }
    }
    monitor_thread = SmemMemoryMonitor(SSHClient(), scenario_data)

    def cleanup_workload(from_ts, quantifiers, scenario_data):
        starttime = time.time()
        to_ts = int(starttime * 1000)
        g_url = get_default_dashboard_url(from_ts, to_ts)
        logger.debug('Started cleaning up monitoring thread.')
        monitor_thread.grafana_url = g_url
        monitor_thread.signal = False
        monitor_thread.join()
        add_workload_quantifiers(quantifiers, scenario_data)
        timediff = time.time() - starttime
        logger.info(
            'Finished cleaning up monitoring thread in {}'.format(timediff))

    request.addfinalizer(
        lambda: cleanup_workload(from_ts, quantifiers, scenario_data))

    monitor_thread.start()

    wait_for_miq_server_workers_started(poll_interval=2)
    # No need to set server roles as we are using the default set of roles

    s_time = cfme_performance['workloads']['test_idle_default']['total_time']
    logger.info('Idling appliance for {}s'.format(s_time))
    time.sleep(s_time)

    quantifiers['Elapsed_Time'] = s_time
    logger.info('Test Ending...')
Code Example #5
def test_idle(request, scenario):
    """Runs an appliance at idle with specific roles turned on for specific amount of time. Memory
    Monitor creates graphs and summary at the end of the scenario."""
    from_ts = int(time.time() * 1000)
    ssh_client = SSHClient()
    logger.debug('Scenario: {}'.format(scenario['name']))

    clean_appliance(ssh_client)

    quantifiers = {}
    scenario_data = {
        'appliance_ip': cfme_performance['appliance']['ip_address'],
        'appliance_name': cfme_performance['appliance']['appliance_name'],
        'test_dir': 'workload-idle',
        'test_name': 'Idle with {} Roles'.format(scenario['name']),
        'appliance_roles': ', '.join(scenario['roles']),
        'scenario': scenario
    }
    monitor_thread = SmemMemoryMonitor(SSHClient(), scenario_data)

    def cleanup_workload(from_ts, quantifiers, scenario_data):
        starttime = time.time()
        to_ts = int(starttime * 1000)
        g_urls = get_scenario_dashboard_urls(scenario, from_ts, to_ts)
        logger.debug('Started cleaning up monitoring thread.')
        monitor_thread.grafana_urls = g_urls
        monitor_thread.signal = False
        monitor_thread.join()
        add_workload_quantifiers(quantifiers, scenario_data)
        timediff = time.time() - starttime
        logger.info(
            'Finished cleaning up monitoring thread in {}'.format(timediff))

    request.addfinalizer(
        lambda: cleanup_workload(from_ts, quantifiers, scenario_data))

    monitor_thread.start()

    wait_for_miq_server_workers_started(poll_interval=2)
    set_server_roles(ssh_client, scenario['roles'])

    s_time = scenario['total_time']
    logger.info('Idling appliance for {}s'.format(s_time))
    time.sleep(s_time)

    quantifiers['Elapsed_Time'] = s_time
    logger.info('Test Ending...')
Code Example #6
File: test_idle.py  Project: akrzos/cfme-performance
def test_idle(request, scenario):
    """Runs an appliance at idle with specific roles turned on for specific amount of time. Memory
    Monitor creates graphs and summary at the end of the scenario."""
    from_ts = int(time.time() * 1000)
    ssh_client = SSHClient()
    logger.debug('Scenario: {}'.format(scenario['name']))

    clean_appliance(ssh_client)

    quantifiers = {}
    scenario_data = {'appliance_ip': cfme_performance['appliance']['ip_address'],
        'appliance_name': cfme_performance['appliance']['appliance_name'],
        'test_dir': 'workload-idle',
        'test_name': 'Idle with {} Roles'.format(scenario['name']),
        'appliance_roles': ', '.join(scenario['roles']),
        'scenario': scenario}
    monitor_thread = SmemMemoryMonitor(SSHClient(), scenario_data)

    def cleanup_workload(from_ts, quantifiers, scenario_data):
        starttime = time.time()
        to_ts = int(starttime * 1000)
        g_urls = get_scenario_dashboard_urls(scenario, from_ts, to_ts)
        logger.debug('Started cleaning up monitoring thread.')
        monitor_thread.grafana_urls = g_urls
        monitor_thread.signal = False
        monitor_thread.join()
        add_workload_quantifiers(quantifiers, scenario_data)
        timediff = time.time() - starttime
        logger.info('Finished cleaning up monitoring thread in {}'.format(timediff))
    request.addfinalizer(lambda: cleanup_workload(from_ts, quantifiers, scenario_data))

    monitor_thread.start()

    wait_for_miq_server_workers_started(poll_interval=2)
    set_server_roles(ssh_client, scenario['roles'])

    s_time = scenario['total_time']
    logger.info('Idling appliance for {}s'.format(s_time))
    time.sleep(s_time)

    quantifiers['Elapsed_Time'] = s_time
    logger.info('Test Ending...')
Code Example #7
def test_idle_default(request):
    """Runs an appliance at idle for specific amount of time. Memory Monitor creates graphs and
    summary at the end of the scenario."""
    from_ts = int(time.time() * 1000)
    ssh_client = SSHClient()

    clean_appliance(ssh_client)

    quantifiers = {}
    scenario_data = {'appliance_ip': cfme_performance['appliance']['ip_address'],
        'appliance_name': cfme_performance['appliance']['appliance_name'],
        'test_dir': 'workload-idle',
        'test_name': 'Idle with Default Roles',
        'appliance_roles': get_server_roles_workload_idle_default(separator=', '),
        'scenario': {'name': 'default'}}
    monitor_thread = SmemMemoryMonitor(SSHClient(), scenario_data)

    def cleanup_workload(from_ts, quantifiers, scenario_data):
        starttime = time.time()
        to_ts = int(starttime * 1000)
        g_url = get_default_dashboard_url(from_ts, to_ts)
        logger.debug('Started cleaning up monitoring thread.')
        monitor_thread.grafana_url = g_url
        monitor_thread.signal = False
        monitor_thread.join()
        add_workload_quantifiers(quantifiers, scenario_data)
        timediff = time.time() - starttime
        logger.info('Finished cleaning up monitoring thread in {}'.format(timediff))
    request.addfinalizer(lambda: cleanup_workload(from_ts, quantifiers, scenario_data))

    monitor_thread.start()

    wait_for_miq_server_workers_started(poll_interval=2)
    # No need to set server roles as we are using the default set of roles

    s_time = cfme_performance['workloads']['test_idle_default']['total_time']
    logger.info('Idling appliance for {}s'.format(s_time))
    time.sleep(s_time)

    quantifiers['Elapsed_Time'] = s_time
    logger.info('Test Ending...')
Code Example #8
def test_refresh_vms(request, scenario):
    """Refreshes all vm's then waits for a specific amount of time. Memory Monitor creates
    graphs and summary at the end of the scenario."""
    from_ts = int(time.time() * 1000)
    ssh_client = SSHClient()
    logger.debug('Scenario: {}'.format(scenario['name']))

    clean_appliance(ssh_client)

    quantifiers = {}
    scenario_data = {
        'appliance_ip': cfme_performance['appliance']['ip_address'],
        'appliance_name': cfme_performance['appliance']['appliance_name'],
        'test_dir': 'workload-refresh-vm',
        'test_name': 'Refresh VMs',
        'appliance_roles': get_server_roles_workload_refresh_vms(separator=', '),
        'scenario': scenario
    }
    monitor_thread = SmemMemoryMonitor(SSHClient(), scenario_data)

    def cleanup_workload(scenario, from_ts, quantifiers, scenario_data):
        starttime = time.time()
        to_ts = int(starttime * 1000)
        g_urls = get_scenario_dashboard_urls(scenario, from_ts, to_ts)
        logger.debug('Started cleaning up monitoring thread.')
        monitor_thread.grafana_urls = g_urls
        monitor_thread.signal = False
        monitor_thread.join()
        add_workload_quantifiers(quantifiers, scenario_data)
        timediff = time.time() - starttime
        logger.info(
            'Finished cleaning up monitoring thread in {}'.format(timediff))

    request.addfinalizer(lambda: cleanup_workload(scenario, from_ts,
                                                  quantifiers, scenario_data))

    monitor_thread.start()

    wait_for_miq_server_workers_started(poll_interval=2)
    set_server_roles_workload_refresh_vms(ssh_client)
    add_providers(scenario['providers'])
    logger.info('Sleeping for refresh: {}s'.format(
        scenario['refresh_sleep_time']))
    time.sleep(scenario['refresh_sleep_time'])

    full_refresh_threshold_set = False
    if 'full_refresh_threshold' in scenario:
        if scenario['full_refresh_threshold'] != FULL_REFRESH_THRESHOLD_DEFAULT:
            set_full_refresh_threshold(ssh_client,
                                       scenario['full_refresh_threshold'])
            full_refresh_threshold_set = True
    if not full_refresh_threshold_set:
        logger.debug('Keeping full_refresh_threshold at default ({}).'.format(
            FULL_REFRESH_THRESHOLD_DEFAULT))

    refresh_size = scenario['refresh_size']
    vm_ids = get_all_vm_ids()
    vm_ids_iter = cycle(vm_ids)
    logger.debug('Number of VM IDs: {}'.format(len(vm_ids)))

    # Variable amount of time for refresh workload
    total_time = scenario['total_time']
    starttime = time.time()
    time_between_refresh = scenario['time_between_refresh']
    total_refreshed_vms = 0

    while ((time.time() - starttime) < total_time):
        start_refresh_time = time.time()
        refresh_list = [next(vm_ids_iter) for x in range(refresh_size)]
        refresh_provider_vms_bulk(refresh_list)
        total_refreshed_vms += len(refresh_list)
        iteration_time = time.time()

        refresh_time = round(iteration_time - start_refresh_time, 2)
        elapsed_time = iteration_time - starttime
        logger.debug('Time to Queue VM Refreshes: {}'.format(refresh_time))
        logger.info('Time elapsed: {}/{}'.format(round(elapsed_time, 2),
                                                 total_time))

        if refresh_time < time_between_refresh:
            wait_diff = time_between_refresh - refresh_time
            time_remaining = total_time - elapsed_time
            if (time_remaining > 0 and time_remaining < time_between_refresh):
                time.sleep(time_remaining)
            elif time_remaining > 0:
                time.sleep(wait_diff)
        else:
            logger.warn(
                'Time to Queue VM Refreshes ({}) exceeded time between '
                '({})'.format(refresh_time, time_between_refresh))

    quantifiers['Elapsed_Time'] = round(time.time() - starttime, 2)
    quantifiers['Queued_VM_Refreshes'] = total_refreshed_vms
    logger.info('Test Ending...')
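The refresh batches above come from itertools.cycle, which turns the finite VM ID list into an endless round-robin iterator. That way refresh_size does not need to divide the number of VMs evenly, and every VM keeps getting refreshed for as long as the scenario runs. A small self-contained demonstration (the literal IDs stand in for get_all_vm_ids()):

from itertools import cycle

vm_ids = [101, 102, 103, 104, 105]   # stand-in for get_all_vm_ids()
refresh_size = 3
vm_ids_iter = cycle(vm_ids)          # wraps around indefinitely

for _ in range(3):
    refresh_list = [next(vm_ids_iter) for _ in range(refresh_size)]
    print(refresh_list)
# [101, 102, 103]
# [104, 105, 101]
# [102, 103, 104]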
Code Example #9
def test_workload_smartstate_analysis(request, scenario):
    """Runs through provider based scenarios initiating smart state analysis against VMs, Hosts,
    and Datastores"""
    from_ts = int(time.time() * 1000)
    ssh_client = SSHClient()
    logger.debug('Scenario: {}'.format(scenario['name']))
    install_vddk(ssh_client)

    clean_appliance(ssh_client)

    quantifiers = {}
    scenario_data = {'appliance_ip': cfme_performance['appliance']['ip_address'],
        'appliance_name': cfme_performance['appliance']['appliance_name'],
        'test_dir': 'workload-ssa',
        'test_name': 'SmartState Analysis',
        'appliance_roles': get_server_roles_workload_smartstate(separator=', '),
        'scenario': scenario}
    monitor_thread = SmemMemoryMonitor(SSHClient(), scenario_data)

    def cleanup_workload(scenario, from_ts, quantifiers, scenario_data):
        starttime = time.time()
        to_ts = int(starttime * 1000)
        g_url = get_scenario_dashboard_url(scenario, from_ts, to_ts)
        logger.debug('Started cleaning up monitoring thread.')
        monitor_thread.grafana_url = g_url
        monitor_thread.signal = False
        monitor_thread.join()
        add_workload_quantifiers(quantifiers, scenario_data)
        timediff = time.time() - starttime
        logger.info('Finished cleaning up monitoring thread in {}'.format(timediff))
    request.addfinalizer(lambda: cleanup_workload(scenario, from_ts, quantifiers, scenario_data))

    monitor_thread.start()

    wait_for_miq_server_workers_started(poll_interval=2)
    set_server_roles_workload_smartstate(ssh_client)
    add_providers(scenario['providers'])
    logger.info('Sleeping for Refresh: {}s'.format(scenario['refresh_sleep_time']))
    time.sleep(scenario['refresh_sleep_time'])

    # Add host credentials and set CFME relationship for RHEVM SSA
    for provider in scenario['providers']:
        add_host_credentials(cfme_performance['providers'][provider], ssh_client)
        if (cfme_performance['providers'][provider]['type'] ==
                "ManageIQ::Providers::Redhat::InfraManager"):
            set_cfme_server_relationship(ssh_client,
                cfme_performance['appliance']['appliance_name'])

    # Get list of VM ids by mapping provider name + vm name to the vm id
    vm_ids_to_scan = map_vms_to_ids(scenario['vms_to_scan'])

    # Variable amount of time for SmartState Analysis workload
    total_time = scenario['total_time']
    starttime = time.time()
    time_between_analyses = scenario['time_between_analyses']
    total_scanned_VMs = 0

    while ((time.time() - starttime) < total_time):
        start_ssa_time = time.time()
        scan_provider_vms_bulk(vm_ids_to_scan)
        total_scanned_VMs += len(vm_ids_to_scan)
        iteration_time = time.time()

        ssa_time = round(iteration_time - start_ssa_time, 2)
        elapsed_time = iteration_time - starttime
        logger.debug('Time to Queue SmartState Analyses: {}'.format(ssa_time))
        logger.info('Time elapsed: {}/{}'.format(round(elapsed_time, 2), total_time))

        if ssa_time < time_between_analyses:
            wait_diff = time_between_analyses - ssa_time
            time_remaining = total_time - elapsed_time
            if (time_remaining > 0 and time_remaining < time_between_analyses):
                time.sleep(time_remaining)
            elif time_remaining > 0:
                time.sleep(wait_diff)
        else:
            logger.warn('Time to Queue SmartState Analyses ({}) exceeded time between '
                '({})'.format(ssa_time, time_between_analyses))

    quantifiers['Elapsed_Time'] = round(time.time() - starttime, 2)
    quantifiers['Queued_VM_Scans'] = total_scanned_VMs
    logger.info('Test Ending...')
Code Example #10
def test_refresh_providers(request, scenario):
    """Refreshes providers then waits for a specific amount of time. Memory Monitor creates graphs
    and summary at the end of the scenario."""
    from_ts = int(time.time() * 1000)
    ssh_client = SSHClient()
    logger.debug('Scenario: {}'.format(scenario['name']))

    clean_appliance(ssh_client)

    quantifiers = {}
    scenario_data = {'appliance_ip': cfme_performance['appliance']['ip_address'],
        'appliance_name': cfme_performance['appliance']['appliance_name'],
        'test_dir': 'workload-refresh-providers',
        'test_name': 'Refresh Providers',
        'appliance_roles': get_server_roles_workload_refresh_providers(separator=', '),
        'scenario': scenario}
    monitor_thread = SmemMemoryMonitor(SSHClient(), scenario_data)

    def cleanup_workload(scenario, from_ts, quantifiers, scenario_data):
        starttime = time.time()
        to_ts = int(starttime * 1000)
        g_urls = get_scenario_dashboard_urls(scenario, from_ts, to_ts)
        logger.debug('Started cleaning up monitoring thread.')
        monitor_thread.grafana_urls = g_urls
        monitor_thread.signal = False
        monitor_thread.join()
        add_workload_quantifiers(quantifiers, scenario_data)
        timediff = time.time() - starttime
        logger.info('Finished cleaning up monitoring thread in {}'.format(timediff))
    request.addfinalizer(lambda: cleanup_workload(scenario, from_ts, quantifiers, scenario_data))

    monitor_thread.start()

    wait_for_miq_server_workers_started(poll_interval=2)
    set_server_roles_workload_refresh_providers(ssh_client)
    add_providers(scenario['providers'])
    id_list = get_all_provider_ids()

    # Variable amount of time for refresh workload
    total_time = scenario['total_time']
    starttime = time.time()
    time_between_refresh = scenario['time_between_refresh']
    total_refreshed_providers = 0

    while ((time.time() - starttime) < total_time):
        start_refresh_time = time.time()
        refresh_providers_bulk(id_list)
        total_refreshed_providers += len(id_list)
        iteration_time = time.time()

        refresh_time = round(iteration_time - start_refresh_time, 2)
        elapsed_time = iteration_time - starttime
        logger.debug('Time to Queue Refreshes: {}'.format(refresh_time))
        logger.info('Time elapsed: {}/{}'.format(round(elapsed_time, 2), total_time))

        if refresh_time < time_between_refresh:
            wait_diff = time_between_refresh - refresh_time
            time_remaining = total_time - elapsed_time
            if (time_remaining > 0 and time_remaining < time_between_refresh):
                time.sleep(time_remaining)
            elif time_remaining > 0:
                time.sleep(wait_diff)
        else:
            logger.warn('Time to Queue Refreshes ({}) exceeded time between '
                '({})'.format(refresh_time, time_between_refresh))

    quantifiers['Elapsed_Time'] = round(time.time() - starttime, 2)
    quantifiers['Queued_Provider_Refreshes'] = total_refreshed_providers
    logger.info('Test Ending...')
Code Example #11
def test_provisioning(request, scenario):
    """Runs through provisioning scenarios using the REST API to
    continuously provision a VM for a specified period of time.
    Memory Monitor creates graphs and summary at the end of each scenario."""

    from_ts = int(time.time() * 1000)
    ssh_client = SSHClient()
    logger.debug('Scenario: {}'.format(scenario['name']))

    clean_appliance(ssh_client)

    quantifiers = {}
    scenario_data = {'appliance_ip': cfme_performance['appliance']['ip_address'],
        'appliance_name': cfme_performance['appliance']['appliance_name'],
        'test_dir': 'workload-provisioning',
        'test_name': 'Provisioning',
        'appliance_roles': get_server_roles_workload_provisioning(separator=', '),
        'scenario': scenario}
    monitor_thread = SmemMemoryMonitor(SSHClient(), scenario_data)

    provision_order = []

    def cleanup_workload(scenario, from_ts, vms_to_cleanup, quantifiers, scenario_data):
        starttime = time.time()
        to_ts = int(starttime * 1000)
        g_urls = get_scenario_dashboard_urls(scenario, from_ts, to_ts)
        logger.debug('Started cleaning up monitoring thread.')
        set_server_roles_workload_provisioning_cleanup(ssh_client)
        monitor_thread.grafana_urls = g_urls
        monitor_thread.signal = False
        final_vm_size = len(vms_to_cleanup)
        delete_provisioned_vms(vms_to_cleanup)
        monitor_thread.join()
        logger.info('{} VMs were left over, and {} VMs were deleted in the finalizer.'
            .format(final_vm_size, final_vm_size - len(vms_to_cleanup)))
        logger.info('The following VMs were left over after the test: {}'
            .format(vms_to_cleanup))
        quantifiers['VMs_To_Delete_In_Finalizer'] = final_vm_size
        quantifiers['VMs_Deleted_In_Finalizer'] = final_vm_size - len(vms_to_cleanup)
        quantifiers['Leftover_VMs'] = vms_to_cleanup
        add_workload_quantifiers(quantifiers, scenario_data)
        timediff = time.time() - starttime
        logger.info('Finished cleaning up monitoring thread in {}'.format(timediff))

    request.addfinalizer(lambda: cleanup_workload(scenario, from_ts, provision_order, quantifiers,
            scenario_data))

    monitor_thread.start()

    wait_for_miq_server_workers_started(poll_interval=2)
    set_server_roles_workload_provisioning(ssh_client)
    add_providers(scenario['providers'])
    logger.info('Sleeping for Refresh: {}s'.format(scenario['refresh_sleep_time']))
    time.sleep(scenario['refresh_sleep_time'])

    guid_list = get_template_guids(scenario['templates'])
    guid_cycle = cycle(guid_list)
    cleanup_size = scenario['cleanup_size']
    number_of_vms = scenario['number_of_vms']
    total_time = scenario['total_time']
    time_between_provision = scenario['time_between_provision']
    total_provisioned_vms = 0
    total_deleted_vms = 0
    provisioned_vms = 0
    starttime = time.time()

    while ((time.time() - starttime) < total_time):
        start_iteration_time = time.time()
        provision_list = []
        for i in range(number_of_vms):
            total_provisioned_vms += 1
            provisioned_vms += 1
            vm_to_provision = '{}-provision-{}'.format(
                test_ts, str(total_provisioned_vms).zfill(4))
            guid_to_provision, provider_name = next(guid_cycle)
            provider_to_provision = cfme_performance['providers'][provider_name]
            provision_order.append((vm_to_provision, provider_name))
            provision_list.append((vm_to_provision, guid_to_provision,
                provider_to_provision['vlan_network']))

        provision_vm(provision_list)
        creation_time = time.time()
        provision_time = round(creation_time - start_iteration_time, 2)
        logger.debug('Time to initiate provisioning: {}'.format(provision_time))
        logger.info('{} VMs provisioned so far'.format(total_provisioned_vms))

        if provisioned_vms > cleanup_size * len(scenario['providers']):
            start_remove_time = time.time()
            if delete_provisioned_vm(provision_order[0]):
                provision_order.pop(0)
                provisioned_vms -= 1
                total_deleted_vms += 1
            deletion_time = round(time.time() - start_remove_time, 2)
            logger.debug('Time to initiate deleting: {}'.format(deletion_time))
            logger.info('{} VMs deleted so far'.format(total_deleted_vms))

        end_iteration_time = time.time()
        iteration_time = round(end_iteration_time - start_iteration_time, 2)
        elapsed_time = end_iteration_time - starttime
        logger.debug('Time to initiate provisioning and deletion: {}'.format(iteration_time))
        logger.info('Time elapsed: {}/{}'.format(round(elapsed_time, 2), total_time))

        if iteration_time < time_between_provision:
            wait_diff = time_between_provision - iteration_time
            time_remaining = total_time - elapsed_time
            if (time_remaining > 0 and time_remaining < time_between_provision):
                time.sleep(time_remaining)
            elif time_remaining > 0:
                time.sleep(wait_diff)
        else:
            logger.warn('Time to initiate provisioning ({}) exceeded time between '
                '({})'.format(iteration_time, time_between_provision))

    quantifiers['Elapsed_Time'] = round(time.time() - starttime, 2)
    quantifiers['Queued_VM_Provisionings'] = total_provisioned_vms
    quantifiers['Deleted_VMs'] = total_deleted_vms
    logger.info('Provisioned {} VMs and deleted {} VMs during the scenario.'
        .format(total_provisioned_vms, total_deleted_vms))
    logger.info('Test Ending...')
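The bookkeeping in the provisioning workload reduces to a FIFO queue: new VMs are appended to provision_order, and once the backlog exceeds cleanup_size per provider the oldest VM is deleted, but it is only removed from the queue if the delete call succeeds, so the finalizer can clean up whatever is left. A toy sketch of that rolling window, with print() stand-ins for the REST provision/delete calls and one VM per iteration instead of number_of_vms:

from collections import deque

cleanup_size = 2                  # stand-in for scenario['cleanup_size']
providers = ['example-provider']  # stand-in for scenario['providers']
provision_order = deque()


def provision_vm(name):
    print('queued provision of {}'.format(name))


def delete_provisioned_vm(name):
    print('queued delete of {}'.format(name))
    return True                   # a real call can fail, leaving the VM queued


for i in range(6):
    vm_to_provision = 'test-provision-{}'.format(str(i).zfill(4))
    provision_vm(vm_to_provision)
    provision_order.append(vm_to_provision)
    # Retire the oldest VM once the backlog exceeds cleanup_size per provider
    if len(provision_order) > cleanup_size * len(providers):
        if delete_provisioned_vm(provision_order[0]):
            provision_order.popleft()

print('leftover VMs for the finalizer: {}'.format(list(provision_order)))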
Code Example #12
def test_workload_capacity_and_utilization_rep(request, scenario):
    """Runs through provider based scenarios enabling C&U and replication, run for a set period of
    time. Memory Monitor creates graphs and summary at the end of each scenario."""
    from_ts = int(time.time() * 1000)
    ssh_client = SSHClient()
    ssh_master_args = {
            'hostname': scenario['replication_master']['ip_address'],
            'username': scenario['replication_master']['ssh']['username'],
            'password': scenario['replication_master']['ssh']['password']
        }
    ssh_client_master = SSHClient(**ssh_master_args)
    logger.debug('Scenario: {}'.format(scenario['name']))

    is_pglogical = scenario['replication'] == 'pglogical'

    # Turn off master pglogical replication in case a rubyrep scenario follows a pglogical scenario
    set_pglogical_replication(ssh_client_master, replication_type=':none')
    # Spawn tail beforehand to prevent unnecessary waiting on MiqServer starting, since the
    # appliance under test is cleaned first, followed by the master appliance
    sshtail_evm = SSHTail('/var/www/miq/vmdb/log/evm.log')
    sshtail_evm.set_initial_file_end()
    logger.info('Clean appliance under test ({})'.format(ssh_client))
    clean_appliance(ssh_client)
    logger.info('Clean master appliance ({})'.format(ssh_client_master))
    clean_appliance(ssh_client_master, False)  # Clean Replication master appliance

    if is_pglogical:
        scenario_data = {'appliance_ip': cfme_performance['appliance']['ip_address'],
            'appliance_name': cfme_performance['appliance']['appliance_name'],
            'test_dir': 'workload-cap-and-util-rep',
            'test_name': 'Capacity and Utilization Replication (pgLogical)',
            'appliance_roles': get_server_roles_workload_cap_and_util(separator=', '),
            'scenario': scenario}
    else:
        scenario_data = {'appliance_ip': cfme_performance['appliance']['ip_address'],
            'appliance_name': cfme_performance['appliance']['appliance_name'],
            'test_dir': 'workload-cap-and-util-rep',
            'test_name': 'Capacity and Utilization Replication (RubyRep)',
            'appliance_roles': get_server_roles_workload_cap_and_util_rep(separator=', '),
            'scenario': scenario}
    quantifiers = {}
    monitor_thread = SmemMemoryMonitor(SSHClient(), scenario_data)

    def cleanup_workload(scenario, from_ts, quantifiers, scenario_data):
        starttime = time.time()
        to_ts = int(starttime * 1000)
        g_urls = get_scenario_dashboard_urls(scenario, from_ts, to_ts)
        logger.debug('Started cleaning up monitoring thread.')
        monitor_thread.grafana_urls = g_urls
        monitor_thread.signal = False
        monitor_thread.join()
        add_workload_quantifiers(quantifiers, scenario_data)
        timediff = time.time() - starttime
        logger.info('Finished cleaning up monitoring thread in {}'.format(timediff))
    request.addfinalizer(lambda: cleanup_workload(scenario, from_ts, quantifiers, scenario_data))

    monitor_thread.start()

    wait_for_miq_server_workers_started(evm_tail=sshtail_evm, poll_interval=2)
    set_server_roles_workload_cap_and_util(ssh_client)
    add_providers(scenario['providers'])
    logger.info('Sleeping for Refresh: {}s'.format(scenario['refresh_sleep_time']))
    time.sleep(scenario['refresh_sleep_time'])
    set_cap_and_util_all_via_rails(ssh_client)

    # Configure Replication
    if is_pglogical:
        # Setup appliance under test to :remote
        set_pglogical_replication(ssh_client, replication_type=':remote')
        # Setup master appliance to :global
        set_pglogical_replication(ssh_client_master, replication_type=':global')
        # Setup master to subscribe:
        add_pglogical_replication_subscription(ssh_client_master,
            cfme_performance['appliance']['ip_address'])
    else:
        # Setup local towards Master
        set_rubyrep_replication(ssh_client, scenario['replication_master']['ip_address'])
        # Force uninstall rubyrep for this region from master (Unsure if still needed)
        # ssh_client.run_rake_command('evm:dbsync:uninstall')
        # time.sleep(30)  # Wait to quiesce
        # Turn on DB Sync role
        set_server_roles_workload_cap_and_util_rep(ssh_client)

    # Variable amount of time for C&U collections/processing
    total_time = scenario['total_time']
    starttime = time.time()
    elapsed_time = 0
    while (elapsed_time < total_time):
        elapsed_time = time.time() - starttime
        time_left = total_time - elapsed_time
        logger.info('Time elapsed: {}/{}'.format(round(elapsed_time, 2), total_time))
        if (time_left > 0 and time_left < 300):
            time.sleep(time_left)
        elif time_left > 0:
            time.sleep(300)

    # Turn off replication:
    if is_pglogical:
        set_pglogical_replication(ssh_client_master, replication_type=':none')
    else:
        set_server_roles_workload_cap_and_util(ssh_client)

    quantifiers['Elapsed_Time'] = round(elapsed_time, 2)
    logger.info('Test Ending...')
Code Example #13
def test_workload_capacity_and_utilization_rep(request, scenario):
    """Runs through provider based scenarios enabling C&U and replication, run for a set period of
    time. Memory Monitor creates graphs and summary at the end of each scenario."""
    from_ts = int(time.time() * 1000)
    ssh_client = SSHClient()
    ssh_master_args = {
        'hostname': scenario['replication_master']['ip_address'],
        'username': scenario['replication_master']['ssh']['username'],
        'password': scenario['replication_master']['ssh']['password']
    }
    ssh_client_master = SSHClient(**ssh_master_args)
    logger.debug('Scenario: {}'.format(scenario['name']))

    is_pglogical = scenario['replication'] == 'pglogical'

    # Turn off master pglogical replication in case a rubyrep scenario follows a pglogical scenario
    set_pglogical_replication(ssh_client_master, replication_type=':none')
    # Spawn tail beforehand to prevent unnecessary waiting on MiqServer starting, since the
    # appliance under test is cleaned first, followed by the master appliance
    sshtail_evm = SSHTail('/var/www/miq/vmdb/log/evm.log')
    sshtail_evm.set_initial_file_end()
    logger.info('Clean appliance under test ({})'.format(ssh_client))
    clean_appliance(ssh_client)
    logger.info('Clean master appliance ({})'.format(ssh_client_master))
    clean_appliance(ssh_client_master, False)  # Clean Replication master appliance

    if is_pglogical:
        scenario_data = {'appliance_ip': cfme_performance['appliance']['ip_address'],
            'appliance_name': cfme_performance['appliance']['appliance_name'],
            'test_dir': 'workload-cap-and-util-rep',
            'test_name': 'Capacity and Utilization Replication (pgLogical)',
            'appliance_roles': get_server_roles_workload_cap_and_util(separator=', '),
            'scenario': scenario}
    else:
        scenario_data = {'appliance_ip': cfme_performance['appliance']['ip_address'],
            'appliance_name': cfme_performance['appliance']['appliance_name'],
            'test_dir': 'workload-cap-and-util-rep',
            'test_name': 'Capacity and Utilization Replication (RubyRep)',
            'appliance_roles': get_server_roles_workload_cap_and_util_rep(separator=', '),
            'scenario': scenario}
    quantifiers = {}
    monitor_thread = SmemMemoryMonitor(SSHClient(), scenario_data)

    def cleanup_workload(scenario, from_ts, quantifiers, scenario_data):
        starttime = time.time()
        to_ts = int(starttime * 1000)
        g_urls = get_scenario_dashboard_urls(scenario, from_ts, to_ts)
        logger.debug('Started cleaning up monitoring thread.')
        monitor_thread.grafana_urls = g_urls
        monitor_thread.signal = False
        monitor_thread.join()
        add_workload_quantifiers(quantifiers, scenario_data)
        timediff = time.time() - starttime
        logger.info(
            'Finished cleaning up monitoring thread in {}'.format(timediff))

    request.addfinalizer(lambda: cleanup_workload(scenario, from_ts,
                                                  quantifiers, scenario_data))

    monitor_thread.start()

    wait_for_miq_server_workers_started(evm_tail=sshtail_evm, poll_interval=2)
    set_server_roles_workload_cap_and_util(ssh_client)
    add_providers(scenario['providers'])
    logger.info('Sleeping for Refresh: {}s'.format(
        scenario['refresh_sleep_time']))
    time.sleep(scenario['refresh_sleep_time'])
    set_cap_and_util_all_via_rails(ssh_client)

    # Configure Replication
    if is_pglogical:
        # Setup appliance under test to :remote
        set_pglogical_replication(ssh_client, replication_type=':remote')
        # Setup master appliance to :global
        set_pglogical_replication(ssh_client_master,
                                  replication_type=':global')
        # Setup master to subscribe:
        add_pglogical_replication_subscription(
            ssh_client_master, cfme_performance['appliance']['ip_address'])
    else:
        # Setup local towards Master
        set_rubyrep_replication(ssh_client,
                                scenario['replication_master']['ip_address'])
        # Force uninstall rubyrep for this region from master (Unsure if still needed)
        # ssh_client.run_rake_command('evm:dbsync:uninstall')
        # time.sleep(30)  # Wait to quiesce
        # Turn on DB Sync role
        set_server_roles_workload_cap_and_util_rep(ssh_client)

    # Variable amount of time for C&U collections/processing
    total_time = scenario['total_time']
    starttime = time.time()
    elapsed_time = 0
    while (elapsed_time < total_time):
        elapsed_time = time.time() - starttime
        time_left = total_time - elapsed_time
        logger.info('Time elapsed: {}/{}'.format(round(elapsed_time, 2),
                                                 total_time))
        if (time_left > 0 and time_left < 300):
            time.sleep(time_left)
        elif time_left > 0:
            time.sleep(300)

    # Turn off replication:
    if is_pglogical:
        set_pglogical_replication(ssh_client_master, replication_type=':none')
    else:
        set_server_roles_workload_cap_and_util(ssh_client)

    quantifiers['Elapsed_Time'] = round(elapsed_time, 2)
    logger.info('Test Ending...')
Code Example #14
def test_workload_smartstate_analysis(request, scenario):
    """Runs through provider based scenarios initiating smart state analysis against VMs, Hosts,
    and Datastores"""
    from_ts = int(time.time() * 1000)
    ssh_client = SSHClient()
    logger.debug('Scenario: {}'.format(scenario['name']))
    install_vddk(ssh_client)

    clean_appliance(ssh_client)

    monitor_thread = SmemMemoryMonitor(SSHClient(), 'workload-ssa', scenario['name'],
        'SmartState Analysis', get_server_roles_workload_smartstate(separator=', '),
        ', '.join(scenario['providers']))

    def cleanup_workload(scenario, from_ts):
        starttime = time.time()
        to_ts = int(starttime * 1000)
        g_url = get_scenario_dashboard_url(scenario, from_ts, to_ts)
        logger.debug('Started cleaning up monitoring thread.')
        monitor_thread.grafana_url = g_url
        monitor_thread.signal = False
        monitor_thread.join()
        timediff = time.time() - starttime
        logger.info('Finished cleaning up monitoring thread in {}'.format(timediff))
    request.addfinalizer(lambda: cleanup_workload(scenario, from_ts))

    monitor_thread.start()

    wait_for_miq_server_ready(poll_interval=2)
    set_server_roles_workload_smartstate(ssh_client)
    add_providers(scenario['providers'])
    logger.info('Sleeping for Refresh: {}s'.format(scenario['refresh_sleep_time']))
    time.sleep(scenario['refresh_sleep_time'])

    for provider in scenario['providers']:
        add_host_credentials(cfme_performance['providers'][provider])
        if (cfme_performance['providers'][provider]['type'] ==
                "ManageIQ::Providers::Redhat::InfraManager"):
            set_cfme_server_relationship(ssh_client,
                cfme_performance['appliance']['appliance_name'])

    # Variable amount of time for SmartState Analysis workload
    total_time = scenario['total_time']
    starttime = time.time()
    time_between_analyses = scenario['time_between_analyses']

    vm_ids_to_scan = []
    for vm_name in scenario['vms_to_scan']:
        vm_ids_to_scan.append(get_vm_id(vm_name))

    while ((time.time() - starttime) < total_time):
        start_ssa_time = time.time()

        for vm_id in vm_ids_to_scan:
            scan_provider_vm(vm_id)

        iteration_time = time.time()
        ssa_time = round(iteration_time - start_ssa_time, 2)
        elapsed_time = iteration_time - starttime
        logger.debug('Time to initiate SmartState Analyses: {}'.format(ssa_time))
        logger.info('Time elapsed: {}/{}'.format(round(elapsed_time, 2), total_time))

        if ssa_time < time_between_analyses:
            wait_diff = time_between_analyses - ssa_time
            time_remaining = total_time - elapsed_time
            if (time_remaining > 0 and time_remaining < time_between_analyses):
                time.sleep(time_remaining)
            elif time_remaining > 0:
                time.sleep(wait_diff)
        else:
            logger.warn('Time to initiate SmartState Analyses ({}) exceeded time between '
                '({})'.format(ssa_time, time_between_analyses))

    logger.info('Test Ending...')
Code Example #15
def test_workload_smartstate_analysis(request, scenario):
    """Runs through provider based scenarios initiating smart state analysis against VMs, Hosts,
    and Datastores"""
    from_ts = int(time.time() * 1000)
    ssh_client = SSHClient()
    logger.debug('Scenario: {}'.format(scenario['name']))
    install_vddk(ssh_client)

    clean_appliance(ssh_client)

    quantifiers = {}
    scenario_data = {
        'appliance_ip': cfme_performance['appliance']['ip_address'],
        'appliance_name': cfme_performance['appliance']['appliance_name'],
        'test_dir': 'workload-ssa',
        'test_name': 'SmartState Analysis',
        'appliance_roles': get_server_roles_workload_smartstate(separator=', '),
        'scenario': scenario
    }
    monitor_thread = SmemMemoryMonitor(SSHClient(), scenario_data)

    def cleanup_workload(scenario, from_ts, quantifiers, scenario_data):
        starttime = time.time()
        to_ts = int(starttime * 1000)
        g_url = get_scenario_dashboard_url(scenario, from_ts, to_ts)
        logger.debug('Started cleaning up monitoring thread.')
        monitor_thread.grafana_url = g_url
        monitor_thread.signal = False
        monitor_thread.join()
        add_workload_quantifiers(quantifiers, scenario_data)
        timediff = time.time() - starttime
        logger.info(
            'Finished cleaning up monitoring thread in {}'.format(timediff))

    request.addfinalizer(lambda: cleanup_workload(scenario, from_ts,
                                                  quantifiers, scenario_data))

    monitor_thread.start()

    wait_for_miq_server_workers_started(poll_interval=2)
    set_server_roles_workload_smartstate(ssh_client)
    add_providers(scenario['providers'])
    logger.info('Sleeping for Refresh: {}s'.format(
        scenario['refresh_sleep_time']))
    time.sleep(scenario['refresh_sleep_time'])

    # Add host credentials and set CFME relationship for RHEVM SSA
    for provider in scenario['providers']:
        add_host_credentials(cfme_performance['providers'][provider],
                             ssh_client)
        if (cfme_performance['providers'][provider]['type'] ==
                "ManageIQ::Providers::Redhat::InfraManager"):
            set_cfme_server_relationship(
                ssh_client, cfme_performance['appliance']['appliance_name'])

    # Get list of VM ids by mapping provider name + vm name to the vm id
    vm_ids_to_scan = map_vms_to_ids(scenario['vms_to_scan'])

    # Variable amount of time for SmartState Analysis workload
    total_time = scenario['total_time']
    starttime = time.time()
    time_between_analyses = scenario['time_between_analyses']
    total_scanned_VMs = 0

    while ((time.time() - starttime) < total_time):
        start_ssa_time = time.time()
        scan_provider_vms_bulk(vm_ids_to_scan)
        total_scanned_VMs += len(vm_ids_to_scan)
        iteration_time = time.time()

        ssa_time = round(iteration_time - start_ssa_time, 2)
        elapsed_time = iteration_time - starttime
        logger.debug('Time to Queue SmartState Analyses: {}'.format(ssa_time))
        logger.info('Time elapsed: {}/{}'.format(round(elapsed_time, 2),
                                                 total_time))

        if ssa_time < time_between_analyses:
            wait_diff = time_between_analyses - ssa_time
            time_remaining = total_time - elapsed_time
            if (time_remaining > 0 and time_remaining < time_between_analyses):
                time.sleep(time_remaining)
            elif time_remaining > 0:
                time.sleep(wait_diff)
        else:
            logger.warn(
                'Time to Queue SmartState Analyses ({}) exceeded time between '
                '({})'.format(ssa_time, time_between_analyses))

    quantifiers['Elapsed_Time'] = round(time.time() - starttime, 2)
    quantifiers['Queued_VM_Scans'] = total_scanned_VMs
    logger.info('Test Ending...')
Code Example #16
def test_workload_capacity_and_utilization(request, scenario):
    """Runs through provider based scenarios enabling C&U and running for a set period of time.
    Memory Monitor creates graphs and summary at the end of each scenario."""
    from_ts = int(time.time() * 1000)
    ssh_client = SSHClient()
    logger.debug('Scenario: {}'.format(scenario['name']))

    clean_appliance(ssh_client)

    quantifiers = {}
    scenario_data = {
        'appliance_ip': cfme_performance['appliance']['ip_address'],
        'appliance_name': cfme_performance['appliance']['appliance_name'],
        'test_dir': 'workload-cap-and-util',
        'test_name': 'Capacity and Utilization',
        'appliance_roles': get_server_roles_workload_cap_and_util(separator=', '),
        'scenario': scenario
    }
    monitor_thread = SmemMemoryMonitor(SSHClient(), scenario_data)

    def cleanup_workload(scenario, from_ts, quantifiers, scenario_data):
        starttime = time.time()
        to_ts = int(starttime * 1000)
        g_urls = get_scenario_dashboard_urls(scenario, from_ts, to_ts)
        logger.debug('Started cleaning up monitoring thread.')
        monitor_thread.grafana_urls = g_urls
        monitor_thread.signal = False
        monitor_thread.join()
        add_workload_quantifiers(quantifiers, scenario_data)
        timediff = time.time() - starttime
        logger.info(
            'Finished cleaning up monitoring thread in {}'.format(timediff))

    request.addfinalizer(lambda: cleanup_workload(scenario, from_ts,
                                                  quantifiers, scenario_data))

    monitor_thread.start()

    wait_for_miq_server_workers_started(poll_interval=2)
    set_server_roles_workload_cap_and_util(ssh_client)
    add_providers(scenario['providers'])
    logger.info('Sleeping for Refresh: {}s'.format(
        scenario['refresh_sleep_time']))
    time.sleep(scenario['refresh_sleep_time'])
    set_cap_and_util_all_via_rails(ssh_client)

    # Variable amount of time for C&U collections/processing
    total_time = scenario['total_time']
    starttime = time.time()
    elapsed_time = 0
    while (elapsed_time < total_time):
        elapsed_time = time.time() - starttime
        time_left = total_time - elapsed_time
        logger.info('Time elapsed: {}/{}'.format(round(elapsed_time, 2),
                                                 total_time))
        if (time_left > 0 and time_left < 300):
            time.sleep(time_left)
        elif time_left > 0:
            time.sleep(300)

    quantifiers['Elapsed_Time'] = round(elapsed_time, 2)
    logger.info('Test Ending...')
Code Example #17
def test_refresh_vms(request, scenario):
    """Refreshes all vm's then waits for a specific amount of time. Memory Monitor creates
    graphs and summary at the end of the scenario."""
    from_ts = int(time.time() * 1000)
    ssh_client = SSHClient()
    logger.debug('Scenario: {}'.format(scenario['name']))

    clean_appliance(ssh_client)

    quantifiers = {}
    scenario_data = {'appliance_ip': cfme_performance['appliance']['ip_address'],
        'appliance_name': cfme_performance['appliance']['appliance_name'],
        'test_dir': 'workload-refresh-vm',
        'test_name': 'Refresh VMs',
        'appliance_roles': get_server_roles_workload_refresh_vms(separator=', '),
        'scenario': scenario}
    monitor_thread = SmemMemoryMonitor(SSHClient(), scenario_data)

    def cleanup_workload(scenario, from_ts, quantifiers, scenario_data):
        starttime = time.time()
        to_ts = int(starttime * 1000)
        g_url = get_scenario_dashboard_url(scenario, from_ts, to_ts)
        logger.debug('Started cleaning up monitoring thread.')
        monitor_thread.grafana_url = g_url
        monitor_thread.signal = False
        monitor_thread.join()
        add_workload_quantifiers(quantifiers, scenario_data)
        timediff = time.time() - starttime
        logger.info('Finished cleaning up monitoring thread in {}'.format(timediff))
    request.addfinalizer(lambda: cleanup_workload(scenario, from_ts, quantifiers, scenario_data))

    monitor_thread.start()

    wait_for_miq_server_workers_started(poll_interval=2)
    set_server_roles_workload_refresh_vms(ssh_client)
    add_providers(scenario['providers'])
    logger.info('Sleeping for refresh: {}s'.format(scenario['refresh_sleep_time']))
    time.sleep(scenario['refresh_sleep_time'])

    full_refresh_threshold_set = False
    if 'full_refresh_threshold' in scenario:
        if scenario['full_refresh_threshold'] != FULL_REFRESH_THRESHOLD_DEFAULT:
            set_full_refresh_threshold(ssh_client, scenario['full_refresh_threshold'])
            full_refresh_threshold_set = True
    if not full_refresh_threshold_set:
        logger.debug('Keeping full_refresh_threshold at default ({}).'.format(
            FULL_REFRESH_THRESHOLD_DEFAULT))

    refresh_size = scenario['refresh_size']
    vm_ids = get_all_vm_ids()
    vm_ids_iter = cycle(vm_ids)
    logger.debug('Number of VM IDs: {}'.format(len(vm_ids)))

    # Variable amount of time for refresh workload
    total_time = scenario['total_time']
    starttime = time.time()
    time_between_refresh = scenario['time_between_refresh']
    total_refreshed_vms = 0

    while ((time.time() - starttime) < total_time):
        start_refresh_time = time.time()
        refresh_list = [next(vm_ids_iter) for x in range(refresh_size)]
        refresh_provider_vms_bulk(refresh_list)
        total_refreshed_vms += len(refresh_list)
        iteration_time = time.time()

        refresh_time = round(iteration_time - start_refresh_time, 2)
        elapsed_time = iteration_time - starttime
        logger.debug('Time to Queue VM Refreshes: {}'.format(refresh_time))
        logger.info('Time elapsed: {}/{}'.format(round(elapsed_time, 2), total_time))

        if refresh_time < time_between_refresh:
            wait_diff = time_between_refresh - refresh_time
            time_remaining = total_time - elapsed_time
            if (time_remaining > 0 and time_remaining < time_between_refresh):
                time.sleep(time_remaining)
            elif time_remaining > 0:
                time.sleep(wait_diff)
        else:
            logger.warn('Time to Queue VM Refreshes ({}) exceeded time between '
                '({})'.format(refresh_time, time_between_refresh))

    quantifiers['Elapsed_Time'] = round(time.time() - starttime, 2)
    quantifiers['Queued_VM_Refreshes'] = total_refreshed_vms
    logger.info('Test Ending...')
Code Example #18
def test_refresh_providers(request, scenario):
    """Refreshes providers then waits for a specific amount of time. Memory Monitor creates graphs
    and summary at the end of the scenario."""
    from_ts = int(time.time() * 1000)
    ssh_client = SSHClient()
    logger.debug('Scenario: {}'.format(scenario['name']))

    clean_appliance(ssh_client)

    quantifiers = {}
    scenario_data = {'appliance_ip': cfme_performance['appliance']['ip_address'],
        'appliance_name': cfme_performance['appliance']['appliance_name'],
        'test_dir': 'workload-refresh-providers',
        'test_name': 'Refresh Providers',
        'appliance_roles': get_server_roles_workload_refresh_providers(separator=', '),
        'scenario': scenario}
    monitor_thread = SmemMemoryMonitor(SSHClient(), scenario_data)

    def cleanup_workload(scenario, from_ts, quantifiers, scenario_data):
        starttime = time.time()
        to_ts = int(starttime * 1000)
        g_url = get_scenario_dashboard_url(scenario, from_ts, to_ts)
        logger.debug('Started cleaning up monitoring thread.')
        monitor_thread.grafana_url = g_url
        monitor_thread.signal = False
        monitor_thread.join()
        add_workload_quantifiers(quantifiers, scenario_data)
        timediff = time.time() - starttime
        logger.info(
            'Finished cleaning up monitoring thread in {}'.format(timediff))

    request.addfinalizer(lambda: cleanup_workload(scenario, from_ts,
                                                  quantifiers, scenario_data))

    monitor_thread.start()

    wait_for_miq_server_workers_started(poll_interval=2)
    set_server_roles_workload_refresh_providers(ssh_client)
    add_providers(scenario['providers'])
    id_list = get_all_provider_ids()

    # Variable amount of time for refresh workload
    total_time = scenario['total_time']
    starttime = time.time()
    time_between_refresh = scenario['time_between_refresh']
    total_refreshed_providers = 0

    while ((time.time() - starttime) < total_time):
        start_refresh_time = time.time()
        refresh_providers_bulk(id_list)
        total_refreshed_providers += len(id_list)
        iteration_time = time.time()

        refresh_time = round(iteration_time - start_refresh_time, 2)
        elapsed_time = iteration_time - starttime
        logger.debug('Time to Queue Refreshes: {}'.format(refresh_time))
        logger.info('Time elapsed: {}/{}'.format(round(elapsed_time, 2),
                                                 total_time))

        if refresh_time < time_between_refresh:
            wait_diff = time_between_refresh - refresh_time
            time_remaining = total_time - elapsed_time
            if (time_remaining > 0 and time_remaining < time_between_refresh):
                time.sleep(time_remaining)
            elif time_remaining > 0:
                time.sleep(wait_diff)
        else:
            logger.warn('Time to Queue Refreshes ({}) exceeded time between '
                        '({})'.format(refresh_time, time_between_refresh))

    quantifiers['Elapsed_Time'] = round(time.time() - starttime, 2)
    quantifiers['Queued_Provider_Refreshes'] = total_refreshed_providers
    logger.info('Test Ending...')
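
For context, the scenario fixture consumed here is a plain dict; this snippet reads its 'name', 'providers', 'total_time', and 'time_between_refresh' keys. A hypothetical entry with illustrative values (the provider key and the numbers are assumptions, not taken from the source configuration):

scenario = {
    'name': 'refresh-providers-1hr',  # hypothetical; used in logs and the dashboard URL
    'providers': ['vmware-small'],    # hypothetical keys into cfme_performance['providers']
    'total_time': 3600,               # seconds the refresh loop runs
    'time_between_refresh': 600,      # target cadence (seconds) for queueing refreshes
}
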
Code example #19
def test_provisioning(request, scenario):
    """Runs through provisioning scenarios using the REST API to
    continuously provision a VM for a specified period of time.
    Memory Monitor creates graphs and summary at the end of each scenario."""

    from_ts = int(time.time() * 1000)
    ssh_client = SSHClient()
    logger.debug('Scenario: {}'.format(scenario['name']))

    clean_appliance(ssh_client)

    quantifiers = {}
    scenario_data = {
        'appliance_ip': cfme_performance['appliance']['ip_address'],
        'appliance_name': cfme_performance['appliance']['appliance_name'],
        'test_dir': 'workload-provisioning',
        'test_name': 'Provisioning',
        'appliance_roles': get_server_roles_workload_provisioning(
            separator=', '),
        'scenario': scenario
    }
    monitor_thread = SmemMemoryMonitor(SSHClient(), scenario_data)

    provision_order = []

    def cleanup_workload(scenario, from_ts, vms_to_cleanup, quantifiers,
                         scenario_data):
        starttime = time.time()
        to_ts = int(starttime * 1000)
        g_urls = get_scenario_dashboard_urls(scenario, from_ts, to_ts)
        logger.debug('Started cleaning up monitoring thread.')
        set_server_roles_workload_provisioning_cleanup(ssh_client)
        monitor_thread.grafana_urls = g_urls
        monitor_thread.signal = False
        final_vm_size = len(vms_to_cleanup)
        delete_provisioned_vms(vms_to_cleanup)
        monitor_thread.join()
        logger.info(
            '{} VMs remained for the finalizer, and {} of them were deleted.'.format(
                final_vm_size, final_vm_size - len(vms_to_cleanup)))
        logger.info(
            'The following VMs could not be deleted and were left over: {}'.format(
                vms_to_cleanup))
        quantifiers['VMs_To_Delete_In_Finalizer'] = final_vm_size
        quantifiers['VMs_Deleted_In_Finalizer'] = final_vm_size - len(
            vms_to_cleanup)
        quantifiers['Leftover_VMs'] = vms_to_cleanup
        add_workload_quantifiers(quantifiers, scenario_data)
        timediff = time.time() - starttime
        logger.info(
            'Finished cleaning up monitoring thread in {}'.format(timediff))

    request.addfinalizer(lambda: cleanup_workload(
        scenario, from_ts, provision_order, quantifiers, scenario_data))

    monitor_thread.start()

    wait_for_miq_server_workers_started(poll_interval=2)
    set_server_roles_workload_provisioning(ssh_client)
    add_providers(scenario['providers'])
    logger.info('Sleeping for Refresh: {}s'.format(
        scenario['refresh_sleep_time']))
    time.sleep(scenario['refresh_sleep_time'])

    guid_list = get_template_guids(scenario['templates'])
    guid_cycle = cycle(guid_list)
    cleanup_size = scenario['cleanup_size']
    number_of_vms = scenario['number_of_vms']
    total_time = scenario['total_time']
    time_between_provision = scenario['time_between_provision']
    total_provisioned_vms = 0
    total_deleted_vms = 0
    provisioned_vms = 0
    starttime = time.time()

    while ((time.time() - starttime) < total_time):
        start_iteration_time = time.time()
        provision_list = []
        for i in range(number_of_vms):
            total_provisioned_vms += 1
            provisioned_vms += 1
            vm_to_provision = '{}-provision-{}'.format(
                test_ts,  # module-level timestamp, defined outside this snippet
                str(total_provisioned_vms).zfill(4))
            guid_to_provision, provider_name = next(guid_cycle)
            provider_to_provision = cfme_performance['providers'][
                provider_name]
            provision_order.append((vm_to_provision, provider_name))
            provision_list.append((vm_to_provision, guid_to_provision,
                                   provider_to_provision['vlan_network']))

        provision_vm(provision_list)
        creation_time = time.time()
        provision_time = round(creation_time - start_iteration_time, 2)
        logger.debug(
            'Time to initiate provisioning: {}'.format(provision_time))
        logger.info('{} VMs provisioned so far'.format(total_provisioned_vms))

        if provisioned_vms > cleanup_size * len(scenario['providers']):
            start_remove_time = time.time()
            if delete_provisioned_vm(provision_order[0]):
                provision_order.pop(0)
                provisioned_vms -= 1
                total_deleted_vms += 1
            deletion_time = round(time.time() - start_remove_time, 2)
            logger.debug('Time to initate deleting: {}'.format(deletion_time))
            logger.info('{} VMs deleted so far'.format(total_deleted_vms))

        end_iteration_time = time.time()
        iteration_time = round(end_iteration_time - start_iteration_time, 2)
        elapsed_time = end_iteration_time - starttime
        logger.debug('Time to initiate provisioning and deletion: {}'.format(
            iteration_time))
        logger.info('Time elapsed: {}/{}'.format(round(elapsed_time, 2),
                                                 total_time))

        if iteration_time < time_between_provision:
            wait_diff = time_between_provision - iteration_time
            time_remaining = total_time - elapsed_time
            if (time_remaining > 0
                    and time_remaining < time_between_provision):
                time.sleep(time_remaining)
            elif time_remaining > 0:
                time.sleep(wait_diff)
        else:
            logger.warn(
                'Time to initiate provisioning ({}) exceeded time between '
                '({})'.format(iteration_time, time_between_provision))

    quantifiers['Elapsed_Time'] = round(time.time() - starttime, 2)
    quantifiers['Queued_VM_Provisionings'] = total_provisioned_vms
    quantifiers['Deleted_VMs'] = total_deleted_vms
    logger.info(
        'Provisioned {} VMs and deleted {} VMs during the scenario.'.format(
            total_provisioned_vms, total_deleted_vms))
    logger.info('Test Ending...')
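
Note the asymmetry in the loop above: each pass queues number_of_vms provision requests but deletes at most one VM (the oldest entry in provision_order), so the pool of live VMs only stays flat when number_of_vms is 1; with larger batches it grows by number_of_vms - 1 per pass until the finalizer deletes the remainder. The toy model below, with illustrative names and a deque standing in for provision_order, makes that growth easy to observe.

from collections import deque

def run_window(passes, number_of_vms, cleanup_size, provider_count):
    """Toy model of the provision/delete window in test_provisioning
    (names and the deque are illustrative, not from the source module)."""
    window = deque()  # stands in for provision_order
    for p in range(passes):
        for i in range(number_of_vms):
            window.append('vm-{}-{}'.format(p, i))  # "provision" a VM
        if len(window) > cleanup_size * provider_count:
            window.popleft()  # "delete" only the oldest VM
    return len(window)  # VMs left for the finalizer to clean up

# With batches of 2, the pool keeps growing past the cleanup threshold:
print(run_window(passes=10, number_of_vms=2, cleanup_size=5, provider_count=1))  # -> 12
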