Example #1
def test_prepare_unsupported_script_for_windows_machine():
    # arrange
    windows_machine = machine_provider.default('Windows')

    # assert
    with pytest.raises(InterruptExecution):
        command.prepare(windows_machine, "network_latency")
Example #2
def network_latency(filter: str = None,
                    duration: int = 60,
                    delay: int = 200,
                    jitter: int = 50,
                    network_interface: str = "eth0",
                    configuration: Configuration = None,
                    secrets: Secrets = None):
    """Increases the response time of the virtual machine.

    **Please note**: This action is available only for Linux-based systems.

    Parameters
    ----------
    filter : str, optional
        Filter the virtual machine instance(s). If omitted, a random instance from your subscription is selected.

    duration : int, optional
        How long the latency lasts. Defaults to 60 seconds.

    delay : int, optional
        Applied delay of the response time in milliseconds. Defaults to 200 milliseconds.

    jitter : int, optional
        Applied variance of +/- jitter to the delay of the response time in milliseconds. Defaults to 50 milliseconds.

    network_interface : str, optional
        The network interface to which the latency is applied. Defaults to the local Ethernet interface eth0.
    """

    operation_name = network_latency.__name__
    logger.debug(
        "Starting {}: configuration='{}', filter='{}', duration='{}',"
        " delay='{}', jitter='{}', network_interface='{}'".format(
            operation_name, configuration, filter, duration, delay, jitter, network_interface))

    machines = fetch_machines(filter, configuration, secrets)
    clnt = client.init()

    machine_records = Records()

    futures = []
    with concurrent.futures.ThreadPoolExecutor(max_workers=len(machines)) as executor:
        for machine in machines:
            command_id, script_content = command.prepare(machine, operation_name)
            logger.debug("Script content: {}".format(script_content))
            parameters = command.fill_parameters(
                command_id, script_content, duration=duration, delay=delay, jitter=jitter,
                network_interface=network_interface)

            # collect future results
            futures.append(
                executor.submit(__long_poll_command, operation_name, machine, parameters, clnt))

        # wait for results
        for future in concurrent.futures.as_completed(futures):
            affected_machine = future.result()
            machine_records.add(cleanse.machine(affected_machine))

    return machine_records.output_as_dict('resources')
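
For orientation, a call of this action could look as follows. This is only a sketch: the filter value is a placeholder whose exact syntax depends on how fetch_machines resolves it, and the return value is the 'resources' view produced by Records.output_as_dict.

# Illustrative invocation; the filter value is a placeholder and its exact
# syntax depends on how fetch_machines interprets it.
affected = network_latency(
    filter="where resourceGroup=='my-resource-group'",
    duration=120,              # keep the latency in place for two minutes
    delay=300,                 # base delay in milliseconds
    jitter=100,                # +/- variance in milliseconds
    network_interface="eth0")
print(affected)                # the 'resources' view of the affected machines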
Example #3
def test_run():
    # arrange
    machine = machine_provider.default()
    cmd, parameters = command.prepare(machine, "stress_cpu")
    mocked_client = MagicMock(spec=ComputeManagementClient)

    # act
    command.run(machine['resourceGroup'], machine, parameters, mocked_client)
Example #4
def test_prepare_supported_script_for_windows_machine():
    # arrange
    windows_machine = machine_provider.default('Windows')

    # act
    cmd_id, script_content = command.prepare(windows_machine, "stress_cpu")

    # assert
    assert cmd_id == 'RunPowerShellScript'
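
A companion check for the Linux path is sketched below. It assumes that machine_provider.default() without an argument yields a Linux machine and that command.prepare maps Linux machines to Azure's RunShellScript command id, mirroring the Windows case above.

# Sketch only: assumes the default machine fixture is Linux-based and that
# command.prepare returns Azure's 'RunShellScript' id for Linux guests.
def test_prepare_supported_script_for_linux_machine():
    # arrange
    linux_machine = machine_provider.default()

    # act
    cmd_id, script_content = command.prepare(linux_machine, "stress_cpu")

    # assert
    assert cmd_id == 'RunShellScript'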
Example #5
def test_run_for_unknown_type():
    # arrange
    machine = machine_provider.default()
    machine['type'] = "Microsoft.Compute/unknownType"

    # act
    with pytest.raises(InterruptExecution):
        cmd, parameters = command.prepare(machine, "stress_cpu")
        mocked_client = MagicMock(spec=ComputeManagementClient)
        command.run(machine['resourceGroup'], machine, parameters, mocked_client)
Example #6
def fill_disk(filter: str = None,
              duration: int = 120,
              size: int = 1000,
              path: str = None,
              configuration: Configuration = None,
              secrets: Secrets = None):
    """Fill the disk with random data.

    Parameters
    ----------
    filter : str, optional
        Filter the virtual machine instance(s). If omitted, a random instance from your subscription is selected.

    duration : int, optional
        Lifetime of the file created. Defaults to 120 seconds.

    size : int, optional
        Size, in megabytes, of the file created on the disk. Defaults to 1000 MB.

    path : str, optional
        The absolute path to write the fill file into.
        Defaults to ``C:\\burn`` for Windows clients and ``/root/burn`` for Linux clients.
    """

    logger.debug("Starting {}: configuration='{}', filter='{}', duration='{}', size='{}', path='{}'".format(
        fill_disk.__name__, configuration, filter, duration, size, path))

    machines = fetch_machines(filter, configuration, secrets)
    clnt = client.init()

    machine_records = Records()

    futures = []
    with concurrent.futures.ThreadPoolExecutor(max_workers=len(machines)) as executor:
        for machine in machines:
            command_id, script_content = command.prepare(machine, 'fill_disk')
            fill_path = command.prepare_path(machine, path)
            parameters = command.fill_parameters(
                command_id, script_content, duration=duration, size=size, path=fill_path)

            # collect future results
            futures.append(
                executor.submit(__long_poll_command, fill_disk.__name__, machine, parameters, clnt))

        # wait for results
        for future in concurrent.futures.as_completed(futures):
            affected_machine = future.result()
            machine_records.add(cleanse.machine(affected_machine))

    return machine_records.output_as_dict('resources')
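
As a usage sketch (the filter and path values are illustrative placeholders), fill_disk can be pointed at a specific directory; when path is omitted, command.prepare_path falls back to the OS-specific default named in the docstring.

# Illustrative call; filter and path are placeholders.
fill_disk(
    filter="where resourceGroup=='my-resource-group'",
    duration=300,        # keep the file alive for five minutes
    size=2048,           # create a 2 GB file
    path="/tmp/burn")    # omit to use /root/burn (Linux) or C:\burn (Windows)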
Example #7
def burn_io(filter: str = None,
            duration: int = 60,
            path: str = None,
            configuration: Configuration = None,
            secrets: Secrets = None):
    """Simulate heavy disk I/O operations.

    Parameters
    ----------
    filter : str, optional
        Filter the virtual machines. If omitted, a random instance from your subscription is selected.

    duration : int, optional
        How long the burn lasts. Defaults to 60 seconds.

    path : str, optional
        The absolute path to write the stress file into. Defaults to ``C:\\burn`` for Windows
        clients and ``/root/burn`` for Linux clients.
    """

    logger.debug(
        "Starting {}: configuration='{}', filter='{}', duration='{}',".format(
            burn_io.__name__, configuration, filter, duration))

    machines = fetch_machines(filter, configuration, secrets)
    clnt = client.init()

    machine_records = Records()

    futures = []
    with concurrent.futures.ThreadPoolExecutor(max_workers=len(machines)) as executor:
        for machine in machines:
            command_id, script_content = command.prepare(machine, 'burn_io')
            fill_path = command.prepare_path(machine, path)
            parameters = command.fill_parameters(command_id, script_content, duration=duration, path=fill_path)

            # collect future results
            futures.append(
                executor.submit(__long_poll_command, burn_io.__name__, machine, parameters, clnt))

        # wait for results
        for future in concurrent.futures.as_completed(futures):
            affected_machine = future.result()
            machine_records.add(cleanse.machine(affected_machine))

    return machine_records.output_as_dict('resources')
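
One detail shared by all of these actions: ThreadPoolExecutor raises ValueError when max_workers is zero, so an empty result from fetch_machines fails at the executor rather than with a descriptive message. A defensive guard, if one were wanted, could look like the sketch below; the examples themselves assume fetch_machines yields at least one machine.

# Sketch of a guard for the empty-result case (not part of the examples above).
machines = fetch_machines(filter, configuration, secrets)
if not machines:
    raise InterruptExecution("No virtual machines matched the given filter")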
Example #8
def stress_cpu(filter: str = None,
               duration: int = 120,
               configuration: Configuration = None,
               secrets: Secrets = None):
    """Stress CPU up to 100% at virtual machines.

    Parameters
    ----------
    filter : str, optional
        Filter the virtual machine instance(s). If omitted, a random instance from your subscription is selected.

    duration : int, optional
        Duration of the stress test (in seconds) that generates high CPU usage. Defaults to 120 seconds.
    """

    operation_name = stress_cpu.__name__

    logger.debug(
        "Starting {}: configuration='{}', filter='{}', duration='{}'".format(
            operation_name, configuration, filter, duration))

    machines = fetch_machines(filter, configuration, secrets)
    clnt = client.init()

    machine_records = Records()
    futures = []
    with concurrent.futures.ThreadPoolExecutor(max_workers=len(machines)) as executor:
        for machine in machines:
            command_id, script_content = command.prepare(machine, operation_name)
            parameters = command.fill_parameters(command_id, script_content, duration=duration)

            # collect future results
            futures.append(
                executor.submit(__long_poll_command, operation_name, machine, parameters, clnt))

        # wait for results
        for future in concurrent.futures.as_completed(futures):
            affected_machine = future.result()
            machine_records.add(cleanse.machine(affected_machine))

    return machine_records.output_as_dict('resources')
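
All of the actions above hand __long_poll_command to the thread pool, but its body does not appear in these examples. A minimal sketch, assuming it simply delegates to command.run (whose signature appears in Examples #3 and #5), blocks until the run-command operation completes, and returns the machine it operated on:

# Minimal sketch of the polling helper used by the actions above; it assumes
# command.run blocks until the Azure run-command operation has finished.
def __long_poll_command(operation_name, machine, parameters, client):
    logger.debug("Waiting for operation '{}' on machine: {}".format(
        operation_name, machine))
    command.run(machine['resourceGroup'], machine, parameters, client)
    return machine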