Example #1
def _worker_process(queue, iteration_gen, timeout, times, max_concurrent,
                    context, cls, method_name, args, event_queue, aborted,
                    runs_per_second, rps_cfg, processes_to_start, info):
    """Start scenario within threads.

    Spawn N threads per second. Each thread runs the scenario once, and appends
    result to queue. A maximum of max_concurrent threads will be ran
    concurrently.

    :param queue: queue object to append results
    :param iteration_gen: next iteration number generator
    :param timeout: operation's timeout
    :param times: total number of scenario iterations to be run
    :param max_concurrent: maximum worker concurrency
    :param context: scenario context object
    :param cls: scenario class
    :param method_name: scenario method name
    :param args: scenario args
    :param event_queue: queue object to append events
    :param aborted: multiprocessing.Event that aborts load generation if
                    the flag is set
    :param runs_per_second: function that should return desired rps value
    :param rps_cfg: rps section from task config
    :param processes_to_start: int, number of started processes for scenario
                               execution
    :param info: info about all processes count and counter of launched
                 processes
    """

    pool = collections.deque()
    if isinstance(rps_cfg, dict):
        rps = rps_cfg["start"]
    else:
        rps = rps_cfg
    sleep = 1.0 / rps

    runner._log_worker_info(times=times,
                            rps=rps,
                            timeout=timeout,
                            cls=cls,
                            method_name=method_name,
                            args=args)

    time.sleep(
        (sleep * info["processes_counter"]) / info["processes_to_start"])

    start = time.time()
    timeout_queue = Queue.Queue()

    if timeout:
        collector_thr_by_timeout = threading.Thread(
            target=utils.timeout_thread, args=(timeout_queue, ))
        collector_thr_by_timeout.start()

    i = 0
    while i < times and not aborted.is_set():
        scenario_context = runner._get_scenario_context(
            next(iteration_gen), context)
        worker_args = (queue, cls, method_name, scenario_context, args,
                       event_queue)
        thread = threading.Thread(target=runner._worker_thread,
                                  args=worker_args)

        i += 1
        thread.start()
        if timeout:
            timeout_queue.put((thread, time.time() + timeout))
        pool.append(thread)

        time_gap = time.time() - start
        real_rps = i / time_gap if time_gap else "Infinity"

        LOG.debug(
            "Worker: %s rps: %s (requested rps: %s)" %
            (i, real_rps, runs_per_second(rps_cfg, start, processes_to_start)))

        # try to join the oldest thread(s) until they finish, or until it is
        # time to start a new thread (if a concurrency slot is available)
        while i / (time.time() - start) > runs_per_second(
                rps_cfg, start, processes_to_start) or (len(pool) >=
                                                        max_concurrent):
            if pool:
                pool[0].join(0.001)
                if not pool[0].isAlive():
                    pool.popleft()
            else:
                time.sleep(0.001)

    while pool:
        pool.popleft().join()

    if timeout:
        timeout_queue.put((
            None,
            None,
        ))
        collector_thr_by_timeout.join()
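
A minimal, self-contained sketch of the pacing pattern used in Example #1: threads are started only while the achieved rate (started / elapsed) stays at or below the requested rps and the pool stays under max_concurrent. The do_work callable and the numbers below are illustrative placeholders, not Rally code.

import collections
import threading
import time


def run_at_rate(do_work, times, rps, max_concurrent):
    # Illustrative only: start worker threads so that the achieved rate
    # (started / elapsed) never exceeds the requested rps, and never keep
    # more than max_concurrent threads in the pool at once.
    pool = collections.deque()
    start = time.time()
    started = 0
    while started < times:
        thread = threading.Thread(target=do_work)
        thread.start()
        started += 1
        pool.append(thread)

        # wait in small slices until the rate drops back under rps and a
        # concurrency slot is free, reaping the oldest finished threads
        while True:
            elapsed = time.time() - start
            over_rate = elapsed > 0 and started / elapsed > rps
            if not over_rate and len(pool) < max_concurrent:
                break
            if pool:
                pool[0].join(0.001)
                if not pool[0].is_alive():
                    pool.popleft()
            else:
                time.sleep(0.001)

    while pool:
        pool.popleft().join()


# placeholder workload: 20 iterations at roughly 10 requests per second
run_at_rate(lambda: time.sleep(0.05), times=20, rps=10, max_concurrent=5)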
Example #2
def _worker_process(queue, iteration_gen, timeout, concurrency, times,
                    context, cls, method_name, args, aborted, info):
    """Start the scenario within threads.

    Spawn threads to support scenario execution for a fixed number of times.
    This generates a constant load on the cloud under test by executing each
    scenario iteration without pausing between iterations. Each thread runs
    the scenario method once with passed scenario arguments and context.
    After execution the result is appended to the queue.

    :param queue: queue object to append results
    :param iteration_gen: next iteration number generator
    :param timeout: operation's timeout
    :param concurrency: number of concurrently running scenario iterations
    :param times: total number of scenario iterations to be run
    :param context: scenario context object
    :param cls: scenario class
    :param method_name: scenario method name
    :param args: scenario args
    :param aborted: multiprocessing.Event that aborts load generation if
                    the flag is set
    :param info: info about all processes count and counter of launched
                 processes
    """

    pool = collections.deque()
    alive_threads_in_pool = 0
    finished_threads_in_pool = 0

    runner._log_worker_info(times=times, concurrency=concurrency,
                            timeout=timeout, cls=cls, method_name=method_name,
                            args=args)

    if timeout:
        timeout_queue = Queue.Queue()
        collector_thr_by_timeout = threading.Thread(
            target=utils.timeout_thread,
            args=(timeout_queue, )
        )
        collector_thr_by_timeout.start()

    iteration = next(iteration_gen)
    while iteration < times and not aborted.is_set():
        scenario_context = runner._get_scenario_context(iteration, context)
        worker_args = (queue, cls, method_name, scenario_context, args)

        thread = threading.Thread(target=runner._worker_thread,
                                  args=worker_args)

        thread.start()
        if timeout:
            timeout_queue.put((thread.ident, time.time() + timeout))
        pool.append((thread, time.time()))
        alive_threads_in_pool += 1

        while alive_threads_in_pool == concurrency:
            prev_finished_threads_in_pool = finished_threads_in_pool
            finished_threads_in_pool = 0
            for t in pool:
                if not t[0].isAlive():
                    finished_threads_in_pool += 1

            alive_threads_in_pool -= finished_threads_in_pool
            alive_threads_in_pool += prev_finished_threads_in_pool

            if alive_threads_in_pool < concurrency:
                # NOTE(boris-42): clean up the pool; otherwise its length
                # would grow to `times`, which can be arbitrarily large
                while pool and not pool[0][0].isAlive():
                    pool.popleft()[0].join()
                    finished_threads_in_pool -= 1
                break

            # sleep briefly so these checks do not turn into a busy loop
            time.sleep(0.001)
        iteration = next(iteration_gen)

    # Wait until all threads are done
    while pool:
        pool.popleft()[0].join()

    if timeout:
        timeout_queue.put((None, None,))
        collector_thr_by_timeout.join()
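
The constant-load variant above keeps up to concurrency threads alive and trims finished threads from the deque so it never grows towards times. A minimal sketch of that bookkeeping with a placeholder workload (not Rally code):

import collections
import threading
import time


def run_with_concurrency(do_work, times, concurrency):
    # Illustrative only: keep at most `concurrency` workers running, and
    # trim finished threads from the head of the deque so its length stays
    # bounded by roughly `concurrency` instead of growing towards `times`.
    pool = collections.deque()
    for _ in range(times):
        thread = threading.Thread(target=do_work)
        thread.start()
        pool.append(thread)

        # wait until a concurrency slot frees up
        while sum(t.is_alive() for t in pool) >= concurrency:
            time.sleep(0.001)
        # reap finished threads at the head to keep the pool small
        while pool and not pool[0].is_alive():
            pool.popleft().join()

    # wait until all remaining threads are done
    while pool:
        pool.popleft().join()


# placeholder workload: 20 iterations, at most 5 running at once
run_with_concurrency(lambda: time.sleep(0.05), times=20, concurrency=5)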
Example #3
def _worker_process(queue, iteration_gen, timeout, rps, times, max_concurrent,
                    context, cls, method_name, args, aborted, info):
    """Start scenario within threads.

    Spawn N threads per second. Each thread runs the scenario once, and appends
    result to queue. A maximum of max_concurrent threads will be ran
    concurrently.

    :param queue: queue object to append results
    :param iteration_gen: next iteration number generator
    :param timeout: operation's timeout
    :param rps: number of scenario iterations to be run per one second
    :param times: total number of scenario iterations to be run
    :param max_concurrent: maximum worker concurrency
    :param context: scenario context object
    :param cls: scenario class
    :param method_name: scenario method name
    :param args: scenario args
    :param aborted: multiprocessing.Event that aborts load generation if
                    the flag is set
    :param info: info about all processes count and counter of launched
                 processes
    """

    pool = collections.deque()
    sleep = 1.0 / rps

    runner._log_worker_info(times=times,
                            rps=rps,
                            timeout=timeout,
                            cls=cls,
                            method_name=method_name,
                            args=args)

    time.sleep(
        (sleep * info["processes_counter"]) / info["processes_to_start"])

    start = time.time()

    i = 0
    while i < times and not aborted.is_set():
        scenario_context = runner._get_scenario_context(context)
        scenario_args = (next(iteration_gen), cls, method_name,
                         scenario_context, args)
        worker_args = (queue, scenario_args)
        thread = threading.Thread(target=runner._worker_thread,
                                  args=worker_args)
        i += 1
        thread.start()
        pool.append(thread)

        time_gap = time.time() - start
        real_rps = i / time_gap if time_gap else "Infinity"

        LOG.debug("Worker: %s rps: %s (requested rps: %s)" %
                  (i, real_rps, rps))

        # try to join the oldest thread(s) until they finish, or until it is
        # time to start a new thread (if a concurrency slot is available)
        while i / (time.time() - start) > rps or len(pool) >= max_concurrent:
            if pool:
                pool[0].join(0.001)
                if not pool[0].isAlive():
                    pool.popleft()
            else:
                time.sleep(0.001)

    while pool:
        thr = pool.popleft()
        thr.join()
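
The initial time.sleep in this example spreads sibling worker processes across a single 1/rps interval so that they do not all fire at the same instant. A small worked example of that arithmetic, assuming a zero-based processes_counter (the exact contents of info are not shown here):

rps = 10                   # placeholder value
processes_to_start = 4     # placeholder value
sleep = 1.0 / rps          # 0.1 s between iterations at the target rate

for processes_counter in range(processes_to_start):
    offset = (sleep * processes_counter) / processes_to_start
    print("process %d sleeps %.3f s before starting"
          % (processes_counter, offset))
# -> 0.000 s, 0.025 s, 0.050 s, 0.075 s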
Example #4
def _worker_process(queue, iteration_gen, timeout, rps, times,
                    max_concurrent, context, cls, method_name,
                    args, aborted, info):
    """Start scenario within threads.

    Spawn N threads per second. Each thread runs the scenario once, and appends
    result to queue. A maximum of max_concurrent threads will be ran
    concurrently.

    :param queue: queue object to append results
    :param iteration_gen: next iteration number generator
    :param timeout: operation's timeout
    :param rps: number of scenario iterations to be run per one second
    :param times: total number of scenario iterations to be run
    :param max_concurrent: maximum worker concurrency
    :param context: scenario context object
    :param cls: scenario class
    :param method_name: scenario method name
    :param args: scenario args
    :param aborted: multiprocessing.Event that aborts load generation if
                    the flag is set
    :param info: info about all processes count and counter of launched
                 processes
    """

    pool = collections.deque()
    sleep = 1.0 / rps

    runner._log_worker_info(times=times, rps=rps, timeout=timeout,
                            cls=cls, method_name=method_name, args=args)

    time.sleep(
        (sleep * info["processes_counter"]) / info["processes_to_start"])

    start = time.time()
    timeout_queue = Queue.Queue()

    if timeout:
        collector_thr_by_timeout = threading.Thread(
            target=utils.timeout_thread,
            args=(timeout_queue, )
        )
        collector_thr_by_timeout.start()

    i = 0
    while i < times and not aborted.is_set():
        scenario_context = runner._get_scenario_context(next(iteration_gen),
                                                        context)
        worker_args = (queue, cls, method_name, scenario_context, args)
        thread = threading.Thread(target=runner._worker_thread,
                                  args=worker_args)

        i += 1
        thread.start()
        if timeout:
            timeout_queue.put((thread, time.time() + timeout))
        pool.append(thread)

        time_gap = time.time() - start
        real_rps = i / time_gap if time_gap else "Infinity"

        LOG.debug("Worker: %s rps: %s (requested rps: %s)" %
                  (i, real_rps, rps))

        # try to join the oldest thread(s) until they finish, or until it is
        # time to start a new thread (if a concurrency slot is available)
        while i / (time.time() - start) > rps or len(pool) >= max_concurrent:
            if pool:
                pool[0].join(0.001)
                if not pool[0].isAlive():
                    pool.popleft()
            else:
                time.sleep(0.001)

    while pool:
        pool.popleft().join()

    if timeout:
        timeout_queue.put((None, None,))
        collector_thr_by_timeout.join()
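
When a timeout is set, the example hands each started thread and its deadline to a collector thread through a queue and shuts the collector down with a (None, None) sentinel. The real collector is utils.timeout_thread; the following is only a rough sketch of that pattern, not Rally's implementation:

import queue      # Queue.Queue in the Python 2 code above
import threading
import time


def timeout_collector(timeout_queue):
    # Illustrative only: consume (thread, deadline) pairs, wait for each
    # thread until its deadline, and report the ones that overran it.
    # A (None, None) sentinel stops the collector.
    while True:
        thread, deadline = timeout_queue.get()
        if thread is None:
            break
        thread.join(max(0.0, deadline - time.time()))
        if thread.is_alive():
            print("thread %s exceeded its deadline" % thread.name)


timeout_queue = queue.Queue()
collector = threading.Thread(target=timeout_collector, args=(timeout_queue,))
collector.start()
# producer side: timeout_queue.put((worker_thread, time.time() + timeout))
timeout_queue.put((None, None))
collector.join()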
Example #5
def _worker_process(queue, iteration_gen, timeout, concurrency, times, context,
                    cls, method_name, args, event_queue, aborted, info):
    """Start the scenario within threads.

    Spawn threads to support scenario execution for a fixed number of times.
    This generates a constant load on the cloud under test by executing each
    scenario iteration without pausing between iterations. Each thread runs
    the scenario method once with passed scenario arguments and context.
    After execution the result is appended to the queue.

    :param queue: queue object to append results
    :param iteration_gen: next iteration number generator
    :param timeout: operation's timeout
    :param concurrency: number of concurrently running scenario iterations
    :param times: total number of scenario iterations to be run
    :param context: scenario context object
    :param cls: scenario class
    :param method_name: scenario method name
    :param args: scenario args
    :param event_queue: queue object to append events
    :param aborted: multiprocessing.Event that aborts load generation if
                    the flag is set
    :param info: info about all processes count and counter of launched
                 processes
    """

    pool = collections.deque()
    alive_threads_in_pool = 0
    finished_threads_in_pool = 0

    runner._log_worker_info(times=times,
                            concurrency=concurrency,
                            timeout=timeout,
                            cls=cls,
                            method_name=method_name,
                            args=args)

    if timeout:
        timeout_queue = Queue.Queue()
        collector_thr_by_timeout = threading.Thread(
            target=utils.timeout_thread, args=(timeout_queue, ))
        collector_thr_by_timeout.start()

    iteration = next(iteration_gen)
    while iteration < times and not aborted.is_set():
        scenario_context = runner._get_scenario_context(iteration, context)
        worker_args = (queue, cls, method_name, scenario_context, args,
                       event_queue)

        thread = threading.Thread(target=runner._worker_thread,
                                  args=worker_args)

        thread.start()
        if timeout:
            timeout_queue.put((thread, time.time() + timeout))
        pool.append(thread)
        alive_threads_in_pool += 1

        while alive_threads_in_pool == concurrency:
            prev_finished_threads_in_pool = finished_threads_in_pool
            finished_threads_in_pool = 0
            for t in pool:
                if not t.isAlive():
                    finished_threads_in_pool += 1

            alive_threads_in_pool -= finished_threads_in_pool
            alive_threads_in_pool += prev_finished_threads_in_pool

            if alive_threads_in_pool < concurrency:
                # NOTE(boris-42): clean up the pool; otherwise its length
                # would grow to `times`, which can be arbitrarily large
                while pool and not pool[0].isAlive():
                    pool.popleft().join()
                    finished_threads_in_pool -= 1
                break

            # sleep briefly so these checks do not turn into a busy loop
            time.sleep(0.001)
        iteration = next(iteration_gen)

    # Wait until all threads are done
    while pool:
        pool.popleft().join()

    if timeout:
        timeout_queue.put((
            None,
            None,
        ))
        collector_thr_by_timeout.join()
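
Example #5 is the same constant-concurrency loop as Example #2, except that every worker thread also receives an event_queue so it can report events separately from its final result. A hypothetical worker body illustrating that split (this is not Rally's runner._worker_thread):

import queue
import time


def worker_thread(result_queue, event_queue, scenario):
    # Hypothetical worker: emit progress events on one queue and the final
    # result on another, mirroring the (queue, ..., event_queue) arguments
    # passed above.
    event_queue.put({"type": "started", "timestamp": time.time()})
    result = scenario()        # placeholder for the real scenario call
    event_queue.put({"type": "finished", "timestamp": time.time()})
    result_queue.put(result)


results, events = queue.Queue(), queue.Queue()
worker_thread(results, events, lambda: "ok")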
Example #6
def _worker_process(queue, iteration_gen, timeout, concurrency, times,
                    duration, context, cls, method_name, args, event_queue,
                    aborted, info):
    """Start the scenario within threads.

    Spawn threads to support scenario execution.
    The scenario is run a fixed number of times if times is specified,
    or for a fixed duration if duration is specified.
    This generates a constant load on the cloud under test by executing each
    scenario iteration without pausing between iterations. Each thread runs
    the scenario method once with passed scenario arguments and context.
    After execution the result is appended to the queue.

    :param queue: queue object to append results
    :param iteration_gen: next iteration number generator
    :param timeout: operation's timeout
    :param concurrency: number of concurrently running scenario iterations
    :param times: total number of scenario iterations to be run
    :param duration: total duration in seconds of the run
    :param context: scenario context object
    :param cls: scenario class
    :param method_name: scenario method name
    :param args: scenario args
    :param event_queue: queue object to append events
    :param aborted: multiprocessing.Event that aborts load generation if
                    the flag is set
    :param info: info about all processes count and counter of launched
                 processes
    """
    def _to_be_continued(iteration, current_duration, aborted, times=None,
                         duration=None):
        if times is not None:
            return iteration < times and not aborted.is_set()
        elif duration is not None:
            return current_duration < duration and not aborted.is_set()
        else:
            return False

    if times is None and duration is None:
        raise ValueError("times or duration must be specified")

    pool = collections.deque()
    alive_threads_in_pool = 0
    finished_threads_in_pool = 0

    runner._log_worker_info(times=times, duration=duration,
                            concurrency=concurrency, timeout=timeout, cls=cls,
                            method_name=method_name, args=args)

    if timeout:
        timeout_queue = Queue.Queue()
        collector_thr_by_timeout = threading.Thread(
            target=utils.timeout_thread,
            args=(timeout_queue, )
        )
        collector_thr_by_timeout.start()

    iteration = next(iteration_gen)
    start_time = time.time()
    # NOTE(msimonin): keep the previous behaviour
    # > when duration is 0, scenario executes exactly 1 time
    current_duration = -1
    while _to_be_continued(iteration, current_duration, aborted,
                           times=times, duration=duration):

        scenario_context = runner._get_scenario_context(iteration, context)
        worker_args = (
            queue, cls, method_name, scenario_context, args, event_queue)

        thread = threading.Thread(target=runner._worker_thread,
                                  args=worker_args)

        thread.start()
        if timeout:
            timeout_queue.put((thread, time.time() + timeout))
        pool.append(thread)
        alive_threads_in_pool += 1

        while alive_threads_in_pool == concurrency:
            prev_finished_threads_in_pool = finished_threads_in_pool
            finished_threads_in_pool = 0
            for t in pool:
                if not t.isAlive():
                    finished_threads_in_pool += 1

            alive_threads_in_pool -= finished_threads_in_pool
            alive_threads_in_pool += prev_finished_threads_in_pool

            if alive_threads_in_pool < concurrency:
                # NOTE(boris-42): clean up the pool; otherwise its length
                # would grow to `times`, which can be arbitrarily large
                while pool and not pool[0].isAlive():
                    pool.popleft().join()
                    finished_threads_in_pool -= 1
                break

            # sleep briefly so these checks do not turn into a busy loop
            time.sleep(0.001)
        iteration = next(iteration_gen)
        current_duration = time.time() - start_time

    # Wait until all threads are done
    while pool:
        pool.popleft().join()

    if timeout:
        timeout_queue.put((None, None,))
        collector_thr_by_timeout.join()
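
Example #6 generalizes the previous one: the loop stops either after times iterations or after duration seconds, whichever is configured. The helper can be exercised on its own; the stand-in event class below only mimics multiprocessing.Event for illustration:

def _to_be_continued(iteration, current_duration, aborted, times=None,
                     duration=None):
    if times is not None:
        return iteration < times and not aborted.is_set()
    elif duration is not None:
        return current_duration < duration and not aborted.is_set()
    return False


class _NeverAborted(object):     # stand-in for multiprocessing.Event
    def is_set(self):
        return False


aborted = _NeverAborted()
print(_to_be_continued(5, -1, aborted, times=10))           # True
print(_to_be_continued(10, -1, aborted, times=10))          # False
print(_to_be_continued(0, 29.0, aborted, duration=30.0))    # True
print(_to_be_continued(0, 31.0, aborted, duration=30.0))    # False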