Example No. 1
    def run(self, logger, backend):
        """ Runs all tests using Thread pool

        When called this method will flatten out self.tests into
        self.test_list, then will prepare a logger, and begin executing tests
        through it's Thread pools.

        Based on the value of options.OPTIONS.concurrent it will either run
        all the tests concurrently, all serially, or first the thread-safe
        tests and then the serial tests.

        Finally it will print a final summary of the tests.

        Arguments:
        logger  -- the logger passed through to LogManager.
        backend -- a results.Backend derived instance.

        """

        self._pre_run_hook()

        chunksize = 1
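        # chunksize is forwarded to imap(); a value of 1 dispatches a single
        # test per task, so one slow test never holds back a queued chunk.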

        self._prepare_test_list()
        log = LogManager(logger, len(self.test_list))

        def test(pair, this_pool=None):
            """Function to call test.execute from map"""
            name, test = pair
            with backend.write_test(name) as w:
                test.execute(name, log.get(), self.dmesg, self.monitoring)
                w(test.result)
            if self._monitoring.abort_needed:
                this_pool.terminate()

        def run_threads(pool, testlist):
            """ Open a pool, close it, and join it """
            pool.imap(lambda pair: test(pair, pool), testlist, chunksize)
            pool.close()
            pool.join()

        # multiprocessing.dummy is a wrapper around threading that provides a
        # multiprocessing-compatible API.
        #
        # The default pool size is the number of virtual processor cores.
        single = multiprocessing.dummy.Pool(1)
        multi = multiprocessing.dummy.Pool()

        try:
            if options.OPTIONS.concurrent == "all":
                run_threads(multi, six.iteritems(self.test_list))
            elif options.OPTIONS.concurrent == "none":
                run_threads(single, six.iteritems(self.test_list))
            else:
                # Pass only the thread-safe tests to the threaded pool
                run_threads(multi, (x for x in six.iteritems(self.test_list)
                                    if x[1].run_concurrent))
                # Pass the non-thread-safe tests to the single-threaded pool
                run_threads(single, (x for x in six.iteritems(self.test_list)
                                     if not x[1].run_concurrent))

            log.get().summary()
        except (KeyboardInterrupt, Exception):
            # In the event that C-c is pressed, or any sort of exception would
            # generate a stacktrace, print the status line so that it's clear,
            # then die. Pressing C-c again will kill immediately.
            log.get().summary()
            raise

        self._post_run_hook()

        if self._monitoring.abort_needed:
            raise exceptions.PiglitAbort(self._monitoring.error_message)
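
The core pattern in Example No. 1 is routing each test to one of two multiprocessing.dummy pools based on its run_concurrent flag. Below is a minimal standalone sketch of that split; FakeTest and the tests dict are stand-ins for illustration only, not piglit classes.

# Minimal sketch of the concurrent/serial split above; FakeTest and the
# tests dict are stand-ins, not piglit classes.
import multiprocessing.dummy


class FakeTest(object):
    def __init__(self, run_concurrent):
        self.run_concurrent = run_concurrent

    def execute(self, name):
        print("running", name)


tests = {
    'safe-1': FakeTest(run_concurrent=True),
    'safe-2': FakeTest(run_concurrent=True),
    'serial-1': FakeTest(run_concurrent=False),
}

single = multiprocessing.dummy.Pool(1)   # serial tests, one worker thread
multi = multiprocessing.dummy.Pool()     # defaults to the CPU count


def run_threads(pool, testlist):
    """Submit every (name, test) pair, then close and join the pool."""
    pool.imap(lambda pair: pair[1].execute(pair[0]), testlist, 1)
    pool.close()
    pool.join()


# Thread-safe tests go to the multi pool, everything else runs serially.
run_threads(multi, (t for t in tests.items() if t[1].run_concurrent))
run_threads(single, (t for t in tests.items() if not t[1].run_concurrent))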
Example No. 2
def run(profiles, logger, backend, concurrency):
    """Runs all tests using Thread pool.

    When called this method will flatten out self.tests into self.test_list,
    then will prepare a logger, and begin executing tests through it's Thread
    pools.

    Based on the value of concurrency it will either run all the tests
    concurrently, all serially, or first the thread-safe tests and then the
    serial tests.

    Finally it will print a final summary of the tests.

    Arguments:
    profiles    -- a list of Profile instances.
    logger      -- the logger passed through to LogManager.
    backend     -- a results.Backend derived instance.
    concurrency -- "all", "none", or "some"; controls which tests may run
                   concurrently.
    """
    chunksize = 1

    # The logger needs to know how many tests are running. Because of filters
    # there's no way to do that without making a concrete list out of the
    # filters profiles.
    profiles = [(p, list(p.itertests())) for p in profiles]
    log = LogManager(logger, sum(len(l) for _, l in profiles))

    def test(name, test, profile, this_pool=None):
        """Function to call test.execute from map"""
        with backend.write_test(name) as w:
            test.execute(name, log.get(), profile.options)
            w(test.result)
        if profile.options['monitor'].abort_needed:
            this_pool.terminate()

    def run_threads(pool, profile, test_list, filterby=None):
        """ Open a pool, close it, and join it """
        if filterby:
            # Although filterby could be attached to TestProfile as a filter,
            # it would have to be removed when run_threads exits, requiring
            # more code, and adding side-effects
            test_list = (x for x in test_list if filterby(x))

        pool.imap(lambda pair: test(pair[0], pair[1], profile, pool),
                  test_list, chunksize)

    def run_profile(profile, test_list):
        """Run an individual profile."""
        profile.setup()
        if concurrency == "all":
            run_threads(multi, profile, test_list)
        elif concurrency == "none":
            run_threads(single, profile, test_list)
        else:
            assert concurrency == "some"
            # Pass only the thread-safe tests to the threaded pool
            run_threads(multi, profile, test_list,
                        lambda x: x[1].run_concurrent)

            # Pass the non-thread-safe tests to the single-threaded pool
            run_threads(single, profile, test_list,
                        lambda x: not x[1].run_concurrent)
        profile.teardown()

    # multiprocessing.dummy is a wrapper around threading that provides a
    # multiprocessing-compatible API.
    #
    # The default pool size is the number of virtual processor cores.
    single = multiprocessing.dummy.Pool(1)
    multi = multiprocessing.dummy.Pool()

    try:
        for p in profiles:
            run_profile(*p)

        for pool in [single, multi]:
            pool.close()
            pool.join()
    finally:
        log.get().summary()

    for p, _ in profiles:
        if p.options['monitor'].abort_needed:
            raise exceptions.PiglitAbort(p.options['monitor'].error_message)
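
Both runners record results through `with backend.write_test(name) as w: ... w(test.result)`, which implies that write_test(name) returns a context manager yielding a writer callable. The following hypothetical in-memory backend sketches just that interface shape; it is not piglit's real results.Backend.

# Hypothetical in-memory backend showing the write_test() shape implied
# by `with backend.write_test(name) as w: ... w(test.result)`.
import contextlib


class InMemoryBackend(object):
    def __init__(self):
        self.results = {}

    @contextlib.contextmanager
    def write_test(self, name):
        def writer(result):
            # Whatever the caller passes in is recorded under the test name.
            self.results[name] = result
        yield writer


backend = InMemoryBackend()
with backend.write_test('group@some-test') as w:
    w('pass')           # piglit would pass a TestResult here
print(backend.results)  # {'group@some-test': 'pass'}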
Example No. 3
def run(profiles, logger, backend, concurrency, jobs):
    """Runs all tests using Thread pool.

    When called this method will flatten out self.tests into self.test_list,
    then will prepare a logger, and begin executing tests through it's Thread
    pools.

    Based on the value of concurrency it will either run all the tests
    concurrently, all serially, or first the thread-safe tests and then the
    serial tests.

    Finally it will print a final summary of the tests.

    Arguments:
    profiles    -- a list of Profile instances.
    logger      -- the logger passed through to LogManager.
    backend     -- a results.Backend derived instance.
    concurrency -- "all", "none", or "some"; controls which tests may run
                   concurrently.
    jobs        -- maximum number of concurrent jobs; if None, the pool
                   defaults to the number of CPU cores.
    """
    chunksize = 1

    profiles = [(p, p.itertests()) for p in profiles]
    log = LogManager(logger, sum(len(p) for p, _ in profiles))

    # Check that, after the filters are run, there are actually tests to run:
    # if not any(l for _, l in profiles):
    #     raise exceptions.PiglitUserError('no matching tests')

    def test(name, test, profile, this_pool=None):
        """Function to call test.execute from map"""
        with backend.write_test(name) as w:
            test.execute(name, log.get(), profile.options)
            w(test.result)
        if profile.options['monitor'].abort_needed:
            this_pool.terminate()

    def run_threads(pool, profile, test_list, filterby=None):
        """ Open a pool, close it, and join it """
        if filterby:
            # Although filterby could be attached to TestProfile as a filter,
            # it would have to be removed when run_threads exits, requiring
            # more code, and adding side-effects
            test_list = (x for x in test_list if filterby(x))

        for n, t in test_list:
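            # apply_async queues each test individually; completion is only
            # awaited later via the pools' close()/join() in the caller.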
            pool.apply_async(test, [n, t, profile, pool])

    def run_profile(profile, test_list):
        """Run an individual profile."""
        profile.setup()
        if concurrency == "all":
            run_threads(multi, profile, test_list)
        elif concurrency == "none":
            run_threads(single, profile, test_list)
        else:
            assert concurrency == "some"
            # test_list is an iterator, so duplicate it in order to iterate
            # it twice.
            test_list = itertools.tee(test_list, 2)

            # Pass only the thread-safe tests to the threaded pool
            run_threads(multi, profile, test_list[0],
                        lambda x: x[1].run_concurrent)

            # Pass the non-thread-safe tests to the single-threaded pool
            run_threads(single, profile, test_list[1],
                        lambda x: not x[1].run_concurrent)
        profile.teardown()

    # multiprocessing.dummy is a wrapper around threading that provides a
    # multiprocessing-compatible API.
    #
    # When jobs is None the pool size defaults to the number of virtual
    # processor cores.
    single = multiprocessing.dummy.Pool(1)
    multi = multiprocessing.dummy.Pool(jobs)

    try:
        for p in profiles:
            run_profile(*p)

        for pool in [single, multi]:
            pool.close()
            pool.join()
    finally:
        log.get().summary()

    for p, _ in profiles:
        if p.options['monitor'].abort_needed:
            raise exceptions.PiglitAbort(p.options['monitor'].error_message)
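
Example No. 3 swaps imap for apply_async and, for the "some" case, uses itertools.tee to duplicate the lazy test iterator so it can be filtered twice. Here is a small self-contained sketch of that pattern; the work() function and the name-based concurrency flag are placeholders, not piglit APIs.

# Standalone sketch of the tee + apply_async pattern; work() and the
# name-based "concurrent" flag are placeholders, not piglit APIs.
import itertools
import multiprocessing.dummy


def work(name, concurrent):
    print('ran', name, 'concurrently' if concurrent else 'serially')


pairs = ((name, name.startswith('safe'))
         for name in ['safe-a', 'safe-b', 'serial-a'])

# tee() duplicates the lazy iterator so each filter sees every pair.
concurrent_stream, serial_stream = itertools.tee(pairs, 2)

single = multiprocessing.dummy.Pool(1)
multi = multiprocessing.dummy.Pool(4)

for name, flag in (p for p in concurrent_stream if p[1]):
    multi.apply_async(work, [name, flag])
for name, flag in (p for p in serial_stream if not p[1]):
    single.apply_async(work, [name, flag])

# As in the examples above, close() + join() waits for every queued task.
for pool in (multi, single):
    pool.close()
    pool.join()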