Example No. 1
    def test_different_keys(self):
        key1, key2 = uuid.uuid4().hex, uuid.uuid4().hex

        def get_result(key):
            return lambda idx: requests.post("http://localhost:5000/", headers={'X-Idempotent-Key': key}).text

        pool = multiprocessing.pool.ThreadPool(8)
        map_1 = pool.map_async(get_result(key1), range(4))
        map_2 = pool.map_async(get_result(key2), range(4))

        results_1 = map_1.get()
        results_2 = map_2.get()
        self.assertEqual(len(set(results_1)), 1, 'Results for the same idempotency key differed')
        self.assertEqual(len(set(results_2)), 1, 'Results for the same idempotency key differed')
        self.assertNotEqual(results_1[0], results_2[0], 'Got the same result for two different idempotency keys')
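This test assumes a server on localhost:5000 that returns one stable response body per X-Idempotent-Key header, no matter how many concurrent POSTs arrive. The server under test is not shown; a minimal Flask sketch of such an endpoint (hypothetical names and behavior, not the original implementation) could look like:

import uuid

from flask import Flask, request

app = Flask(__name__)
_responses = {}  # idempotency key -> body of the first response

@app.route("/", methods=["POST"])
def idempotent_endpoint():
    key = request.headers["X-Idempotent-Key"]
    # Replay the first response computed for this key; compute it only once.
    return _responses.setdefault(key, uuid.uuid4().hex)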
Example No. 2
def multiRun():
    pool = ThreadPool(cpu_count() * 16)
    global ip_list
    global results
    results = pool.map_async(Run, ip_list)
    pool.close()
    pool.join()
Example No. 3
def fast_process(fn_t, fn_read, shape, tally_depth, ds, iw, ih, categories,
                 fieldmap, thresh, labelcat, batch_size, ahead, verbose,
                 parallel):
    psize = int(numpy.ceil(float(ds.size()) / parallel))
    ranges = [(s, min(ds.size(), s + psize))
              for s in range(0, ds.size(), psize) if s < ds.size()]
    parallel = len(ranges)
    original_sigint_handler = setup_sigint()
    # pool = multiprocessing.Pool(processes=parallel, initializer=setup_sigint)
    pool = multiprocessing.pool.ThreadPool(processes=parallel)
    restore_sigint(original_sigint_handler)
    # Precache memmapped files
    blobdata = cached_memmap(fn_read, mode='r', dtype='float32', shape=shape)
    count_t = cached_memmap(fn_t,
                            mode='r+',
                            dtype='int32',
                            shape=(ds.size(), tally_depth, 3))
    data = [(fn_t, fn_read, shape, tally_depth, ds, iw, ih, categories,
             fieldmap, thresh, labelcat, batch_size, ahead, verbose, True) + r
            for r in ranges]
    try:
        result = pool.map_async(individual_process, data)
        result.get(31536000)
    except KeyboardInterrupt:
        print("Caught KeyboardInterrupt, terminating workers")
        pool.terminate()
        raise
    else:
        pool.close()
    pool.join()
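The one-year timeout in result.get(31536000) is a deliberate workaround rather than an oversight: a bare get() (or a plain pool.map) can leave the main thread blocked in a way that never sees KeyboardInterrupt, while map_async plus a finite timeout keeps Ctrl-C deliverable so the workers can be terminated. The same pattern in isolation, as a minimal sketch:

import multiprocessing.pool

def run_interruptibly(func, items, processes=4):
    # map_async + get(timeout) keeps Ctrl-C deliverable to the main thread.
    pool = multiprocessing.pool.ThreadPool(processes=processes)
    try:
        result = pool.map_async(func, items).get(31536000)
    except KeyboardInterrupt:
        print("Caught KeyboardInterrupt, terminating workers")
        pool.terminate()
        raise
    else:
        pool.close()
        return result
    finally:
        pool.join()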
Example No. 4
    def test_excepthook(self):
        output = OutputView()
        output.resize(500, 300)
        output.show()

        red_formater = output.formated(color=Qt.red)

        red = TextStream()
        red.stream.connect(red_formater.write)

        hook = ExceptHook(stream=red)

        def raise_exception(i):
            try:
                if i % 2 == 0:
                    raise ValueError("even")
                else:
                    raise ValueError("odd")
            except Exception:
                # explicitly call hook (Thread class has its own handler)
                hook(*sys.exc_info())

        pool = multiprocessing.pool.ThreadPool(10)
        res = pool.map_async(raise_exception, range(100))

        self.app.exec_()

        res.wait()
Example No. 5
def run_simulation(lview, func, vals, parameters, fname_i, N=None,
                   overwrite=False):
    """Run a simulation where one loops over `vals`. The simulation
    yields len(vals) results, but by using `N`, you can split it up
    in parts of length N.

    Parameters
    ----------
    lview : ipyparallel.client.view.LoadBalancedView object
        LoadBalancedView for asynchronous map.
    func : function
        Function that takes a list of arguments: `vals`.
    vals : list
        Arguments for `func`.
    parameters : dict
        Dictionary that is saved with the data, used for constant
        parameters.
    fname_i : str
        Name for the resulting HDF5 files. If the simulation is
        split up in parts by using the `N` argument, it needs to
        be a formattable string, for example 'file_{}'.
    N : int
        Number of results in each pandas.DataFrame.
    overwrite : bool
        Overwrite the file even if it already exists.
    """
    if N is None:
        N = 1000000
        if len(vals) > N:
            raise Exception('You need to split up `vals` into smaller parts')

    N_files = len(vals) // N + (0 if len(vals) % N == 0 else 1)
    print('`vals` will be split in {} files.'.format(N_files))
    time_elapsed = 0
    parts_done = 0
    for i, chunk in enumerate(partition_all(N, vals)):
        fname = fname_i.replace('{}', '{:03d}').format(i)
        print('Busy with file: {}.'.format(fname))
        if not os.path.exists(fname) or overwrite:
            map_async = lview.map_async(func, chunk)
            map_async.wait_interactive()
            result = map_async.result()
            df = pd.DataFrame(result)
            df = df.assign(**parameters)
            df = df.assign(git_hash=get_git_revision_hash())
            os.makedirs(os.path.dirname(fname), exist_ok=True)
            df.to_hdf(fname, 'all_data', mode='w', complib='zlib', complevel=9)

            # Print useful information
            N_files_left = N_files - (i + 1)
            parts_done += 1
            time_elapsed += map_async.elapsed
            time_left = timedelta(seconds=(time_elapsed / parts_done) *
                                  N_files_left)
            print_str = ('Saved {}, {} more files to go, {} time left '
                         'before everything is done.')
            print(print_str.format(fname, N_files_left, time_left))
        else:
            print('File: {} was already done.'.format(fname))
Example No. 6
def wrapper(cmd, timeout=None):
    with multiprocessing.Pool(processes=1) as pool:
        try:
            result = []
            r = pool.map_async(call_command, [cmd], callback=result.append)
            r.get(timeout)
            return result[0][0]
        except multiprocessing.TimeoutError:
            return (None, -1)
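Hypothetical usage, assuming call_command returns an (output, returncode) tuple matching the fallback value:

# out, code = wrapper(["sleep", "10"], timeout=2)   # -> (None, -1) after 2 s
# out, code = wrapper(["echo", "hi"])               # no timeout: waits indefinitely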
Example No. 7
def threading_test_runner_pool(num_threads, test_work_items):
    # Initialize our global state.
    initialize_global_vars_threading(num_threads, test_work_items)

    pool = multiprocessing.pool.ThreadPool(num_threads)
    map_future = pool.map_async(
        process_dir_worker_threading_pool, test_work_items)

    return map_async_run_loop(
        map_future, RUNNER_PROCESS_ASYNC_MAP, RESULTS_LISTENER_CHANNEL)
Example No. 8
    def test_threadsafe(self):
        output = OutputView()
        output.resize(500, 300)
        output.show()

        blue_formater = output.formated(color=Qt.blue)
        red_formater = output.formated(color=Qt.red)

        correct = []

        def check_thread(*args):
            correct.append(QThread.currentThread() == self.app.thread())

        blue = TextStream()
        blue.stream.connect(blue_formater.write)
        blue.stream.connect(check_thread)

        red = TextStream()
        red.stream.connect(red_formater.write)
        red.stream.connect(check_thread)

        def printer(i):
            if i % 12 == 0:
                fizzbuz = "fizzbuz"
            elif i % 4 == 0:
                fizzbuz = "buz"
            elif i % 3 == 0:
                fizzbuz = "fizz"
            else:
                fizzbuz = str(i)

            if i % 2:
                writer = blue
            else:
                writer = red

            writer.write("Greetings from thread {0}. "
                         "This is {1}\n".format(current_thread().name,
                                                fizzbuz))

        pool = multiprocessing.pool.ThreadPool(100)
        res = pool.map_async(printer, range(10000))

        self.app.exec_()

        res.wait()

        # force all pending enqueued emits
        QCoreApplication.sendPostedEvents(blue, QEvent.MetaCall)
        QCoreApplication.sendPostedEvents(red, QEvent.MetaCall)
        self.app.processEvents()

        self.assertTrue(all(correct))
        self.assertEqual(len(correct), 10000)
Example No. 9
    def test_threadsafe(self):
        output = OutputView()
        output.resize(500, 300)
        output.show()

        blue_formater = output.formatted(color=Qt.blue)
        red_formater = output.formatted(color=Qt.red)

        correct = []

        def check_thread(*args):
            correct.append(QThread.currentThread() == self.app.thread())

        blue = TextStream()
        blue.stream.connect(blue_formater.write)
        blue.stream.connect(check_thread)

        red = TextStream()
        red.stream.connect(red_formater.write)
        red.stream.connect(check_thread)

        def printer(i):
            if i % 12 == 0:
                fizzbuz = "fizzbuz"
            elif i % 4 == 0:
                fizzbuz = "buz"
            elif i % 3 == 0:
                fizzbuz = "fizz"
            else:
                fizzbuz = str(i)

            if i % 2:
                writer = blue
            else:
                writer = red

            writer.write("Greetings from thread {0}. "
                         "This is {1}\n".format(current_thread().name,
                                                fizzbuz))

        pool = multiprocessing.pool.ThreadPool(100)
        res = pool.map_async(printer, range(10000))
        self.qWait()
        res.wait()

        # force all pending enqueued emits
        QCoreApplication.sendPostedEvents(blue, QEvent.MetaCall)
        QCoreApplication.sendPostedEvents(red, QEvent.MetaCall)
        self.app.processEvents()

        self.assertTrue(all(correct))
        self.assertEqual(len(correct), 10000)
        pool.close()
Example No. 10
    def start_lifecycle_checks(self, state):
        """Check if a particular lifecycle state has been reached by executing
        all its defined checks. If no checks are defined, it is assumed the
        state is reached immediately."""

        if state not in self._lifecycle:
            # Return None to indicate no checks were performed.
            return None

        pool = multiprocessing.pool.ThreadPool()
        return pool.map_async(lambda check: check.test(),
                              self._lifecycle[state])
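From a hypothetical caller's view, the None return ("no checks defined") has to be distinguished from an AsyncResult before waiting on it; a sketch (names assumed):

# checks = service.start_lifecycle_checks("running")
# if checks is None:
#     ready = True                      # no checks defined; reached immediately
# else:
#     ready = all(checks.get(timeout=30))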
Example No. 11
def map_async(nr_procs, func, args_iter, args_kw=None, daemonic=True,
              pool=None):
    """Wrap python's ``map_async``

    This has some utility stuff like star passthrough

    Run func on nr_procs with arguments given by args_iter. args_iter
    should be an iterable of the list of arguments that can be unpacked
    for each invocation. kwargs are passed to func as keyword arguments

    Returns:
        (tuple) (pool, multiprocessing.pool.AsyncResult)

    Note: daemonic can be set to False if one needs to spawn child
        processes in func, BUT this could be vulnerable to creating
        an undead army of worker processes, only use this if you
        really really need it, and know what you're doing

    Example:
        >>> func = lambda i, letter: print(i, letter)
        >>> p, r = map_async(2, func, itertools.izip(itertools.count(), 'abc'))
        >>> r.get(1e8)
        >>> p.join()
        >>> # the following is printed from 2 processes
        0 a
        1 b
        2 c
    """
    if sys.platform == 'darwin' and ("mayavi.mlab" in sys.modules or
                                     "mayavi" in sys.modules):
        import mayavi
        if mayavi.ETSConfig.toolkit == 'qt4':
            viscid.logger.critical("Using multiprocessing with Mayavi + Qt4 "
                                   "will cause segfaults on join.\n"
                                   "A workaround is to use the wx backend "
                                   "(`os.environ['ETS_TOOLKIT'] = 'wx'`).")

    if args_kw is None:
        args_kw = {}
    args_iter = izip(repeat(func), args_iter, repeat(args_kw))

    # if given a pool, don't close it when we're done delegating tasks
    if pool is not None:
        return pool, pool.map_async(_star_passthrough, args_iter)
    else:
        if daemonic:
            pool = mp.Pool(nr_procs)
        else:
            pool = NoDaemonPool(nr_procs)

        with closing(pool) as p:
            return p, p.map_async(_star_passthrough, args_iter)
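_star_passthrough is referenced but not shown here. Since args_iter is zipped into (func, args, kwargs) triples above, it presumably amounts to this minimal sketch:

def _star_passthrough(packed):
    # packed == (func, args, kwargs), as assembled in map_async() above
    func, args, kwargs = packed
    return func(*args, **kwargs)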
Example No. 12
    def test_threadsafe(self):
        output = OutputView()
        output.resize(500, 300)
        output.show()

        blue_formater = output.formated(color=Qt.blue)
        red_formater = output.formated(color=Qt.red)

        correct = []

        def check_thread(*args):
            correct.append(QThread.currentThread() == self.app.thread())

        blue = TextStream()
        blue.stream.connect(blue_formater.write)
        blue.stream.connect(check_thread)

        red = TextStream()
        red.stream.connect(red_formater.write)
        red.stream.connect(check_thread)

        def printer(i):
            if i % 12 == 0:
                fizzbuz = "fizzbuz"
            elif i % 4 == 0:
                fizzbuz = "buz"
            elif i % 3 == 0:
                fizzbuz = "fizz"
            else:
                fizzbuz = str(i)

            if i % 2:
                writer = blue
            else:
                writer = red

            writer.write(
                "Greetings from thread {0}. "
                "This is {1}\n".format(current_thread().name, fizzbuz)
            )

        pool = multiprocessing.pool.ThreadPool(100)
        res = pool.map_async(printer, range(10000))

        self.app.exec_()

        res.wait()

        self.assertTrue(all(correct))
        self.assertTrue(len(correct) == 10000)
Example No. 13
def map_sequentially_or_concurrently(
        elements,
        function,
        concurrent=None,
        randomize=False,
        chunksize=1,
        pool_class=multiprocessing.pool.ThreadPool,
        **kwargs):
    element_count = len(elements)
    for element in elements:
        element.set_cancel(False)
    if randomize:
        elements = random.sample(elements, k=len(elements))
    if concurrent:
        try:
            pool = pool_class(multiprocessing.cpu_count())
            partially_applied_function = functools.partial(
                call_with_capturing_output,
                function=function,
                elements=elements,
                element_count=element_count,
                **kwargs)
            results = pool.map_async(partially_applied_function,
                                     elements,
                                     chunksize=chunksize)
            return results.get(0xFFFF)
        except KeyboardInterrupt:
            for element in elements:
                element.set_cancel(True)
            return results.get(0xFFFF)
    else:
        keyboard_interrupt_handler = KeyboardInterruptHandler()
        with DisabledKeyboardInterrupts(keyboard_interrupt_handler):
            results = []
            cancel = False
            element_index = 0
            for element in elements:
                result = function(
                    element,
                    keyboard_interrupt_handler=keyboard_interrupt_handler,
                    cancel=cancel,
                    index=element_index,
                    count=element_count,
                    **kwargs)
                if result.result == "CANCEL":
                    cancel = True
                results.append(result)
                element_index = element_index + 1
            return results
Example No. 14
def linearStatusgetter(taskList, resubmitList, killList):
    q = []
    finished = []
    for itask, task in enumerate(taskList):
        if task.frontEndStatus == "RETRIEVED":
            finished.append(task)
            continue
        q.append([task, resubmitList[itask], killList[itask]])
    pool = multiprocessing.Pool(10)
    result = pool.map_async(checkTask, q)
    pool.close()
    #pool.join()
    while pool._cache:
        time.sleep(1)
    res = result.get()
    taskList = res + finished
    return taskList
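Polling the private pool._cache attribute works, but the public AsyncResult API gives the same behavior; a sketch of the equivalent wait:

# result = pool.map_async(checkTask, q)
# pool.close()
# result.wait()          # blocks until every task has completed
# res = result.get()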
Example No. 15
def multiprocessing_test_runner_pool(num_threads, test_work_items):
    # Initialize our global state.
    initialize_global_vars_multiprocessing(num_threads, test_work_items)

    manager = multiprocessing.Manager()
    worker_index_map = manager.dict()

    pool = multiprocessing.Pool(
        num_threads,
        initializer=setup_global_variables,
        initargs=(output_lock, test_counter, total_tests, test_name_len,
                  dotest_options, worker_index_map))

    # Start the map operation (async mode).
    map_future = pool.map_async(
        process_dir_worker_multiprocessing_pool, test_work_items)
    return map_async_run_loop(
        map_future, RUNNER_PROCESS_ASYNC_MAP, RESULTS_LISTENER_CHANNEL)
Example No. 16
    def start_lifecycle_checks(self, state):
        """Check if a particular lifecycle state has been reached by executing
        all its defined checks. If no checks are defined, it is assumed the
        state is reached immediately."""

        if state not in self._lifecycle:
            # Return None to indicate no checks were performed.
            return None

        # HACK: Workaround for Python bug #10015 (also #14881). Fixed in
        # Python >= 2.7.5 and >= 3.3.2.
        thread = threading.current_thread()
        if not hasattr(thread, "_children"):
            thread._children = weakref.WeakKeyDictionary()

        pool = multiprocessing.pool.ThreadPool()
        return pool.map_async(lambda check: check.test(),
                              self._lifecycle[state])
Example No. 18
    def master_progress(mpu, num_processes, bucket, upload_list):
        x = 0
        print "num_processes = " + str(num_processes)
        while True:
            try:
                if x != num_parts:
                    pool = NoDaemonProcessPool(processes=num_processes)
                    value = pool.map_async(
                        do_part_upload,
                        gen_args(x, fold_last, upload_list)).get(99999999)
                que.put(value)
                src.close()
                mpu.complete_upload()
                logger.error(str(src.name) + " stop ")
                print "mpu.complete src name " + src.name
                lock.acquire()
                status_list[FileList.index(uploadFileNames)] = 'finish'
                print src.name + " finish  " + str(status_list)
                critical_threadnum(Total_Threadnum, Threadnum, num_processes)
                print uploadFileNames + " add back now is " + str(Threadnum.value)
                lock.release()
                src.close()
                return value
            except KeyboardInterrupt:
                logger.warn("Received KeyboardInterrupt, canceling upload")
                pool.terminate()
                mpu.cancel_upload()
                break
            except IOError:
                break
            except Exception, err:
                logger.error("Encountered an error, canceling upload")
                print src.name
                logger.error(str(src.name) + str(err))
Example No. 19
    with lock:
        print("lock acquired")
        total_points += nb_in

    print("Stopping thread", threading.current_thread().name)


if __name__ == "__main__":
    # Declaring lock and shared number of points
    lock = threading.Lock()
    total_points = 0

    print(f"Starting thread {threading.current_thread().name}")

    pts_thread = int(sys.argv[1])  # first argument: number of points per thread
    nb_thread = int(sys.argv[2])  # second argument: number of threads

    print(f"Starting {nb_thread} threads with {pts_thread} points each")

    # Create a thread pool and execute nb_thread times
    pool = pool.ThreadPool(processes=nb_thread)
    pool.map_async(count_points_in, [pts_thread] * nb_thread).get()

    print(total_points)

    with lock:
        print(4 * total_points / pts_thread / nb_thread)
        print(f"Ending thread {threading.current_thread().name}")
Example No. 20
    def __init__(self, num_threads):
        self.tasks = Queue()
        for _ in range(num_threads):
            Fetcher(self.tasks)
    
    def add_task(self, url):
        self.tasks.put(url)

    def wait_completion(self):
        self.tasks.join()


if __name__ == '__main__':
    if len(sys.argv) == 1:
        #self-defined threadpool
        start = time.time()
        pool = ThreadPool(4)
        pool.add_task('/')
        pool.wait_completion()
        print('{} URLs fetched in {:.1f} seconds'.format(len(seen_urls), time.time()-start))
    elif sys.argv[1] == '-s':
        # system threadpool
        start = time.time()
        pool = multiprocessing.pool.ThreadPool()
        tasks = Queue()
        tasks.put('/')
        workers = [Fetcher(tasks) for i in range(4)]
        pool.map_async(lambda w: w.run(), workers)
        tasks.join()
        pool.close()
        print('{} URLs fetched in {:.1f} seconds'.format(len(seen_urls), time.time()-start))
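Fetcher is not shown in this excerpt. Judging from the queue protocol (tasks.put, tasks.join) and the shared seen_urls set, a rough, hypothetical reconstruction:

import threading

class Fetcher(threading.Thread):
    # Hypothetical sketch: consume URLs from `tasks`, record them in the
    # shared seen_urls set, and mark each queue item done.
    def __init__(self, tasks):
        super().__init__(daemon=True)
        self.tasks = tasks
        self.start()

    def run(self):
        while True:
            url = self.tasks.get()
            if url not in seen_urls:
                seen_urls.add(url)
                # fetch `url` here and self.tasks.put() any discovered links
            self.tasks.task_done()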
Example No. 21
    return fn(*args)


def run_star(args):
    return run_download(*args)


if __name__ == '__main__':
    kaggle_int = 'kaggle.ini'

    if not os.path.exists(kaggle_int):
        print("Please create kaggle.ini first. See kaggle.ini.sample.")
        exit()

    competition, destination = read_args()
    username, password = read_config(kaggle_int)

    if username == "*****@*****.**" or password == "KAGGLE_PASSWORD":
        print(
            "Please setup kaggle.ini using your kaggle username and password.")

    else:
        session = login(username, password)
        data_url_list = get_data_url_by_name(competition)

        pool = pool.Pool()
        tasks = [(download, (url, session, destination))
                 for url in data_url_list]
        results = pool.map_async(run_star, tasks)
        results.wait()
Example No. 22
    def filter(self, items: Iterable[Any]) -> Iterable[Any]:

        if len(self._filters) == 0:
            return items

        try:
            with Manager() as manager:

                stdout_queue = manager.Queue()  #type: ignore
                stdlog_queue = manager.Queue()  #type: ignore

                stdout_writer, stdout_reader = QueueSink(
                    stdout_queue), QueueSource(stdout_queue)
                stdlog_writer, stdlog_reader = QueueSink(
                    stdlog_queue), QueueSource(stdlog_queue)

                class MyPool(multiprocessing.pool.Pool):

                    _missing_error_definition_error_is_new = True

                    def _join_exited_workers(self):

                        for worker in self._pool:
                            if worker.exitcode == 1000 and MyPool._missing_error_definition_error_is_new:
                                #this is a hack... This only works so long as we just
                                #process one job at a time... This is true in our case.
                                #this is necessary because multiprocessing can get stuck
                                #waiting for failed workers and that is frustrating for users.

                                MyPool._missing_error_definition_error_is_new = False

                                message = (
                                    "Coba attempted to evaluate your benchmark in multiple processes but the pickle module was unable to "
                                    "find all the definitions needed to pass the tasks to the processes. The two most common causes of "
                                    "this error are: 1) a learner or simulation is defined in a Jupyter Notebook cell or 2) a necessary "
                                    "class definition exists inside the `__name__=='__main__'` code block in the main execution script. In "
                                    "either case there are two simple solutions: 1) evaluate your benchmark in a single process with no "
                                    "limit on child tasks or 2) define all your classes in a separate python file that is imported when "
                                    "evaluating.")

                                CobaConfig.Logger.log(message)

                            if worker.exitcode is not None and worker.exitcode != 0:
                                #A worker exited in an uncontrolled manner and was unable to clean its job
                                #up. We therefore mark one of the jobs as "finished" but failed to prevent an
                                #infinite wait on a failed job to finish that is actually no longer running.
                                list(self._cache.values())[0]._set(
                                    None, (False, None))

                        return super()._join_exited_workers()

                with MyPool(self._processes,
                            maxtasksperchild=self._maxtasksperchild) as pool:

                    # handle not picklable (this is handled by done_or_failed)
                    # handle empty list (this is done by checking result.ready())
                    # handle exceptions in process (unhandled exceptions can cause children to hang so we pass them to stderr)
                    # handle ctrl-c without hanging
                    #   > don't call result.get when KeyboardInterrupt has been hit
                    #   > handle EOFError/BrokenPipeError from the queue since ctrl-c kills the manager
                    # handle AttributeErrors. These occur when... (this is handled by shadowing several pool methods)
                    #   > a class that is defined in a Jupyter Notebook cell is pickled
                    #   > a class that is defined inside the __name__=='__main__' block is pickled
                    # handle Benchmark.evaluate not being called inside of __name__=='__main__' (this is handled by a big try/catch)

                    def done_or_failed(results_or_exception=None):
                        #This method is called once when map_async completes. If one
                        #of our jobs threw an exception, the argument will contain that
                        #exception; otherwise it will be the returned results of all
                        #the jobs. This method is executed on a thread in the Main context.

                        if isinstance(results_or_exception, Exception):
                            from coba.config import CobaConfig

                            if "Can't pickle" in str(
                                    results_or_exception) or "Pickling" in str(
                                        results_or_exception):

                                message = (
                                    str(results_or_exception) +
                                    ". Coba attempted to process your Benchmark on multiple processes and "
                                    "the named class was not able to be pickled. This problem can be fixed in one of two ways: 1) "
                                    "evaluate the benchmark in question on a single process with no limit on the tasks per child or 2) "
                                    "modify the named class to be picklable. The easiest way to make the given class picklable is to "
                                    "add `def __reduce__ (self) return (<the class in question>, (<tuple of constructor arguments>))` to "
                                    "the class. For more information see https://docs.python.org/3/library/pickle.html#object.__reduce__."
                                )

                                CobaConfig.Logger.log(message)
                            else:
                                CobaConfig.Logger.log_exception(
                                    results_or_exception)

                        stdout_writer.write([None])
                        stdlog_writer.write([None])

                    log_thread = Thread(target=Pipe.join(
                        stdlog_reader, [], CobaConfig.Logger.sink).run)
                    log_thread.daemon = True
                    log_thread.start()

                    processor = MultiprocessFilter.Processor(
                        self._filters, stdout_writer, stdlog_writer,
                        self._processes)
                    result = pool.map_async(processor.process,
                                            items,
                                            callback=done_or_failed,
                                            error_callback=done_or_failed,
                                            chunksize=1)

                    # When items is empty finished_callback will not be called and we'll get stuck waiting for the poison pill.
                    # When items is empty ready() will be true immediately and this check will place the poison pill into the queues.
                    if result.ready(): done_or_failed()

                    try:
                        for item in stdout_reader.read():
                            yield item
                        pool.close()
                    except (KeyboardInterrupt, Exception):
                        try:
                            pool.terminate()
                        except:
                            pass
                        raise
                    finally:
                        pool.join()
                        log_thread.join()

        except RuntimeError as e:
            #This happens when importing main causes this code to run again
            raise CobaFatal(str(e))
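QueueSink and QueueSource are assumed here to be thin wrappers over the manager queues, with the [None] batch written in done_or_failed acting as the poison pill; a minimal sketch:

class QueueSink:
    # Hypothetical sketch: write a batch of items onto a shared queue.
    def __init__(self, queue):
        self._queue = queue

    def write(self, items):
        for item in items:
            self._queue.put(item)

class QueueSource:
    # Hypothetical sketch: yield items until the None poison pill arrives.
    def __init__(self, queue):
        self._queue = queue

    def read(self):
        while True:
            item = self._queue.get()
            if item is None:
                return
            yield item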
Example No. 23
def main():
    test_types = "unit data offscreen replay".split()
    parser = argparse.ArgumentParser()
    parser.add_argument("--smoke", action="store_true")
    parser.add_argument("-t", "--type", action="append", choices=test_types)
    parser.add_argument("test", nargs="*")
    opts = parser.parse_args()

    queue = multiprocessing.Queue()
    pool = multiprocessing.pool.ThreadPool()
    tests = [
        (unit_test, opts, queue, "fixed-test"),
        (data_test, opts, queue, "build-pix", [], ["--text"]),
        (data_test, opts, queue, "object-data"),
        (data_test, opts, queue, "shapes"),
        (data_test, opts, queue, "tint"),
        (offscreen_test, opts, queue, "main-screen"),
        (offscreen_test, opts, queue, "mission-briefing", ["--text"]),
        (offscreen_test, opts, queue, "options"),
        (offscreen_test, opts, queue, "pause", ["--text"]),
        (replay_test, opts, queue, "and-it-feels-so-good"),
        (replay_test, opts, queue, "astrotrash-plus"),
        (replay_test, opts, queue, "blood-toil-tears-sweat"),
        (replay_test, opts, queue, "hand-over-fist"),
        (replay_test, opts, queue, "hornets-nest"),
        (replay_test, opts, queue, "make-way"),
        (replay_test, opts, queue, "moons-for-goons"),
        (replay_test, opts, queue, "out-of-the-frying-pan"),
        (replay_test, opts, queue, "shoplifter-1"),
        (replay_test, opts, queue, "space-race"),
        (replay_test, opts, queue, "the-left-hand"),
        (replay_test, opts, queue, "the-mothership-connection"),
        (replay_test, opts, queue, "the-stars-have-ears"),
        (replay_test, opts, queue, "while-the-iron-is-hot"),
        (replay_test, opts, queue, "yo-ho-ho"),
        (replay_test, opts, queue,
         "you-should-have-seen-the-one-that-got-away"),
    ]

    if opts.test:
        test_map = dict((t[3], t) for t in tests)
        tests = [test_map[test] for test in opts.test]

    if opts.type:
        if "unit" not in opts.type:
            tests = [t for t in tests if t[0] != unit_test]
        if "data" not in opts.type:
            tests = [t for t in tests if t[0] != data_test]
        if "offscreen" not in opts.type:
            tests = [t for t in tests if t[0] != offscreen_test]
        if "replay" not in opts.type:
            tests = [t for t in tests if t[0] != replay_test]

    sys.stderr.write("Running %d tests:\n" % len(tests))
    start = time.time()
    result = pool.map_async(call, tests)
    pool.close()

    failed = handle_queue(queue, tests)

    end = time.time()
    sys.stderr.write("\nRan %d tests in %.2fs\n" % (len(tests), end - start))
    if failed:
        sys.stderr.write("%d tests failed.\n" % failed)
        sys.exit(1)
    else:
        sys.stderr.write("All tests passed!\n")
Example No. 24
    def generate_feature_data(self, ingest_manager, **kwds):
        """ Generate feature data files for ColorDescriptor CSIFT descriptor.

        Works over image files currently.

        Additional key-word arguments:
            parallel: number of parallel sub-processes to utilize. If not
                      provided, uses all available cores.

        :raises ValueError: When there are no images in the given ingest.

        :param ingest_manager: The ingest to create data files over.
        :type ingest_manager: IngestManager

        """
        self._log.info("Generating %s data files for given ingest",
                       self.__class__.__name__)

        parallel = kwds.get('parallel', None)

        if not len(ingest_manager):
            raise ValueError("No images in given ingest. No processing to do")

        self._log.info("Generating features asynchronously")
        args = []
        for i, (uid, filepath) in enumerate(ingest_manager.iteritems()):
            args.append((self, i, self._log.name, uid, filepath))

        pool = multiprocessing.Pool(processes=parallel)
        map_results = pool.map_async(_cd_async_image_feature, args).get()
        r_dict = dict(map_results)
        pool.close()
        pool.join()

        # Filter failed executions -- dict-ifying will cull duplicated None keys,
        # so we only have to remove a lingering None key if there is one.
        if None in r_dict:
            del r_dict[None]
        if not r_dict:
            raise RuntimeError("All images in ingest failed ColorDescriptor "
                               "feature generation. Cannot proceed.")

        # due to the raise conditions above, we can assume that there will be at
        # least one feature in r_dict
        self._log.info("Constructing feature matrix and idx-to-uid map")
        num_features = len(r_dict)
        sorted_uids = sorted(r_dict.keys())
        feature_length = len(r_dict[sorted_uids[0]])
        idx2uid_map = numpy.empty(num_features, dtype=numpy.uint32)
        feature_mat = numpy.matlib.empty((num_features, feature_length))
        for idx, uid in enumerate(sorted_uids):
            idx2uid_map[idx] = uid
            feature_mat[idx] = r_dict[uid]

        # flag a leading percentage of the collected IDs as background data
        # (flagging leading vs. random is more deterministic)
        self._log.info("Constructing BG flags map")
        pivot = int(num_features * self.BACKGROUND_RATIO)
        idx2bg_map = numpy.empty(num_features, dtype=numpy.bool)
        bg_clip_ids = set()
        for idx, item_id in enumerate(idx2uid_map):
            if idx < pivot:
                idx2bg_map[idx] = True
                bg_clip_ids.add(item_id)
            else:
                idx2bg_map[idx] = False

        # Construct a dummy FeatureMemory for the purpose of distance kernel
        # generation
        self._log.info("Generating distance kernel")
        dummy_dk = numpy.matlib.empty((num_features, num_features),
                                      dtype=numpy.bool)
        fm = FeatureMemory(idx2uid_map, bg_clip_ids,
                           feature_mat, dummy_dk)
        kernel_mat = fm._generate_distance_kernel_matrix()

        self._log.info("Saving out data files")
        numpy.save(self.ids_file, idx2uid_map)
        numpy.save(self.bg_flags_file, idx2bg_map)
        numpy.save(self.feature_data_file, feature_mat)
        numpy.save(self.kernel_data_file, kernel_mat)
Example No. 25
def map_async(nr_procs,
              func,
              args_iter,
              args_kw=None,
              daemonic=True,
              threads=False,
              pool=None):
    """Wrap python's ``map_async``

    This has some utility stuff like star passthrough

    Run func on nr_procs with arguments given by args_iter. args_iter
    should be an iterable of the list of arguments that can be unpacked
    for each invocation. kwargs are passed to func as keyword arguments

    Returns:
        (tuple) (pool, multiprocessing.pool.AsyncResult)

    Note:
        When using threads, this is WAY slower than map since
        map_async uses the builtin python ThreadPool. I have no idea
        why that's slower than making threads by hand.

    Note: daemonic can be set to False if one needs to spawn child
        processes in func, BUT this could be vulnerable to creating
        an undead army of worker processes, only use this if you
        really really need it, and know what you're doing

    Example:
        >>> func = lambda i, letter: print(i, letter)
        >>> p, r = map_async(2, func, itertools.izip(itertools.count(), 'abc'))
        >>> r.get(1e8)
        >>> p.join()
        >>> # the following is printed from 2 processes
        0 a
        1 b
        2 c
    """
    nr_procs = sanitize_nr_procs(nr_procs)
    if args_kw is None:
        args_kw = {}

    if not threads and sys.platform == 'darwin' and (
            "mayavi.mlab" in sys.modules or "mayavi" in sys.modules):
        import mayavi
        if mayavi.ETSConfig.toolkit == 'qt4':
            viscid.logger.critical("Using multiprocessing with Mayavi + Qt4 "
                                   "will cause segfaults on join.\n"
                                   "A workaround is to use the wx backend "
                                   "(`os.environ['ETS_TOOLKIT'] = 'wx'`).")

    args_iter = izip(repeat(func), args_iter, repeat(args_kw))

    # if given a pool, don't close it when we're done delegating tasks
    if pool is not None:
        return pool, pool.map_async(_star_passthrough, args_iter)
    else:
        if threads:
            pool = mp.pool.ThreadPool(nr_procs)
        elif daemonic:
            pool = mp.Pool(nr_procs)
        else:
            pool = NoDaemonPool(nr_procs)

        with closing(pool) as p:
            return p, p.map_async(_star_passthrough, args_iter)
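NoDaemonPool is referenced but not defined in this snippet. The classic recipe subclasses Process so its daemon flag is always False, which lets workers spawn their own children; a sketch (not necessarily this project's exact implementation, and the hook point differs on newer Pythons):

import multiprocessing
import multiprocessing.pool

class _NoDaemonProcess(multiprocessing.Process):
    # Always report daemon=False so workers may spawn child processes.
    @property
    def daemon(self):
        return False

    @daemon.setter
    def daemon(self, value):
        pass

class NoDaemonPool(multiprocessing.pool.Pool):
    Process = _NoDaemonProcess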
Example No. 26
    def build(self):

        ncpus = num_cpus()

        log.log("Building useing " + str(ncpus) + " threads\n", log.BLUE)

        t = time.time()

        if ncpus > 1:

            import multiprocessing
            import multiprocessing.pool

            pool = multiprocessing.Pool(
                ncpus, initializer=_init_multiprocessing_helper)

            try:
                result = pool.map_async(builder, self.packages, chunksize=1)
                pool.close()
                while not result.ready():
                    try:
                        result.get(1)  # seconds
                    except multiprocessing.TimeoutError:
                        pass
            except KeyboardInterrupt:
                pool.terminate()
                raise

            pool.terminate()
            pool.join()

            results = result.get(1)

            # fix keyboard interrupt
            # from multiprocessing.pool import IMapIterator

            # def wrapper(func):
            #     def wrap(self, timeout=None):
            #         return func(self, timeout=timeout if timeout is not None else 1e100)
            #     return wrap
            # IMapIterator.next = wrapper(IMapIterator.next)

            # with multiprocessing.pool.Pool(ncpus) as pool:
            #     results = pool.map(builder, self.packages)
        else:
            results = []
            for path in self.packages:
                results.append(builder(path))

        log.log(
            "TOTAL Time spent Compiling: %s Seconds\n" %
            (time.time() - t),
            log.BLUE)
        errors = [r for r in results if r]
        if errors:
            return ''.join(errors)
        else:
            log.log("There were no errors", log.GREEN)
        return False
Example No. 27
def main():
    if sys.platform.startswith("linux"):
        if "DISPLAY" not in os.environ:
            # TODO(sfiera): determine when Xvfb is unnecessary and skip this.
            print("no DISPLAY; using Xvfb")
            os.execvp("xvfb-run",
                      ["xvfb-run", "-s", "-screen 0 640x480x24"] + sys.argv)

    os.chdir(os.path.dirname(os.path.dirname(os.path.realpath(__file__))))

    # Get test submodule if necessary.
    if not os.path.isfile("test/space-race.NLRP"):
        print("test data submodule is missing; fetching it")
        subprocess.check_call("git submodule update --init test".split())

    test_types = "unit data offscreen replay".split()
    parser = argparse.ArgumentParser()
    parser.add_argument("--smoke", action="store_true")
    parser.add_argument("--wine", action="store_true")
    parser.add_argument("-t", "--type", action="append", choices=test_types)
    parser.add_argument("test", nargs="*")
    opts = parser.parse_args()

    queue = multiprocessing.Queue()
    pool = multiprocessing.pool.ThreadPool()
    tests = [
        (unit_test, opts, queue, "color-test"),
        (unit_test, opts, queue, "editable-text-test"),
        (unit_test, opts, queue, "fixed-test"),
        (data_test, opts, queue, "build-pix", [], ["--text"]),
        (data_test, opts, queue, "object-data"),
        (data_test, opts, queue, "shapes"),
        (data_test, opts, queue, "tint"),
        (offscreen_test, opts, queue, "fast-motion", ["--text"]),
        (offscreen_test, opts, queue, "main-screen"),
        (offscreen_test, opts, queue, "mission-briefing", ["--text"]),
        (offscreen_test, opts, queue, "options"),
        (offscreen_test, opts, queue, "pause", ["--text"]),
        (replay_test, opts, queue, "and-it-feels-so-good"),
        (replay_test, opts, queue, "astrotrash-plus"),
        (replay_test, opts, queue, "blood-toil-tears-sweat"),
        (replay_test, opts, queue, "hand-over-fist"),
        (replay_test, opts, queue, "hornets-nest"),
        (replay_test, opts, queue, "make-way"),
        (replay_test, opts, queue, "moons-for-goons"),
        (replay_test, opts, queue, "out-of-the-frying-pan"),
        (replay_test, opts, queue, "shoplifter-1"),
        (replay_test, opts, queue, "space-race"),
        (replay_test, opts, queue, "the-left-hand"),
        (replay_test, opts, queue, "the-mothership-connection"),
        (replay_test, opts, queue, "the-stars-have-ears"),
        (replay_test, opts, queue, "while-the-iron-is-hot"),
        (replay_test, opts, queue, "yo-ho-ho"),
        (replay_test, opts, queue,
         "you-should-have-seen-the-one-that-got-away"),
    ]

    if opts.test:
        test_map = dict((t[3], t) for t in tests)
        tests = [test_map[test] for test in opts.test]

    if opts.type:
        if "unit" not in opts.type:
            tests = [t for t in tests if t[0] != unit_test]
        if "data" not in opts.type:
            tests = [t for t in tests if t[0] != data_test]
        if "offscreen" not in opts.type:
            tests = [t for t in tests if t[0] != offscreen_test]
        if "replay" not in opts.type:
            tests = [t for t in tests if t[0] != replay_test]

    if opts.wine:
        tests = [t for t in tests if t[3] in WINE_TESTS]

    sys.stderr.write("Running %d tests:\n" % len(tests))
    start = time.time()
    result = pool.map_async(call, tests)
    pool.close()

    failed = handle_queue(queue, tests)

    end = time.time()
    sys.stderr.write("\nRan %d tests in %.2fs\n" % (len(tests), end - start))
    if failed:
        sys.stderr.write("%d tests failed.\n" % failed)
        sys.exit(1)
    else:
        sys.stderr.write("All tests passed!\n")
Example No. 28
def main():
    test_types = "unit data offscreen replay".split()
    parser = argparse.ArgumentParser()
    parser.add_argument("--smoke", action="store_true")
    parser.add_argument("-t", "--type", action="append", choices=test_types)
    parser.add_argument("test", nargs="*")
    opts = parser.parse_args()

    queue = multiprocessing.Queue()
    pool = multiprocessing.pool.ThreadPool()
    tests = [
        (unit_test, opts, queue, "fixed-test"),

        (data_test, opts, queue, "build-pix", [], ["--text"]),
        (data_test, opts, queue, "object-data"),
        (data_test, opts, queue, "shapes"),
        (data_test, opts, queue, "tint"),

        (offscreen_test, opts, queue, "main-screen"),
        (offscreen_test, opts, queue, "mission-briefing", ["--text"]),
        (offscreen_test, opts, queue, "options"),
        (offscreen_test, opts, queue, "pause", ["--text"]),

        (replay_test, opts, queue, "and-it-feels-so-good"),
        (replay_test, opts, queue, "astrotrash-plus"),
        (replay_test, opts, queue, "blood-toil-tears-sweat"),
        (replay_test, opts, queue, "hand-over-fist"),
        (replay_test, opts, queue, "hornets-nest"),
        (replay_test, opts, queue, "make-way"),
        (replay_test, opts, queue, "moons-for-goons"),
        (replay_test, opts, queue, "out-of-the-frying-pan"),
        (replay_test, opts, queue, "shoplifter-1"),
        (replay_test, opts, queue, "space-race"),
        (replay_test, opts, queue, "the-left-hand"),
        (replay_test, opts, queue, "the-mothership-connection"),
        (replay_test, opts, queue, "the-stars-have-ears"),
        (replay_test, opts, queue, "while-the-iron-is-hot"),
        (replay_test, opts, queue, "yo-ho-ho"),
        (replay_test, opts, queue, "you-should-have-seen-the-one-that-got-away"),
    ]

    if opts.test:
        test_map = dict((t[3], t) for t in tests)
        tests = [test_map[test] for test in opts.test]

    if opts.type:
        if "unit" not in opts.type:
            tests = [t for t in tests if t[0] != unit_test]
        if "data" not in opts.type:
            tests = [t for t in tests if t[0] != data_test]
        if "offscreen" not in opts.type:
            tests = [t for t in tests if t[0] != offscreen_test]
        if "replay" not in opts.type:
            tests = [t for t in tests if t[0] != replay_test]

    sys.stderr.write("Running %d tests:\n" % len(tests))
    start = time.time()
    result = pool.map_async(call, tests)
    pool.close()

    failed = handle_queue(queue, tests)

    end = time.time()
    sys.stderr.write("\nRan %d tests in %.2fs\n" % (len(tests), end - start))
    if failed:
        sys.stderr.write("%d tests failed.\n" % failed)
        sys.exit(1)
    else:
        sys.stderr.write("All tests passed!\n")
Example No. 29
def run(hosts,
        max_age=0,
        sleep=SLEEP,
        times=TIMES,
        warn_days_before=DAYS,
        grades=None,
        parallel=0):
    headers = [
        'host', 'grade', 'ip', 'altNames', 'issuer', 'expires', 'tested',
        'message'
    ]
    table = prettytable.PrettyTable(headers)

    def func(host):
        return get_host_results(host,
                                max_age=max_age,
                                sleep=sleep,
                                times=times)

    threads = min(parallel, len(hosts)) if parallel else len(hosts)
    if threads > 1:
        pool = multiprocessing.pool.ThreadPool(processes=threads)
        async_result = pool.map_async(func, hosts)
        while True:
            try:
                results = async_result.get(1)
            except multiprocessing.TimeoutError:
                continue
            except KeyboardInterrupt:
                log.warning("Received SIGTERM, exiting")
                return
            break
    else:
        results = map(func, hosts)
    log.info("Completed polling for results")

    ok = True

    for result in results:
        host = result['host']
        if result.get('testTime'):
            tested = format_date(datetime.datetime.fromtimestamp(
                result['testTime'] / 1000),
                                 only_rel=True)
        else:
            tested = 'N/A'
        if result['status'] == 'ERROR':
            log.error('%s: %s', host, result['statusMessage'])
            table.add_row([
                host, 'ERROR', '', '', '', '', tested, result['statusMessage']
            ])
            ok = False
            continue
        for endpoint in result['endpoints']:
            ip_addr = endpoint['ipAddress']
            if 'grade' not in endpoint:
                log.error('%s: %s', host, endpoint['statusMessage'])
                table.add_row([
                    host, 'ERROR', ip_addr, '', '', '', tested,
                    endpoint['statusMessage']
                ])
                ok = False
                continue
            grade = endpoint['grade']
            cert = endpoint['details']['cert']
            issuer = cert['issuerLabel']
            alt_names = cert['altNames']
            expires = datetime.datetime.fromtimestamp(cert['notAfter'] / 1000)
            now = datetime.datetime.now()
            days = (expires - now).days
            if expires < now:
                expires_str = 'EXPIRED!'
            elif days < 2:
                expires_str = 'in %s' % (expires - now)
            else:
                expires_str = 'in %d days' % days
            msg = ''
            error = False
            if expires < now:
                msg = 'Certificate expired'
                error = True
            elif days < warn_days_before:
                msg = "Certificate expires in %d days" % days
                error = True
            if grades and grade not in grades:
                if msg:
                    msg += ' - '
                msg += 'Bad grade %s' % grade
                error = True
            ok &= not error
            if error:
                log.error('%s: %s', host, msg)
            else:
                log.info('%s: OK, grade is %s, expires in %d days', host,
                         grade, days)
            table.add_row([
                host, grade, ip_addr, ', '.join(alt_names)[:64], issuer,
                format_date(expires), tested, msg or 'OK'
            ])
    log.info('\n%s\n', table)
    return ok
Example No. 30
        global _downloads
        _downloads = list(
            map(
                lambda e: {
                    'episode': e,
                    'dl': 0,
                    'total': 0,
                    'finished': False,
                    'failed': False,
                    'quality': '',
                    'source': '',
                    'destination': os.path.join(dest, pref + e['name'] + '.mp4'),
                    'reason': ''
                }, episodes))
        pool.map_async(download_episode, enumerate(episodes))
        pool.close()

        # Monitor and refresh downloads.
        (height, width) = _stdscr.getmaxyx()
        pad = curses.newpad(len(episodes) * 3 + 10, width + 1)
        padline = 0
        while len(list(filter(lambda d: not d['finished'], _downloads))) > 0:
            row = 1

            # Update windows.
            for download in _downloads:
                episode = download['episode']

                # Clear the line.
                pad.addstr(row, 0, ' ' * width)
Example No. 31
File: exo1.py Project: Lgt2x/PPC-TD
    i = 5
    while i * i <= number:
        if number % i == 0 or number % (i + 2) == 0:
            return False
        i = i + 6
    return True


if __name__ == "__main__":
    # ASYNC
    start_async = time()
    print("Workers : ", argv[1])

    with multiprocessing.Pool(processes=int(argv[1])) as pool:
        number_list = [randint(10**3, 10**6) for _ in range(10**6)]
        print("*** Asynchronous process map")
        res = pool.map_async(is_prime, number_list)
        res.get()  # collect results before the pool is torn down

    print(time() - start_async)
    print()

    # SYNC
    start_sync = time()

    with multiprocessing.Pool(processes=int(argv[1])) as pool:
        number_list = [randint(10**3, 10**6) for _ in range(10**6)]
        print("*** Synchronous proces map")
        res = pool.map(is_prime, number_list)

    print(time() - start_sync)
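Only the 6k±1 trial-division tail of is_prime survives at the top of this example; the complete function presumably opens with the usual small-number checks:

def is_prime(number):
    # Small cases first, then test divisors of the form 6k - 1 and 6k + 1.
    if number < 2:
        return False
    if number < 4:
        return True  # 2 and 3 are prime
    if number % 2 == 0 or number % 3 == 0:
        return False
    i = 5
    while i * i <= number:
        if number % i == 0 or number % (i + 2) == 0:
            return False
        i = i + 6
    return True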
Example No. 32
def main():
    if sys.platform.startswith("linux"):
        if "DISPLAY" not in os.environ:
            # TODO(sfiera): determine when Xvfb is unnecessary and skip this.
            print("no DISPLAY; using Xvfb")
            os.execvp("xvfb-run", ["xvfb-run", "-s", "-screen 0 640x480x24"] + sys.argv)

    os.chdir(os.path.dirname(os.path.dirname(os.path.realpath(__file__))))

    # Get test submodule if necessary.
    if not os.path.isfile("test/space-race.NLRP"):
        print("test data submodule is missing; fetching it")
        subprocess.check_call("git submodule update --init test".split())

    test_types = "unit data offscreen replay".split()
    parser = argparse.ArgumentParser()
    parser.add_argument("--smoke", action="store_true")
    parser.add_argument("-t", "--type", action="append", choices=test_types)
    parser.add_argument("test", nargs="*")
    opts = parser.parse_args()

    queue = multiprocessing.Queue()
    pool = multiprocessing.pool.ThreadPool()
    tests = [
        (unit_test, opts, queue, "color-test"),
        (unit_test, opts, queue, "fixed-test"),
        (data_test, opts, queue, "build-pix", [], ["--text"]),
        (data_test, opts, queue, "object-data"),
        (data_test, opts, queue, "shapes"),
        (data_test, opts, queue, "tint"),
        (offscreen_test, opts, queue, "fast-motion"),
        (offscreen_test, opts, queue, "main-screen"),
        (offscreen_test, opts, queue, "mission-briefing", ["--text"]),
        (offscreen_test, opts, queue, "options"),
        (offscreen_test, opts, queue, "pause", ["--text"]),
        (replay_test, opts, queue, "and-it-feels-so-good"),
        (replay_test, opts, queue, "astrotrash-plus"),
        (replay_test, opts, queue, "blood-toil-tears-sweat"),
        (replay_test, opts, queue, "hand-over-fist"),
        (replay_test, opts, queue, "hornets-nest"),
        (replay_test, opts, queue, "make-way"),
        (replay_test, opts, queue, "moons-for-goons"),
        (replay_test, opts, queue, "out-of-the-frying-pan"),
        (replay_test, opts, queue, "shoplifter-1"),
        (replay_test, opts, queue, "space-race"),
        (replay_test, opts, queue, "the-left-hand"),
        (replay_test, opts, queue, "the-mothership-connection"),
        (replay_test, opts, queue, "the-stars-have-ears"),
        (replay_test, opts, queue, "while-the-iron-is-hot"),
        (replay_test, opts, queue, "yo-ho-ho"),
        (replay_test, opts, queue, "you-should-have-seen-the-one-that-got-away"),
    ]

    if opts.test:
        test_map = dict((t[3], t) for t in tests)
        tests = [test_map[test] for test in opts.test]

    if opts.type:
        if "unit" not in opts.type:
            tests = [t for t in tests if t[0] != unit_test]
        if "data" not in opts.type:
            tests = [t for t in tests if t[0] != data_test]
        if "offscreen" not in opts.type:
            tests = [t for t in tests if t[0] != offscreen_test]
        if "replay" not in opts.type:
            tests = [t for t in tests if t[0] != replay_test]

    sys.stderr.write("Running %d tests:\n" % len(tests))
    start = time.time()
    result = pool.map_async(call, tests)
    pool.close()

    failed = handle_queue(queue, tests)

    end = time.time()
    sys.stderr.write("\nRan %d tests in %.2fs\n" % (len(tests), end - start))
    if failed:
        sys.stderr.write("%d tests failed.\n" % failed)
        sys.exit(1)
    else:
        sys.stderr.write("All tests passed!\n")