Example #1
0
class Game(BaseGame):
    """Server-side game loop: tracks connected players, queues incoming
    client messages, and steps the physics simulation each frame."""

    def __init__(self, fps):
        """Initialize the game at the given frame rate (delegated to BaseGame)."""
        super(Game, self).__init__(fps)

        self.in_messages = Queue()   # messages from clients, consumed by the game loop
        self.players = {}            # player.id -> Player
        self.phys_simulator = PhysicsSimulator()

    def on_client_connect(self, websocket):
        """Register a new player for *websocket* and return its id."""
        player = Player(websocket)
        self.players[player.id] = player
        return player.id

    def on_client_disconnect(self, player_id):
        """Forget the player; pop() with a default tolerates unknown ids."""
        self.players.pop(player_id, None)

    def on_message_received(self, message):
        """Timestamp an incoming message, echo it back to its sender, and
        queue it for processing by the game loop."""
        message["timestamp"] = time.time()
        player_id = message["player_id"]
        self.players[player_id].send_message(message)
        self.in_messages.put(message)

    def update(self, delta):
        """Advance the physics simulation one step; *delta* is currently unused."""
        self.phys_simulator.taskMgr.step()

    def render(self):
        """Headless server: nothing to draw."""
        pass
Example #2
0
    def __init__(self, aCritter):
        """Initializes the post office.

        Configures a per-critter file logger, resolves publisher/subscriber
        addresses from the configured crittwork policy, creates the ZMQ
        transport, the announcement queues, the rite connector and the message
        encoder, and constructs (but does not start here) the announcement
        publisher/subscriber and message router daemon threads.

        Arguments:
            aCritter: The critter. Expected to provide getSettings(),
                mCrittnick, mSettings and mRites (inferred from usage below —
                confirm against the critter class).

        """
        self.mCritter = aCritter

        settings = self.mCritter.getSettings()

        # Configuring the logger.
        self.mLogger = logging.getLogger(self.__class__.__name__)
        self.mLogger.propagate = False
        # TODO: Remove the hardcoded value of the path.
        handler = logging.FileHandler("/tmp/" + self.mCritter.mCrittnick + ".log")
        # TODO: Remove the hardcoded value of the formatter.
        formatter = logging.Formatter("[%(asctime)s][%(threadName)28s][%(levelname)8s] - %(message)s")
        handler.setFormatter(formatter)
        self.mLogger.addHandler(handler)
        # NOTE(review): reads mCritter.mSettings here but the local `settings`
        # elsewhere — presumably the same object; verify.
        self.mLogger.setLevel(self.mCritter.mSettings.get("logging", "level"))

        policy = settings.get("crittwork", "policy")

        addressPublisher = addressSubscriber = ""

        # TODO: Now this is an ifology, it should be a real policy.
        if policy == "multicast":
            addressPublisher = addressSubscriber = settings.get("crittwork", "multicast")
        elif policy == "broker":
            addressPublisher = settings.get("crittwork", "brokerPublisher")
            addressSubscriber = settings.get("crittwork", "brokerSubscriber")
        else:
            assert False, "Invalid crittwork policy selected."

        # TODO: Get from the factory.
        self.mTransport = TransportZMQ(addressPublisher, addressSubscriber)

        self.mOutgoingAnnouncementsQueue = Queue()
        self.mIncomingAnnouncementsQueue = Queue()

        self.mRiteConnector = RiteConnector(aCritter.mRites)

        self.mMessageEncoder = MessageEncoder()

        # Spawning the announcement publisher (marked daemon; started elsewhere).
        self.mLogger.debug("Spawning the announcement publisher.")
        self.mAnnouncementPublisher = AnnouncementPublisher(self)
        self.mAnnouncementPublisher.setDaemon(True)

        # Spawning the announcement subscriber.
        self.mLogger.debug("Spawning the announcement subscriber.")
        self.mAnnouncementSubscriber = AnnouncementSubscriber(self)
        self.mAnnouncementSubscriber.setDaemon(True)

        # Spawning the message router.
        # FIXME: Jealous.
        self.mLogger.debug("Spawning the message router.")
        self.mMessageRouter = MessageRouter(self)
        self.mMessageRouter.setDaemon(True)
Example #3
0
    def _update_instances_in_parallel(self, target, instances_to_update):
        """Processes instance updates in parallel and waits for completion.

        Spawns batch_size worker threads that drain a shared queue of
        instances; on any worker failure the whole update is terminated and
        the exception re-raised.

        Arguments:
            target -- target method to handle instance update.
            instances_to_update -- list of InstanceData with update details.

        Returns Queue with non-updated instance data.
        """
        log.info("Processing in parallel with %s worker thread(s)" % self._update_config.batch_size)
        instance_queue = Queue()
        for instance_to_update in instances_to_update:
            instance_queue.put(instance_to_update)

        try:
            threads = []
            for _ in range(self._update_config.batch_size):
                threads.append(spawn_worker(target, kwargs={"instance_queue": instance_queue}))

            # Wait for all workers; join_and_raise propagates worker errors.
            for thread in threads:
                thread.join_and_raise()
        except Exception:
            # Abort the whole update on the first failure, then re-raise.
            self._terminate()
            raise

        return instance_queue
Example #4
0
def test_keyboard_input(option=0):
    """Manual test driver for the clock/console threads (Python 2 code).

    option 0: cross-wire a clock thread and a console thread through their
              request/result queues and run both until they finish.
    option 1: run the clock thread alone for ~5 seconds into a local queue,
              then dump whatever it produced.
    """
    if option == 0:
        clockThread = cxClockThread(1)
        consoleThread = cxConsoleThread()

        # Cross-wire the threads: console requests feed the clock, clock
        # results feed the console.
        consoleThread.setRequestQueue(clockThread.getRequestQueue())
        clockThread.setResultQueue(consoleThread.getResultQueue())

        clockThread.start()
        consoleThread.start()

        clockThread.join()
        consoleThread.join()

    elif option == 1:
        queue = Queue()
        clockThread = cxClockThread(1)
        clockThread.setResultQueue(queue)

        clockThread.start()

        # Let the clock tick for a while, then stop it and drain the results.
        time.sleep(5)
        clockThread.terminate()
        clockThread.join()
        for i in range(0, queue.qsize()):
            print queue.get()
Example #5
0
class Events(threading.Thread):
    # Daemon thread that serialises event handling: callers put() event types
    # on a queue and this thread invokes `callback` for each one in order.
    # (Python 2 code — note the `except Exception, e` syntax below.)
    def __init__(self, callback):
        super(Events, self).__init__()
        self.queue = Queue()
        self.daemon = True
        self.callback = callback
        self.name = "EVENT-QUEUE"
        # Shutdown request flag; set() it from outside to stop run().
        self.stop = threading.Event()

    def put(self, type):
        # NOTE(review): `type` shadows the builtin; kept for interface
        # compatibility.
        self.queue.put(type)

    def run(self):
        try:
            while not self.stop.is_set():
                try:
                    # get event type; the 1-second timeout keeps the loop
                    # re-checking the stop flag.
                    type = self.queue.get(True, 1)

                    # perform callback if we got a event type
                    self.callback(type)

                    # event completed
                    self.queue.task_done()
                except Empty:
                    type = None

            # exiting thread; reset the flag so the object could be reused
            self.stop.clear()
        except Exception, e:
            logger.log(u"Exception generated in thread " + self.name + ": " + ex(e), logger.ERROR)
            logger.log(repr(traceback.format_exc()), logger.DEBUG)
Example #6
0
class MirrorCheckPool(object):
    """Checks a set of mirror URLs on a pool of daemon worker threads and
    persists the collected MirrorLog entries inside one transaction."""

    def __init__(self, urls, timeout=10, num_threads=10):
        self.tasks = Queue()
        self.logs = deque()
        for url in list(urls):
            self.tasks.put(url)

        def _make_worker():
            # Workers drain self.tasks and append results to self.logs.
            worker = Thread(target=mirror_url_worker, args=(self.tasks, self.logs, timeout))
            worker.daemon = True
            return worker

        self.threads = [_make_worker() for _ in range(num_threads)]

    @transaction.commit_on_success
    def run(self):
        logger.debug("starting threads")
        for worker in self.threads:
            worker.start()
        logger.debug("joining on all threads")
        self.tasks.join()
        logger.debug("processing %d log entries", len(self.logs))
        if database_vendor(MirrorLog, mode="write") == "sqlite":
            # sqlite can't bulk-insert these; save one at a time.
            for entry in self.logs:
                entry.save(force_insert=True)
        else:
            MirrorLog.objects.bulk_create(self.logs)
        logger.debug("log entries saved")
Example #7
0
class BlockingShellSocketChannel(ShellSocketChannel):
    """Shell channel variant that buffers incoming messages in a queue for
    synchronous (blocking) consumption."""

    def __init__(self, context, session, address=None):
        super(BlockingShellSocketChannel, self).__init__(context, session, address)
        self._in_queue = Queue()

    def call_handlers(self, msg):
        """Called from the channel's I/O thread: hand the message to the queue."""
        # io.rprint('[[Shell]]', msg) # dbg
        self._in_queue.put(msg)

    def msg_ready(self):
        """Is there a message that has been received?"""
        # Return the comparison directly instead of an if/else returning
        # True/False; qsize() is only a snapshot, matching original semantics.
        return self._in_queue.qsize() > 0

    def get_msg(self, block=True, timeout=None):
        """Get a message if there is one that is ready."""
        if block and timeout is None:
            # never use timeout=None, because get
            # becomes uninterruptible
            timeout = 1e6
        return self._in_queue.get(block, timeout)

    def get_msgs(self):
        """Get all messages that are currently ready."""
        msgs = []
        while True:
            try:
                msgs.append(self.get_msg(block=False))
            except Empty:
                break
        return msgs
    def launch(self, command, cwd):
        """Start *command* (argv list) in *cwd* with all three standard
        streams piped, and spawn a thread that continuously drains stdout and
        stderr into self.std_out_q / self.std_err_q (avoids pipe deadlock).
        Windows-specific (win32com / kernel32)."""
        def enqueue_output(stdout, stderr, std_out_q, std_err_q):
            # Drain stdout fully first, then stderr (sequential, not
            # interleaved), closing each stream at EOF.
            for line in iter(stdout.readline, b""):
                std_out_q.put(line)
            stdout.close()
            for line in iter(stderr.readline, b""):
                std_err_q.put(line)
            stderr.close()

        self.process_name = command[0]

        # NOTE(review): `shell` is never used after creation — confirm whether
        # the Dispatch call has a needed side effect or is dead code.
        shell = win32com.client.Dispatch("WScript.Shell")

        import ctypes

        # Reset the Windows error mode so hard errors surface normally.
        ctypes.windll.kernel32.SetErrorMode(0)

        self.process = subprocess.Popen(
            command, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=cwd
        )

        self.std_out_q = Queue()
        self.std_err_q = Queue()
        # NOTE(review): the monitor thread is not a daemon, so it can keep the
        # interpreter alive until both streams hit EOF — confirm intended.
        self.monitor_output_thread = Thread(
            target=enqueue_output, args=(self.process.stdout, self.process.stderr, self.std_out_q, self.std_err_q)
        )
        self.monitor_output_thread.start()
Example #9
0
        def _process():
            # Reads scanner output lines from a background queue until the
            # timeout expires or self.done is set, registering any Amiigo-ish
            # device found. Relies on closure variables from the enclosing
            # scope: pattern, p (the child process), enqueue_output, console.
            def _process_line(line):
                m = re.match(pattern, line)
                if m:
                    m = m.groupdict()
                    address = m.get("address", "").upper()
                    name = m.get("name", "")
                    uuid = m.get("uuid", "")
                    # if name is Amiigo-ish or if we have amiigo service uuid
                    if name or uuid == "CCA3100078C647859E450887D451317C":
                        self.add_amiigo(address, name, console=console)

            q = Queue()
            t = Thread(target=enqueue_output, args=(p.stdout, q))
            t.daemon = True  # thread dies with the program
            t.start()

            _time = time.time()

            elapsed = 0
            done = False
            while not done:
                try:
                    # Short timeout keeps the loop responsive to self.done.
                    line = q.get(timeout=0.1)
                except Empty:
                    line = ""
                if line:
                    line = line.rstrip()
                    _process_line(line)
                elapsed = time.time() - _time
                done = elapsed > self.timeout_secs or self.done
Example #10
0
 def __init__(self, inSocket, clientId, parent=None):
     """Store the client socket and id, and create empty reply/request
     queues plus a byte buffer for partially received data."""
     QObject.__init__(self, parent)
     self.socket = inSocket
     self.replies = Queue()
     self.requests = Queue()
     self.clientId = clientId
     self.buf = bytes()
Example #11
0
class ThreadManager(object):
    """ ThreadManager provides thread on demand """

    __metaclass__ = Singleton

    NUM_THREAD = 4  # Default number of threads

    def __init__(self, num_thread=NUM_THREAD):
        """ Create num_thread Threads """
        self.queue = Queue()
        self.thread_list = []

        # Start the daemon workers immediately; each one consumes
        # (func, params) tasks from the shared queue.
        for _ in xrange(num_thread):
            worker_thread = Thread(target=worker, args=(self.queue,))
            worker_thread.setDaemon(True)
            worker_thread.start()
            self.thread_list.append(worker_thread)

    def add_task(self, func, params):
        """
        Add a task to perform
        :param func: function to call
        :param params : tuple of parameters
        """
        self.queue.put((func, params))

    def clear(self):
        """ clear pending task """
        while not self.queue.empty():
            self.queue.get()
Example #12
0
    def as_completed(futures, timeout=None):
        """Yield each future as it finishes; raise TimeoutError if *timeout*
        seconds (measured from this call) elapse before all are done."""
        # Record the start time so the remaining timeout can be derived.
        start = time.time()

        # Completed futures are handed over through this queue.
        done_queue = Queue()

        def on_done(future):
            done_queue.put(future)

        # Register the hand-off callback on every future.
        for future in futures:
            future.add_done_callback(on_done)

        def time_left():
            # None means "no deadline"; otherwise clamp at zero.
            if timeout is None:
                return None
            left = start + timeout - time.time()
            return max(left, 0)

        # Wait until all the futures have completed or the deadline passes.
        completed = 0
        total = len(futures)
        while completed < total:
            try:
                yield done_queue.get(timeout=time_left())
            except Empty:
                raise TimeoutError()
            else:
                completed += 1
Example #13
0
class WebSocket(protocol.Protocol):
    # All live connections; broadcast() fans a message out to every one.
    # (Python 2 code — print statements below.)
    websockets = []

    # NOTE(review): decorated @classmethod but the first parameter is named
    # `self` — it actually receives the class object.
    @classmethod
    def add_socket(self, ws):
        print "adding a websocket"
        WebSocket.websockets.append(ws)

    @classmethod
    def broadcast(self, message):
        # Queue the message on every connection, then flush each queue.
        for ws in WebSocket.websockets:
            ws.message_queue.put(message)
            ws.send_all_messages()

    def connectionMade(self):
        # Replay the existing message history to the newly connected client.
        self.message_queue = Queue()
        for i in range(len(BitClient.message_list)):
            self.message_queue.put(BitClient.message_list[i])
        self.send_all_messages()

    def connectionLost(self, reason):
        print "connection lost for", self
        WebSocket.websockets.remove(self)

    def send_all_messages(self):
        # Drain this connection's queue straight onto the transport.
        print "SENDING ALL MESSAGES"
        while not self.message_queue.empty():
            self.transport.write(self.message_queue.get())
Example #14
0
class PooledPg:
    """A very simple PostgreSQL connection pool.

    After you have created the connection pool,
    you can get connections using connection().
    """

    def __init__(self, maxconnections, *args, **kwargs):
        """Set up the PostgreSQL connection pool.

        maxconnections: the number of connections cached in the pool
        args, kwargs: the parameters that shall be used to establish
            the PostgreSQL connections using pg.connect()
        """
        # Since there is no connection level safety, we
        # build the pool using the synchronized queue class
        # that implements all the required locking semantics.
        from Queue import Queue

        self._queue = Queue(maxconnections)
        # Establish all database connections (it would be better to
        # only establish a part of them now, and the rest on demand).
        for i in range(maxconnections):
            self.cache(PgConnection(*args, **kwargs))

    def cache(self, con):
        """Add or return a connection to the pool."""
        self._queue.put(con)

    def connection(self):
        """Get a connection from the pool (blocks while the pool is empty)."""
        return PooledPgConnection(self, self._queue.get())
Example #15
0
class _ParamUpdater(Thread):
    """Daemon thread that serialises parameter read requests to the copter:
    each varid queued on requestQueue is requested in turn, and the thread
    blocks until the corresponding reply packet has been handled."""

    def __init__(self, cf, updatedCallback):
        Thread.__init__(self)
        self.setDaemon(True)
        self.cf = cf
        self.updatedCallback = updatedCallback
        self.requestQueue = Queue()
        # (sic) "incomming" — used purely as a synchronisation signal.
        self.incommingQueue = Queue()
        self.cf.add_port_callback(CRTPPort.PARAM, self._new_packet_cb)

    def _new_packet_cb(self, pk):
        """Handle an incoming PARAM-port packet: forward non-TOC packets to
        the callback and unblock run()."""
        if pk.channel != TOC_CHANNEL:
            self.updatedCallback(pk)
            self.incommingQueue.put(0)  # Don't care what we put, used to sync

    def request_param_update(self, varid):
        """Send a read request for the parameter with the given varid."""
        logger.debug("Requesting update for varid %d", varid)
        pk = CRTPPacket()
        pk.set_header(CRTPPort.PARAM, READ_CHANNEL)
        pk.data = struct.pack("<B", varid)
        self.cf.send_packet(pk, expect_answer=True)

    def run(self):
        while True:
            varid = self.requestQueue.get()  # Wait for request update
            self.request_param_update(varid)  # Send request for update
            self.incommingQueue.get()  # Blocking until reply arrives
Example #16
0
    class Watcher(WatcherBase):
        """Watches *root_dirs* for filesystem events (macOS fsevents) and
        forces a restart when a watched file changes."""

        def __init__(self, root_dirs, worker, log):
            WatcherBase.__init__(self, worker, log)
            self.stream = Stream(self.notify, *(x.encode("utf-8") for x in root_dirs), file_events=True)
            self.wait_queue = Queue()

        def wakeup(self):
            """Unblock loop() and make it trigger a restart."""
            self.wait_queue.put(True)

        def loop(self):
            observer = Observer()
            observer.schedule(self.stream)
            observer.daemon = True
            observer.start()
            try:
                while True:
                    try:
                        # Cannot use blocking get() as it is not interrupted by
                        # Ctrl-C. BUG FIX: the original called get(10000),
                        # which passes 10000 as the *block* argument
                        # (Queue.get(block, timeout)) and therefore blocked
                        # forever; pass it as the timeout instead.
                        if self.wait_queue.get(timeout=10000) is True:
                            self.force_restart()
                    except Empty:
                        pass
            finally:
                observer.unschedule(self.stream)
                observer.stop()

        def notify(self, ev):
            """fsevents callback: normalise the name and report if watched."""
            name = ev.name
            if isinstance(name, bytes):
                name = name.decode("utf-8")
            if self.file_is_watched(name):
                self.handle_modified({name})
Example #17
0
    def __init__(self, vimx):
        """ Creates the LLDB SBDebugger object and more! """
        import logging

        self.logger = logging.getLogger(__name__)
        self.logger.setLevel(logging.INFO)

        # BUG FIX: this handle is handed to the debugger as its *output* file,
        # so it must be opened for writing — the original used the default
        # read-only mode.
        self._sink = open("/dev/null", "w")
        self._dbg = lldb.SBDebugger.Create()
        self._dbg.SetOutputFileHandle(self._sink, False)
        self._ipreter = self._dbg.GetCommandInterpreter()

        self._rcx = lldb.SBListener("the_ear")  # receiver
        self._trx = lldb.SBBroadcaster("the_mouth")  # transmitter for user events
        self._trx.AddListener(self._rcx, self.CTRL_VOICE)

        self._target = None
        self._process = None
        self._num_bps = 0

        # Small bounded queues keep the worker from running ahead of vim.
        self.in_queue = Queue(maxsize=2)
        self.out_queue = Queue(maxsize=1)

        self.vimx = vimx
        self.busy_stack = 0  # when > 0, buffers are not updated
        self.buffers = VimBuffers(vimx)
        self.session = Session(self, vimx)

        super(Controller, self).__init__()  # start the thread
Example #18
0
class AbstractServer(AbstractStoppableEntity):
    """Accepts connection requests from its input queue and hands back worker
    queue pairs, tracking up to max_connections workers at once."""

    def __init__(self, max_connections, invoke_workers):
        AbstractStoppableEntity.__init__(self)
        self.max_connections = max_connections
        self.invoke_workers = invoke_workers
        self.workers = Queue(maxsize=max_connections)
        self.running = False

    def run(self):
        # Serve until a finish signal arrives on the input queue.
        self.running = True
        while self.running:
            message = self.get_from_queue(self.input_queue)
            if self.is_finish_signal(message):
                self.running = False
                continue
            self.output_queue.put(self.connect())

    def connect(self):
        """Create, register and (optionally) start a worker; return its queues."""
        new_worker = self.get_new_worker()
        self.workers.put(new_worker)
        self.start_worker(new_worker)
        return new_worker.input_queue, new_worker.output_queue

    def create_worker(self):
        return self.get_new_worker()

    def get_new_worker(self):
        # Subclasses decide what a worker actually is.
        raise NotImplementedError

    def start_worker(self, worker):
        if self.invoke_workers:
            worker.start()

    def finish_worker(self):
        # Release one tracked slot in the bounded workers queue.
        self.workers.get()
Example #19
0
    def map_buffers(self, fn):
        """ Does a map using fn callback on all buffer object and returns a list.
        @param fn: callback function which takes buffer object as a parameter.
                   If None is returned, the item is ignored.
                   If a StopIteration is raised, the loop breaks.
        @return: The last item in the list returned is a placeholder indicating:
                 * completed iteration, if None is present
                 * otherwise, if StopIteration was raised, the message would be the last item
        """
        vim = self._vim
        # Size-1 queue used only to hand the result back from the vim thread.
        out_q = Queue(maxsize=1)

        def map_buffers_inner():
            mapped = []
            breaked = False
            for b in vim.buffers:
                try:
                    ret = fn(b)
                    if ret is not None:
                        mapped.append(ret)
                except StopIteration as e:
                    # NOTE(review): e.message is Python 2 only — confirm this
                    # code base still targets Python 2.
                    mapped.append(e.message)
                    breaked = True
                    break
            if not breaked:
                mapped.append(None)
            out_q.put(mapped)

        if self._vim_test:
            # Test mode: run synchronously on this thread.
            map_buffers_inner()
        else:
            # Run inside vim's event loop; block here until it finishes.
            vim.session.threadsafe_call(map_buffers_inner)
        return out_q.get()
Example #20
0
def get_outdated():
    """Return the list of installed distributions with newer releases.

    Uses module-level globals shared with the worker threads: `q` (task
    queue), `progress` (progress bar), `done` and `outdated` (accumulators
    filled in by worker() — presumably; confirm against worker's code).
    """
    global progress
    global q
    global done
    global outdated

    outdated = []
    progress = None
    done = []

    # Start up worker pool
    q = Queue()
    # Ten concurrent connections are alright, I think.
    num_worker_threads = 10
    for i in range(num_worker_threads):
        t = Thread(target=worker)
        t.daemon = True
        t.start()

    only_local_packages = False
    pkg_list = pip.get_installed_distributions(local_only=only_local_packages)
    progress = progressbar.ProgressBar(
        widgets=[progressbar.SimpleProgress(), " ", progressbar.Bar(), " ", progressbar.ETA()], maxval=len(pkg_list)
    ).start()
    for pkg in pkg_list:
        q.put(pkg)
    # Block until the workers have processed every package.
    q.join()
    progress.finish()
    return outdated
Example #21
0
class ThreadedWrapperHandler(WrapperHandler):
    """This handler uses a single background thread to dispatch log records
    to a specific other handler using an internal queue.  The idea is that if
    you are using a handler that requires some time to hand off the log records
    (such as the mail handler) and would block your request, you can let
    Logbook do that in a background thread.

    The threaded wrapper handler will automatically adopt the methods and
    properties of the wrapped handler.  All the values will be reflected:

    >>> twh = ThreadedWrapperHandler(TestHandler())
    >>> from logbook import WARNING
    >>> twh.level_name = 'WARNING'
    >>> twh.handler.level_name
    'WARNING'
    """

    # Attributes stored on the wrapper itself rather than proxied through to
    # the wrapped handler.
    _direct_attrs = frozenset(["handler", "queue", "controller"])

    def __init__(self, handler, maxsize=0):
        """Wrap *handler*; maxsize bounds the record queue (0 = unbounded)."""
        WrapperHandler.__init__(self, handler)
        self.queue = ThreadQueue(maxsize)
        self.controller = TWHThreadController(self)
        self.controller.start()

    def close(self):
        # Stop the dispatch thread before closing the wrapped handler.
        self.controller.stop()
        self.handler.close()

    def emit(self, record):
        # Non-blocking hand-off; records are dropped when the queue is full.
        try:
            self.queue.put_nowait(record)
        except Full:
            # silently drop
            pass
Example #22
0
class Stack:
    """Stack (LIFO) implemented on top of a FIFO Queue.

    The top element is cached in self.first; the remaining elements live in
    self.q in pop order. push() rebuilds the queue so the previous top is
    dequeued first.
    """

    def __init__(self):
        self.q = Queue()
        self.first = None

    # @param x, an integer
    # @return nothing
    def push(self, x):
        # BUG FIX: the original tested `if not self.first`, which treats a
        # stored 0 (falsy) as "empty" and silently drops it; compare to None.
        if self.first is None:
            self.first = x
        else:
            nq = Queue()
            nq.put(self.first)
            while not self.q.empty():
                nq.put(self.q.get())
            self.q = nq
            self.first = x

    # @return nothing
    def pop(self):
        if not self.q.empty():
            self.first = self.q.get()
        else:
            self.first = None

    # @return an integer
    def top(self):
        return self.first

    # @return an boolean
    def empty(self):
        return self.first is None
Example #23
0
class event_manager(threading.Thread):
    """Thread that drains my_event objects from an internal queue and prints
    them, polling once per second while the queue is empty."""

    def __init__(self):
        self.e_queue = Queue()
        # BUG FIX: the original stored this flag as `self.stop`, which
        # shadowed the stop() method below and made it uncallable.
        self._stop_requested = False
        self.cache_arr = []
        self.cnt = 0  # number of idle polls while the queue was empty
        threading.Thread.__init__(self)

    def stop(self):
        """Ask run() to exit after its current iteration."""
        self._stop_requested = True

    def print(self, event):
        # BUG FIX: `print event` (a Python 2 print statement) is a syntax
        # error on Python 3 and `def print` is invalid on Python 2 — use the
        # function form, which behaves identically for one argument.
        print(event)

    def get_event_queue(self):
        return self.e_queue

    def run(self):
        while not self._stop_requested:
            if not self.e_queue.empty():
                pro_event = self.e_queue.get()
                if not isinstance(pro_event, my_event):
                    my_log.error("pro_event type error")
                else:
                    self.print(pro_event)
            else:
                self.cnt += 1
                sleep(1)
Example #24
0
    def run(self, tasks):
        """Execute *tasks* (callables with a .name attribute that return
        (code, msg)) on a small pool of daemon threads, reporting each result.
        Polls once a second so Ctrl+C can still interrupt the wait.
        (Python 2 code: Queue module, xrange, isAlive.)"""
        import time
        from Queue import Queue
        from threading import Thread

        def _job_core(tasks):
            # Worker loop: pop a task under the lock, run it, report.
            while tasks.qsize() > 0:
                if self.lock.acquire():
                    task = tasks.get()
                    self.lock.release()
                    code, msg = task()
                    report_info(">> [%s] : %s" % (task.name, msg))
                    # NOTE(review): exit() here only raises SystemExit in this
                    # worker thread, not the whole process — confirm intended.
                    if isinstance(code, int):
                        exit(code)
                    if tasks.qsize() != 0:
                        report_info("   Reaming %d." % (tasks.qsize()))
                    tasks.task_done()

        JOBS_SIZE = 5
        runningTasks = Queue()
        # Run tasks with job size
        for task in tasks:
            runningTasks.put(task)

        runners = []
        for i in xrange(JOBS_SIZE):
            runner = Thread(target=_job_core, args=(runningTasks,))
            runner.daemon = True
            runner.start()
            runners.append(runner)
        # Wait all tasks complete, It can response Ctrl + C interrupt.
        while any(runner.isAlive() for runner in runners):
            time.sleep(1)
Example #25
0
class EmitterThread(threading.Thread):
    """Daemon thread that forwards queued payloads to a single emitter
    callable, dropping new packets once the bounded queue is full."""

    def __init__(self, *args, **kwargs):
        # `name` stays in kwargs and is forwarded to Thread.__init__;
        # the emitter factory is called once to obtain the emitter callable.
        self.__name = kwargs["name"]
        self.__emitter = kwargs.pop("emitter")()
        self.__logger = kwargs.pop("logger")
        self.__config = kwargs.pop("config")
        self.__max_queue_size = kwargs.pop("max_queue_size", 100)
        self.__queue = Queue(self.__max_queue_size)
        threading.Thread.__init__(self, *args, **kwargs)
        self.daemon = True

    def run(self):
        # Drain forever; each item is a (data, headers) pair from enqueue().
        while True:
            data, headers = self.__queue.get()
            try:
                self.__logger.debug("Emitter %r handling a packet", self.__name)
                self.__emitter(data, self.__logger, self.__config)
            except Exception:
                self.__logger.error("Failure during operation of emitter %r", self.__name, exc_info=True)

    def enqueue(self, data, headers):
        # Non-blocking: when the queue is full the packet is dropped with a
        # warning rather than stalling the caller.
        try:
            self.__queue.put((data, headers), block=False)
        except Full:
            self.__logger.warn("Dropping packet for %r due to backlog", self.__name)
Example #26
0
 def get(self):
     """Return the next indexer from the queue after refreshing its EMS
     state, or None when the queue is empty.

     Note: calls Queue.empty/Queue.get unbound on self — presumably the
     enclosing class subclasses Queue (confirm against the class header).
     """
     if Queue.empty(self):
         return None
     else:
         indexer = Queue.get(self)
         indexer.refreshEMSState()
         return indexer
Example #27
0
def process(input_file, output_file, lang="eng", jobs=4):
    """OCR *input_file* page by page with up to *jobs* worker threads and
    merge the per-page PDFs into *output_file*; temp files always cleaned."""
    # Trailing separator so paths can be built by simple concatenation.
    tmp = os.path.join(mkdtemp(), "")
    try:
        resolution = get_resolution(input_file)
        w, h = resolution[0]
        logging.info(
            "{} pages, {}mm*{}mm {}".format(
                len(resolution), int(w / 7.2 * 2.54), int(h / 7.2 * 2.54), get_paper_type(w, h)
            )
        )
        logging.info("Extract pages from {}".format(input_file))
        images = extract_images(input_file, tmp)
        num_workers = min(len(images), jobs)
        task_queue = Queue()
        start_workers(num_workers, task_queue, lang, resolution)
        logging.info("Process {} pages with {} threads".format(len(images), num_workers))
        for page_number, image in enumerate(images, start=1):
            task_queue.put((page_number, image))
        # Wait until every page has been OCR'd.
        task_queue.join()
        pages = sorted(glob(os.path.join(tmp, "*.pdf")))
        logging.info("OCR complete. Merge pages into '{}'".format(output_file))
        merge_pdf(pages, output_file)
        check_call(["ls", "-lh", input_file, output_file])
    finally:
        rmtree(tmp)
Example #28
0
File: process.py Project: rkell/hpx
    def read(self, timeout=None):
        """Read from the child's stdout until EOF, or until roughly *timeout*
        seconds have passed; return the text collected so far in either case.

        A daemon thread drains stdout into a queue so this method can apply a
        timeout; an empty-string sentinel marks EOF.
        """
        read_queue = Queue()

        def enqueue_output():
            for block in iter(self._proc.stdout.read, b""):
                read_queue.put(block)

            # Empty string signals EOF to the reader loop below.
            read_queue.put("")

        thread = Thread(target=enqueue_output)
        thread.daemon = True
        thread.start()

        output = ""

        try:
            started = time()

            # Keep reading while there is no deadline, or while at least one
            # float epsilon of budget remains.
            while timeout is None or timeout >= float_info.epsilon:
                s = read_queue.get(timeout=timeout)

                if s:
                    output += s
                else:
                    return output

                if timeout is not None:
                    # BUG FIX: re-anchor `started` each pass; the original
                    # subtracted the *cumulative* elapsed time on every
                    # iteration, shrinking the budget far too fast.
                    now = time()
                    timeout -= now - started
                    started = now

            # BUG FIX: the original fell off the end and implicitly returned
            # None when the budget ran out; return the partial output instead.
            return output
        except Empty:
            return output
    def run(self, result):
        """Run the tests concurrently.

        This calls out to the provided make_tests helper, and then serialises
        the results so that result only sees activity from one TestCase at
        a time.

        ConcurrentTestSuite provides no special mechanism to stop the tests
        returned by make_tests, it is up to the make_tests to honour the
        shouldStop attribute on the result object they are run with, which will
        be set if an exception is raised in the thread which
        ConcurrentTestSuite.run is called in.
        """
        tests = self.make_tests(self)
        try:
            threads = {}
            queue = Queue()
            # Semaphore(1) makes the forwarding results mutually exclusive, so
            # `result` only ever sees one test's activity at a time.
            result_semaphore = threading.Semaphore(1)
            for test in tests:
                process_result = testtools.ThreadsafeForwardingResult(result, result_semaphore)
                reader_thread = threading.Thread(target=self._run_test, args=(test, process_result, queue))
                threads[test] = reader_thread, process_result
                reader_thread.start()
            # Each finished test posts itself on `queue`; join its thread and
            # drop it until none remain.
            while threads:
                finished_test = queue.get()
                threads[finished_test][0].join()
                del threads[finished_test]
        except:
            # Deliberate bare except: stop all in-flight results, then
            # re-raise whatever happened (including KeyboardInterrupt).
            for thread, process_result in threads.values():
                process_result.stop()
            raise
    def test_reset_device(self):
        """Selecting and closing every CUDA device twice from a fresh thread
        must not raise; worker exceptions are collected via a queue and
        asserted empty."""
        def newthread(exception_queue):
            try:
                devices = range(driver.get_device_count())
                print("Devices", devices)
                for _ in range(2):
                    for d in devices:
                        cuda.select_device(d)
                        print("Selected device", d)
                        cuda.close()
                        print("Closed device", d)
            except Exception as e:
                # Exceptions do not propagate across threads; queue them for
                # the main thread to assert on below.
                exception_queue.put(e)

        # Do test on a separate thread so that we don't affect
        # the current context in the main thread.

        exception_queue = Queue()
        t = threading.Thread(target=newthread, args=(exception_queue,))
        t.start()
        t.join()

        exceptions = []
        while not exception_queue.empty():
            exceptions.append(exception_queue.get())
        self.assertEqual(exceptions, [])