Example #1
# Assumed imports; the original project aliases them like this elsewhere:
from multiprocessing import Queue as _Queue, Event as _Event, Lock as _Lock
from time import sleep

def secondTest():
    id = 1  # shadows the id() builtin; kept as in the original
    input = _Queue()  # shadows the input() builtin; kept as in the original
    for i in range(10):
        input.put(i)
    output = _Queue()
    # TODO: play with the checkPeriod
    # These events are created for experimentation but are not passed to
    # the Worker in this test.
    start = _Event()
    step = _Event()
    pause = _Event()
    resume = _Event()
    stop = _Event()
    printerLock = _Lock()
    print("\n\tSecond test:")
    worker = Worker(id, target, input, output, debug=True)
    worker.postHook = postProcess
    worker.postExtraArgs = {'lock': printerLock,
                            'breaker': 8, 'worker': worker}
    worker.start()
    # Alternate pause/resume every 3 seconds until the worker drains the
    # input queue and finishes.
    while worker.isAlive():
        sleep(3)
        worker.pause()
        sleep(3)
        worker.resume()
        sleep(3)
    print("\n\tTest passed")
Example #2
def __init__(self, *args, **kwargs):
    super(EventManager, self).__init__(*args, **kwargs)
    # One Event per lifecycle transition of the managed object.
    self.__startEvent = _Event()
    self.__whenStart = None
    self.__stepEvent = _Event()
    self.__pauseEvent = _Event()
    # Objects that have requested a pause; stored so nested requests work.
    self.__pauseRequesterStack = []
    self.__resumeEvent = _Event()
    self.__stopEvent = _Event()
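The pauseRequesterStack hints that pause requests can nest: each requester registers itself, and the manager resumes only once every requester has released its pause. A sketch of that idea as two hypothetical EventManager methods, not taken from the project's actual code:

def pause(self, requester):
    # Remember who asked for the pause and raise the pause event.
    self.__pauseRequesterStack.append(requester)
    self.__pauseEvent.set()

def resume(self, requester):
    # Only resume once the last outstanding requester releases its pause.
    if requester in self.__pauseRequesterStack:
        self.__pauseRequesterStack.remove(requester)
    if not self.__pauseRequesterStack:
        self.__pauseEvent.clear()
        self.__resumeEvent.set()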
Example #3
    def __init__(self, id, target, inputQueue, outputQueue, checkPeriod=None,
                 preHook=None, preExtraArgs=None,
                 postHook=None, postExtraArgs=None,
                 *args, **kwargs):
        """
            Build an object...

            Arguments:
            + id: integer that identifies this worker among the others.
            + target: callable that the child process executes for each
              element taken from the input queue; it must accept those
              objects as its parameters.
            + inputQueue: multiprocessing queue where each element is one
              data input for the method executed by the child process.
            + outputQueue: multiprocessing queue where the results will be
              stored after the execution.
            * {pre, post}Hook: callable objects to be executed before or
              after the target.
            * {pre, post}ExtraArgs: dictionaries of extra arguments passed
              to the corresponding hook.
        """
        super(Worker, self).__init__(*args, **kwargs)
        self.__id = id
        if not callable(target):
            raise AssertionError("Target must be a callable object")
        self.__target = target
        self.__input = inputQueue
        self.__currentArgin = None
        self.__output = outputQueue
        self.__ctr = _Value(_ulonglong, 0)
        self.__computationTime = _Value(_float, 0.0)
        self.__currentArgout = None
        self.__checkPeriod = 60  # seconds (default)
        self.checkPeriod = checkPeriod  # assigned through the property setter
        # Events ---
        self.__events = _EventManager()
        self.__prepared = _Event()
        self.__prepared.clear()
        self.__internalEvent = _Event()
        self.__internalEvent.clear()
        # Hooks ---
        self.__preHook = None
        self.__preExtraArgs = None
        self.__postHook = None
        self.__postExtraArgs = None
        self.preHook = preHook
        self.preExtraArgs = preExtraArgs
        self.postHook = postHook
        self.postExtraArgs = postExtraArgs
        # thread and process ---
        self.__monitor = _Thread(target=self.__thread)
        self.__worker = None
        self.__monitor.daemon = True
        self.__monitor.start()
        self.__workerPausedFlag = False
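The constructor stores a 60-second default and then assigns the user's value through self.checkPeriod, which implies a validating property elsewhere in the class. A plausible sketch of that property, assuming it keeps the default when None is passed and rejects non-positive values; the real validation is not shown in this excerpt:

    @property
    def checkPeriod(self):
        return self.__checkPeriod

    @checkPeriod.setter
    def checkPeriod(self, value):
        # Keep the 60 s default when no value is given; otherwise validate.
        if value is None:
            return
        if not isinstance(value, (int, float)) or value <= 0:
            raise ValueError("checkPeriod must be a positive number")
        self.__checkPeriod = value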
Example #4
# Same assumed imports as in Example #1.
def firstTest():
    id = 0  # shadows the id() builtin; kept as in the original
    input = _Queue()  # shadows the input() builtin; kept as in the original
    for i in range(10):
        input.put(i)
    output = _Queue()
    # TODO: play with the checkPeriod
    # Created for experimentation; not passed to the Worker in this test.
    start = _Event()
    step = _Event()
    pause = _Event()
    resume = _Event()
    stop = _Event()
    printerLock = _Lock()
    print("\n\tFirst test:")
    worker = Worker(id, target, input, output, debug=True)
    worker.start()
    sleep(3)
    worker.pause()
    sleep(3)
    worker.resume()
    while worker.isAlive():
        sleep(3)
    print("\n\tTest passed")
Example #5
# Assumed imports: import pybeehive; from threading import Event as _Event.
# hive and bee_factory are presumably pytest fixtures.
def test_threaded_run(hive, bee_factory):
    stop = _Event()
    listener = bee_factory.create('listener')
    streamer = bee_factory.create('streamer')
    old_on_event = listener.on_event

    def on_event(event):
        # First event: release the main thread waiting below; any later
        # event: shut the hive down.
        if not stop.is_set():
            stop.set()
        else:
            hive.kill()
        return old_on_event(event)

    listener.on_event = on_event
    hive.add(listener)
    hive.add(streamer)
    hive.run(threaded=True)
    stop.wait()
    assert len(listener.calls) > 0, 'Streamer did not yield any events'
    assert isinstance(listener.calls[0],
                      pybeehive.Event), 'Streamer did not yield correct data'
Example #6
    def processes_start(self):
        """Create shared-memory buffers and start the worker processes."""
        # Create shared memory objects to be shared with worker processes.
        arr = self._sofb_current_readback_ref

        rbref = _shm.Array(_shm.ctypes.c_double, arr.size, lock=False)
        self._sofb_current_readback_ref = _np.ndarray(
            arr.shape, dtype=arr.dtype, buffer=memoryview(rbref))

        ref = _shm.Array(_shm.ctypes.c_double, arr.size, lock=False)
        self._sofb_current_refmon = _np.ndarray(
            arr.shape, dtype=arr.dtype, buffer=memoryview(ref))

        fret = _shm.Array(_shm.ctypes.c_int, arr.size, lock=False)
        self._sofb_func_return = _np.ndarray(
            arr.shape, dtype=_np.int32, buffer=memoryview(fret))

        # Unit converter.
        self.converter = UnitConverter(self._sofb_psnames)

        # Subdivide the BBB name list among the processes as evenly as
        # possible: the first `rem` processes get one extra name.
        nr_bbbs = len(PSSOFB.BBBNAMES)
        div = nr_bbbs // self._nr_procs
        rem = nr_bbbs % self._nr_procs
        sub = [div*i + min(i, rem) for i in range(self._nr_procs+1)]
        for i in range(self._nr_procs):
            bbbnames = PSSOFB.BBBNAMES[sub[i]:sub[i+1]]
            evt = _Event()
            evt.set()
            theirs, mine = _Pipe(duplex=False)
            proc = _Process(
                target=PSSOFB._run_process,
                args=(self._ethbridge_cls, bbbnames, theirs, evt,
                      arr.shape, rbref, ref, fret),
                daemon=True)
            proc.start()
            self._procs.append(proc)
            self._doneevts.append(evt)
            self._pipes.append(mine)
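PSSOFB._run_process itself is not shown here. Judging only from this call site (the read end of a pipe, a done event, and three shared buffers), it presumably loops receiving commands and signalling completion. Everything in this sketch, including the shutdown sentinel, is an assumption; numpy is taken to be imported as _np, as in the excerpt:

    @staticmethod
    def _run_process(ethbridge_cls, bbbnames, pipe, evt, shape,
                     rbref, ref, fret):
        # Wrap the shared ctypes buffers as numpy arrays, mirroring what
        # the parent process does.
        rbref = _np.ndarray(shape, dtype=float, buffer=memoryview(rbref))
        ref = _np.ndarray(shape, dtype=float, buffer=memoryview(ref))
        fret = _np.ndarray(shape, dtype=_np.int32, buffer=memoryview(fret))
        while True:
            cmd = pipe.recv()   # block until the parent sends work
            if cmd is None:     # hypothetical shutdown sentinel
                break
            # ... talk to the BBBs handled by this process and write the
            # results into rbref, ref and fret ...
            evt.set()           # tell the parent this round is done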
Example #7
def __init__(self, *args, **kwargs):
    super(ConditionCheck, self).__init__(*args, **kwargs)
    self.__IPaused = _Event()
    self.__events = _EventManager()
Example #8
# Assumed aliases: _Event, _Queue and _Process come from multiprocessing;
# _izip is itertools.izip on Python 2 (use the builtin zip on Python 3).
def _generate_parallel(n_process, n_iter, gen_func, args_list):
    """
    Generator that spawns processes to run generators, then uses one queue
    per process to retrieve the results, which it then yields.
    """
    n_items = len(args_list)

    # Decide how to distribute the generators over the processes: the first
    # process takes `n_left` of them, every other process takes `n_pp`.
    if n_items <= n_process and n_process > 0:
        n_process = n_items
        n_pp = 1
        n_left = 1
    elif n_items > n_process and n_process > 0:
        n_pp = n_items // n_process
        n_left = n_pp + n_items % n_process

    # If a single process is requested, run the generators inline.
    if n_process <= 1:
        gens = []
        for arg in args_list:
            gens.append(gen_func(*arg))

        generator = _izip(*gens)

        for s in generator:
            yield s

        return
    # split up argument list
    sub_lists = [args_list[0:n_left]]
    sub_lists.extend([
        args_list[n_left + i * n_pp:n_left + (i + 1) * n_pp]
        for i in range(n_process - 1)
    ])

    # create lists of queues, events, and processes.
    es = []
    qs = []
    ps = []
    for i in range(n_process):
        e = _Event()
        q = _Queue(1)
        p = _Process(target=_worker, args=(gen_func, sub_lists[i], q, e))
        p.daemon = True
        es.append(e)
        qs.append(q)
        ps.append(p)

    # start processes
    for p in ps:
        p.start()

    # Main loop: collect one batch of results per iteration.
    for i in range(n_iter):
        s = []
        # retrieve results for each sub-process and let the process know to continue calculation.
        for q, e in _izip(qs, es):
            s.extend(q.get())
            e.set()  # free process to do next calculation

        # yield all results
        yield tuple(s)

    # end processes
    for p in ps:
        p.join()
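The _worker target is not shown. From the parent's protocol above (one q.get() per round, followed by e.set() to release the next round), it presumably advances its generators in lockstep and waits on the event between rounds. A minimal sketch under those assumptions:

def _worker(gen_func, args_sub_list, q, e):
    # Build one generator per argument tuple and advance them together,
    # handing each batch of results to the parent through the queue.
    gens = [gen_func(*args) for args in args_sub_list]
    for results in _izip(*gens):
        q.put(list(results))  # blocks until the parent takes it (maxsize 1)
        e.wait()              # wait for the parent to acknowledge the batch
        e.clear()             # re-arm the event for the next round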