Example #1
File: mp_ext.py Project: XinliYu/utix
def parallel_process_by_queue(num_p,
                              data_iter,
                              target,
                              args,
                              ctx: BaseContext = None,
                              task_unit_size=5000,
                              print_out=__debug__):
    if isinstance(target, MPTarget):
        target.use_queue = True
    if ctx is None:
        ctx = get_context('spawn')
    iq = Queue(ctx=ctx)
    oq: Manager = ctx.Manager().Queue()

    tic(f"Creating input queue with task unit size {task_unit_size}",
        verbose=print_out)
    cnt_task_unit = 0
    for item in tqdm(slices__(data_iter, task_unit_size)):
        iq.put(item)
        cnt_task_unit += 1
    jobs = [None] * num_p
    for i in range(num_p):
        jobs[i] = ctx.Process(target=target, args=(i, iq, oq) + args)
    toc()

    tic(f"Working on {cnt_task_unit} task units with {num_p} processes",
        verbose=print_out)
    start_and_wait_jobs(jobs)

    out = []
    while not oq.empty():
        out.append(oq.get_nowait())
    toc()
    return out
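A minimal, self-contained sketch of the same fan-out pattern using only the standard library (tic/toc, slices__ and MPTarget above are utix helpers and are not used here; the worker and function names are illustrative):

from multiprocessing import get_context

def _worker(pid, iq, oq):
    # consume chunks until the poison pill (None) arrives
    while True:
        chunk = iq.get()
        if chunk is None:
            break
        oq.put([x * x for x in chunk])  # stand-in for real work

def parallel_square(data, num_p=4, task_unit_size=1000):
    ctx = get_context('spawn')
    iq, oq = ctx.Queue(), ctx.Manager().Queue()
    for i in range(0, len(data), task_unit_size):
        iq.put(data[i:i + task_unit_size])
    for _ in range(num_p):
        iq.put(None)  # one pill per worker
    jobs = [ctx.Process(target=_worker, args=(p, iq, oq)) for p in range(num_p)]
    for j in jobs:
        j.start()
    for j in jobs:
        j.join()
    out = []
    while not oq.empty():
        out.append(oq.get_nowait())
    return out

if __name__ == '__main__':
    print(parallel_square(list(range(10)), num_p=2, task_unit_size=3))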
Example #2
 def put(self, contents: QT, block=True, timeout=None):
     self.lock.acquire()
     while not self.empty():
         Queue.get(self, block=False)
     # NOTE/TODO: this is necessary because multiprocessing Queues are
     # awkward. In short, if you call q.put_nowait() too quickly, it
     # breaks. For example, say you were in ipython and typed the
     # following:
     # - q = MonoQueue()
     # - q.put_nowait(2)
     # - q.put_nowait(3)
     # - q.put_nowait(4)
     # - q.put_nowait(5)
     # EVEN THOUGH there is a Lock() to make access to the Queue atomic,
     # one of the non-first 'put_nowait()' calls will acquire the lock,
     # the 'self.empty()' call apparently returns True even though
     # something is actually in the queue, so it does not '.get()' it;
     # it then tries to put into the still-full queue and raises a
     # 'Full' exception.
     # So basically, if something tries to put into the queue too
     # quickly, everything breaks. And yes, I wrote a pytest for this:
     # if you step through under a debugger it works fine, but as soon
     # as you just run it, it breaks.
     # UGH, maybe I'm doing something wrong.
     with suppress(Full):
         Queue.put(self, contents, block=block, timeout=timeout)
     self.lock.release()
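The failure mode described above comes from empty() and qsize() being only approximate on multiprocessing queues (a background feeder thread moves items into the pipe). A safer drain-then-put variant, sketched here against the same idea, trusts the Empty exception rather than empty():

from contextlib import suppress
from queue import Empty, Full

def put_latest(q, contents, timeout=None):
    # discard anything stale; stop when get_nowait() raises Empty
    with suppress(Empty):
        while True:
            q.get_nowait()
    # the queue can still fill up between the drain and the put
    with suppress(Full):
        q.put(contents, block=True, timeout=timeout)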
Example #3
class WorkerThreads(object):
    def __init__(self, threads=1):
        """
        Initialize the thread pool and queues.
        """
        self.pools = ThreadPool(processes=threads)
        self.updater_queue = Queue()

    def get_updater_queue(self):
        return self.updater_queue

    def updater(self, ident, state, meta):
        """
        Updater function: This just posts a message to a queue.
        """
        self.updater_queue.put({'id': ident, 'state': state, 'meta': meta})

    def pull(self, request, updater, testmode=0):
        try:
            pull(request, updater, testmode=testmode)
        except Exception as err:
            resp = {'error_type': str(type(err)),
                    'message': str(err)}

            updater.update_status('FAILURE', 'FAILURE', response=resp)

    def expire(self, request, updater):
        try:
            remove_image(request, updater)
        except Exception as err:
            resp = {'error_type': str(type(err)),
                    'message': str(err)}
            updater.update_status('FAILURE', 'FAILURE', response=resp)

    def wrkimport(self, request, updater, testmode=0):
        try:
            img_import(request, updater, testmode=testmode)
        except Exception as err:
            resp = {'error_type': str(type(err)),
                    'message': str(err)}
            updater.update_status('FAILURE', 'FAILURE', response=resp)

    def dopull(self, ident, request, testmode=0):
        """
        Kick off a pull operation.
        """
        updater = Updater(ident, self.updater)
        self.pools.apply_async(self.pull, [request, updater],
                               {'testmode': testmode})

    def doexpire(self, ident, request, testmode=0):
        updater = Updater(ident, self.updater)
        self.pools.apply_async(self.expire, [request, updater])

    def dowrkimport(self, ident, request, testmode=0):
        logging.debug("wrkimport starting")
        updater = Updater(ident, self.updater)
        self.pools.apply_async(self.wrkimport, [request, updater],
                               {'testmode': testmode})
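A minimal consumer sketch for the status queue above (assumed usage, not part of the original class): drain the dicts that updater() posts and keep the latest state per id.

from queue import Empty

def drain_status(updater_queue, timeout=1.0):
    states = {}
    try:
        while True:
            msg = updater_queue.get(timeout=timeout)
            states[msg['id']] = (msg['state'], msg['meta'])
    except Empty:
        return states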
Example #5
def put_while(queue: Queue,
              task: Any,
              predicate: Callable[[], bool],
              timeout: int = 3):
    while predicate():
        try:
            queue.put(task, block=True, timeout=timeout)
            break
        except Full:
            pass
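A hedged usage sketch for put_while: a multiprocessing Event works well as the predicate, so the retry loop gives up once shutdown is signalled (the queue and event names are illustrative).

from multiprocessing import Event, Queue

stop = Event()
q = Queue(maxsize=1)
put_while(q, {'job': 1}, predicate=lambda: not stop.is_set(), timeout=1)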
Example #6
    def put(self, obj, block=True, timeout=None):
        Queue.put(self, obj, block, timeout)
        self._put_counter.value += 1

        if self.qsize() != 0:
            self.cond_notempty.acquire()
            try:
                self.cond_notempty.notify_all()
            finally:
                self.cond_notempty.release()
Example #7
class BasicActor(ABC):
    name: Optional[str]
    in_queue: Queue
    out_queue: Queue
    alive: Value
    _loop_task: Optional[Task]

    def __init__(self, name=None):
        self.name = name
        self._state = None
        ctx = SpawnContext()
        self.alive = Value('b', True)
        self.in_queue = Queue(ctx=ctx, maxsize=120)
        self.out_queue = Queue(ctx=ctx, maxsize=110)

    async def runner(self):
        loop = get_running_loop()
        self._state = await self.handle_started()
        self._loop_task = loop.create_task(self._do_loop(loop))

    async def _do_loop(self, loop: AbstractEventLoop):
        while loop.is_running():
            try:
                sent_from, message = self.in_queue.get(timeout=0.1)
                loop.create_task(
                    self._handle_message(message, sent_from, self._state))
            except Empty:
                pass
            await asyncio.sleep(0)

    def send_message(self, to, message):
        if self.out_queue.qsize() > 100:
            logger.warning("Shedding excess outgoing message")
            return
        self.out_queue.put((to, message))

    async def _handle_message(self, message, sent_from, state):
        try:
            self._state = await self.handle_message(message, sent_from, state)
        except:
            self.stop()
            raise

    def stop(self):
        self.alive.value = False
        if self._loop_task:
            self._loop_task.cancel()

    @abstractmethod
    async def handle_message(self, message, sent_from, state) -> Any:
        pass

    @abstractmethod
    async def handle_started(self) -> Any:
        pass
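Note that in_queue.get(timeout=0.1) above blocks the event loop for up to 100 ms per iteration. A sketch of a non-blocking variant (same attributes assumed) hands the blocking get to a worker thread via run_in_executor:

from queue import Empty

async def _do_loop_nonblocking(self, loop):
    while loop.is_running():
        try:
            # block=True, timeout=0.1, but run in a worker thread
            sent_from, message = await loop.run_in_executor(
                None, self.in_queue.get, True, 0.1)
        except Empty:
            continue
        loop.create_task(
            self._handle_message(message, sent_from, self._state))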
Example #8
def run(worker_fn: Callable[[], DataWorker], max_tasks: int, in_queue: Queue,
        out_queue: Queue, cancel_process: Value):
    # This is the worker function of a spawned process.
    # Until it receives a Finished flag, it fetches input samples, which are yielded in the worker.
    # The outputs are then written to the output queue.
    # Each worker stops once max_tasks is reached.
    logger.debug("Worker starting")
    worker = worker_fn()
    worker.initialize_thread()

    def generator():
        # Transform the input queue to a generator
        # Note that other processes read from the sample in_queue
        while True:
            try:
                s = in_queue.get(timeout=0.01)
            except Empty:
                logger.debug("In queue empty.")
                if cancel_process.value:
                    logger.debug("Canceling working generator.")
                    break
            else:
                if isinstance(s, Finished):
                    logger.debug(
                        "Received Finished. Stopping working generator")
                    break
                yield s

    for out in worker.process(generator()):
        # Process the data and write it to the queue
        while True:
            if cancel_process.value:
                logger.debug("Canceling working processor (inner).")
                break
            try:
                out_queue.put(out, timeout=0.01)
            except Full:
                logger.debug("Out queue Full.")
                continue
            else:
                break

        if cancel_process.value:
            logger.debug("Canceling working processor (outer).")
            break

        max_tasks -= 1
        if max_tasks == 0:
            logger.debug("Max tasks reached for this worker. Stopping.")
            break

    logger.debug("Worker finished")
    if cancel_process.value:
        out_queue.cancel_join_thread(
        )  # this prevents a deadlock if the generator is stopped prematurely
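A hedged driver sketch for run() above: DataWorker, Finished and the module logger belong to the original project, so a tiny stand-in worker is used here to show the expected shape of the protocol.

from multiprocessing import Process, Queue, Value

class Finished:
    # sentinel telling the input generator to stop
    pass

class EchoWorker:
    def initialize_thread(self):
        pass

    def process(self, samples):
        for s in samples:  # one output per input sample
            yield s

if __name__ == '__main__':
    in_q, out_q = Queue(), Queue()
    cancel = Value('b', False)
    for i in range(3):
        in_q.put(i)
    in_q.put(Finished())
    p = Process(target=run, args=(EchoWorker, 10, in_q, out_q, cancel))
    p.start()
    print([out_q.get() for _ in range(3)])
    p.join()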
Example #9
File: ICOM_P.py Project: liuzhao1006/ICOM
    def __put(self, obj, block=True, timeout=None):
        if win_gui_Queue.__sync_flow_ctrl_count <= win_gui_Queue.__SAMPLE_SIZE:
            Queue.put(self, obj, block, timeout)
            if win_gui_Queue.__real_proc_data_msg_count > win_gui_Queue.__MAX_MSG_RATE_THREDHOLD:
                self.flow_ctrl(win_gui_Queue.__real_proc_data_msg_count *
                               win_gui_Queue.__SAMPLE_SIZE //
                               (win_gui_Queue.__MAX_MSG_RATE_THREDHOLD))
            return self.do_send_ctrl_msg(ICOM_CTRL_MSG.ID_PROC_DATA_MSG,
                                         win_gui_Queue.__SAMPLE_SIZE)

        self.send_data_sync_msg(obj)
Example #10
def get_answer_from_mqtt(reply_queue: mQueue, topic: str,
                         broker_settings: dict, tls: dict) -> None:
    """
    subscribe.simple() receives the reply from MQTT and places it in the queue.
    """

    answer = subscribe.simple(topic,
                              hostname=broker_settings["host"],
                              port=broker_settings["port"],
                              retained=False,
                              msg_count=1,
                              tls=tls)

    event_log.info("Received message from topic %s", topic)
    reply_queue.put(answer.payload.decode())
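A hedged usage sketch: run the subscriber in a child process and wait for a single reply (topic, host and port are placeholders; tls=None means no TLS in paho-mqtt).

from multiprocessing import Process, Queue

reply_q = Queue()
p = Process(target=get_answer_from_mqtt,
            args=(reply_q, 'devices/answer',
                  {'host': 'localhost', 'port': 1883}, None))
p.start()
print(reply_q.get(timeout=30))  # raises queue.Empty if no reply arrives
p.join()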
Example #11
File: actor.py Project: cyberj/pulsar
 def testDodgyActor(self):
     queue = Queue()
     yield self.spawn(actor_class=DodgyActor, max_requests=1,
                      ioqueue=queue, on_event=on_event)
     proxy = pulsar.get_actor().get_actor(self.a.aid)
     self.assertEqual(proxy.name, 'dodgyactor')
     queue.put(('request', 'Hello'))
     c = 0
     while c < 20:
         if not proxy.is_alive():
             break
         else:
             c += 1
             yield pulsar.NOT_DONE
     self.assertFalse(proxy.is_alive())
Example #12
    def __init__(self, target, args, filename, cpus=cpu_count()):
        # macOS starts process with fork by default: https://zhuanlan.zhihu.com/p/144771768
        if platform.system() == "Darwin":
            set_start_method("fork")

        workerq = Queue()
        writerq = Queue()

        for a in args:
            workerq.put(a)

        cpus = min(cpus, len(args))
        for i in range(cpus):
            workerq.put(Poison())

        self.worker = Jobs(work, args=[(workerq, writerq, target)] * cpus)
        self.writer = Process(target=write,
                              args=(workerq, writerq, filename, cpus))
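The Poison() objects above act as poison pills: one per worker, so every consumer eventually sees one and exits. A stand-alone sketch of the pattern (Jobs, work, write and Poison are project classes; None serves as the pill here):

from multiprocessing import Process, Queue

def consumer(q):
    while True:
        item = q.get()
        if item is None:  # poison pill: this worker is done
            break
        print('working on', item)

if __name__ == '__main__':
    q = Queue()
    for a in ('a', 'b', 'c'):
        q.put(a)
    workers = [Process(target=consumer, args=(q,)) for _ in range(2)]
    for w in workers:
        q.put(None)  # one pill per worker
    for w in workers:
        w.start()
    for w in workers:
        w.join()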
Example #13
 def start_work(self, worker, work, num_jobs, *args, **kwargs):
     '''work should be an indexable sequence'''
     wlen = len(work)
     if not wlen: return
     if self._counter is not None: self._counter.set_work(wlen)
     #determine number of jobs to start
     if not num_jobs: num_jobs = cpu_count
     #prepare jobs
     in_queue = Queue(wlen+num_jobs)
     self._jobs = [None]*num_jobs
     for j in xrange(num_jobs):
         queue = Queue()
         job   = UProcess(target=worker, args=(queue, self._abort_event, 
                                               in_queue, work)+args, kwargs=kwargs)
         job.daemon = self._daemonic
         self._jobs[j] = Job(job,queue)
     self.start_jobs()
     for i in xrange(wlen): in_queue.put(i, False)
     for j in xrange(num_jobs): in_queue.put(None, False)
Example #14
def insert_known_authors(citations: List[Citation],
                         known_authors: Iterable[str], queue: Queue) -> None:
    known_authors_pattern = '|'.join(
        [re.escape(author.strip().lower()) for author in known_authors])

    multiple_authors_pattern = regex.compile(
        f'^({known_authors_pattern}){{e<=1}}(?: +(?:—|-) '
        fr'+({known_authors_pattern}){{e<=1}})+\.?\s+(\p{{Lu}}[^ .]+ .+)',
        regex.UNICODE | regex.IGNORECASE | regex.DOTALL)

    single_author_pattern = regex.compile(
        r'^({}){{e<=1}}\.?\s+(\p{{Lu}}[^ .]+ )'.format(known_authors_pattern),
        regex.UNICODE | regex.IGNORECASE)

    for citation in citations:
        citation_with_authors = (
            find_multiple_authors(citation, multiple_authors_pattern)
            or find_single_author(citation, single_author_pattern))
        if citation_with_authors:
            queue.put(reparse_citation(citation_with_authors))
Example #15
 def put(self, element):
     '''
     Put the element in the queue
     Raises an exception if too many errors are
     encountered
     '''
     dt = 1e-3
     while dt < 1:
         try:
             Queue.put(self,element)
             return
         except IOError:
             logger.warning('IOError encountered in SafeQueue put()')
             try:
                 time.sleep(dt)
             except:pass
             dt *= 2
             
     e = IOError('Unrecoverable error')
     raise e
Example #16
File: mp_ext.py Project: XinliYu/utix
 def __call__(self, pid, iq: Queue, data, *args):
     if self.pass_each_data_item:
         it = chain(*(chunk_iter(self.create_iterator(dataitem, *args),
                                 chunk_size=self.chunk_size,
                                 as_list=True) for dataitem in data))
     else:
         it = chunk_iter(self.create_iterator(data, *args),
                         chunk_size=self.chunk_size,
                         as_list=True)
     hprint_message('initialized', f'{self.name}{pid}')
     while True:
         while not iq.full():
             try:
                 obj = next(it)
             except StopIteration:
                 return
             iq.put(obj)
         hprint_pairs(('full queue for', f'{self.name}{pid}'),
                      ('wait for', self._wait_time))
         sleep(self._wait_time)
Example #17
    def put(self, element):
        '''
        Put the element in the queue
        Raises an exception if too many errors are
        encountered
        '''
        dt = 1e-3
        while dt < 1:
            try:
                Queue.put(self, element)
                return
            except IOError:
                logger.warning('IOError encountered in SafeQueue put()')
                try:
                    time.sleep(dt)
                except:
                    pass
                dt *= 2

        e = IOError('Unrecoverable error')
        raise e
Example #18
    def process(self, input_queue: Queue, output_queue: Queue) -> None:
        database = FileDatabase(self.database_path)

        chunk = []

        file = input_queue.get()
        while not isinstance(file, TerminateOperand):
            correct_input, message = self.is_correct_input(file)
            if not correct_input:
                print(message)
                file = input_queue.get()  # fetch the next item, or this loops forever on the same file
                continue

            if self.process_item(file):
                chunk.append(file)

            if len(chunk) >= self.chunk_size:
                self.process_chunk(database, chunk)
                for chunklet in chunk:
                    output_queue.put(chunklet)

                chunk = []

            file = input_queue.get()

    input_queue.put(file)  # put the TerminateOperand back so sibling workers also stop
        self.process_chunk(database, chunk)
        for chunklet in chunk:
            output_queue.put(chunklet)
Example #19
 def start_work(self, worker, work, num_jobs, *args, **kwargs):
     '''work should be an indexable sequence'''
     wlen = len(work)
     if not wlen: return
     if self._counter is not None: self._counter.set_work(wlen)
     #determine number of jobs to start
     if not num_jobs: num_jobs = cpu_count
     #prepare jobs
     in_queue = Queue(wlen + num_jobs)
     self._jobs = [None] * num_jobs
     for j in xrange(num_jobs):
         queue = Queue()
         job = UProcess(target=worker,
                        args=(queue, self._abort_event, in_queue, work) +
                        args,
                        kwargs=kwargs)
         job.daemon = self._daemonic
         self._jobs[j] = Job(job, queue)
     self.start_jobs()
     for i in xrange(wlen):
         in_queue.put(i, False)
     for j in xrange(num_jobs):
         in_queue.put(None, False)
Example #20
    def get_ai_move_mp(self,
                       queue: Queue,
                       setting: int,
                       player: int,
                       depth: int = 6):
        """Put result in queue (for multiprocessing)"""

        if setting == 1:
            queue.put(self.get_ai_move(player, depth))
        elif setting == 2:
            queue.put(self.get_perfect_move(player))
        else:
            queue.put(random.choice(self.valid_columns()))
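A hedged usage sketch for get_ai_move_mp, wrapped as a helper so the game instance (an object of the class above) stays a parameter:

from multiprocessing import Process, Queue

def ai_move_in_subprocess(game, setting=1, player=2, depth=6, timeout=30):
    q = Queue()
    p = Process(target=game.get_ai_move_mp, args=(q, setting, player),
                kwargs={'depth': depth})
    p.start()
    move = q.get(timeout=timeout)  # blocks until the worker puts its result
    p.join()
    return move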
Example #21
    def get_ai_move_mp(self,
                       queue: Queue,
                       setting: int,
                       player: int,
                       depth: int = 6):
        """Put result in queue (for multiprocessing)"""

        if setting == 1:
            if random.choice([False, True, False]):
                queue.put(self.get_ai_move(player, depth + 1))
            else:
                queue.put(self.get_ai_move(player, depth))
        else:
            queue.put(random.choice(self.valid_columns()))
Example #22
File: ICOM_P.py Project: liuzhao1006/ICOM
    def send_data_sync_msg(self, msg_obj=None, msg_type=None, force_sync=None):
        need_send_ctrl_msg = False
        win_gui_Queue.__send_lock.acquire()
        win_gui_Queue.__msg_count += 1
        win_gui_Queue.__msg_reminder_count += 1
        data_msg_cnt = win_gui_Queue.__sync_flow_ctrl_count

        if force_sync is True:
            need_send_ctrl_msg = True
            data_msg_cnt = (win_gui_Queue.__msg_count +
                            win_gui_Queue.__msg_reminder_count
                            ) * win_gui_Queue.__SAMPLE_SIZE // 2
            win_gui_Queue.__msg_count = 0
            win_gui_Queue.__msg_reminder_count = 0
        elif win_gui_Queue.__msg_count >= win_gui_Queue.__flow_ctrl_interger:
            need_send_ctrl_msg = True
            win_gui_Queue.__msg_count = 0
        elif win_gui_Queue.__msg_reminder_count >= win_gui_Queue.__flow_ctrl_remainder > win_gui_Queue.__flow_ctrl_interger:
            need_send_ctrl_msg = True
            win_gui_Queue.__msg_reminder_count = 0

        if msg_obj:
            self.__data_queue_msg_count += 1
            if self.__buffer_msg:
                self.__buffer_msg.append(msg_obj)
            elif win_gui_Queue.__sync_flow_ctrl_count >= win_gui_Queue.__SAMPLE_SIZE * 5:
                self.__buffer_msg.append(msg_obj)
            else:
                Queue.put(self, msg_obj)

        if need_send_ctrl_msg is True:
            if self.__buffer_msg:
                Queue.put(self, self.__buffer_msg)
                self.__buffer_msg = []
            self.__data_queue_msg_count = 0
        elif len(self.__buffer_msg) > 3:
            Queue.put(self, self.__buffer_msg)
            self.__buffer_msg = []
        win_gui_Queue.__send_lock.release()
        if need_send_ctrl_msg is True:
            self.do_send_ctrl_msg(
                msg_type if msg_type else ICOM_CTRL_MSG.ID_PROC_DATA_MSG,
                data_msg_cnt)
Example #23
def test_monitor_thread():
    """Test the monitor thread rerouting news to engine signal.

    """
    from atom.api import Value

    class E(BaseEngine):

        test = Value()

        def _observe_progress(self, val):
            self.test = val

    q = Queue()
    e = E()
    m = ThreadMeasureMonitor(e, q)
    m.start()
    q.put('test')
    q.put(('', ''))
    q.put((None, None))
    m.join()

    assert e.test == 'test'
Example #25
File: ApiBase.py Project: sys-git/epyrpc
class ApiBase(iApi, iIpcTransportDataReceiveListener):
    r"""
    @summary: The base-class to the top-level api object ONLY, not sub-apis.
    """
    DEFAULT_MAX_ASYNC_HANDLERS = 1
    def __init__(self, name, ns="", solicited=True, ignoreUnhandled=False, maxAsync=None):
        super(ApiBase, self).__init__(ns=ns, solicited=solicited, name=name)
        self._setup(ns=self._getNamespace(), solicited=self.solicited, ipc=self.ipc)
        self._dataRxCount = itertools.count(0)
        self._ignoreUnhandled = ignoreUnhandled
        if (maxAsync == None) or (maxAsync < 1):
            maxAsync = ApiBase.DEFAULT_MAX_ASYNC_HANDLERS
        self._maxAsync = maxAsync
        self._q = Queue()
        self._workers = []
        self._createAsyncWorkers()
        self.isAlive = True
    def _createAsyncWorkers(self, start=True):
        #    Create the thread pool to handle the api calls.
        for _ in range(0, self._maxAsync):
            thread = ApiAsyncWorker.create(self._q, self, start=start)
            self._workers.append(thread)
        self._logger.debug("Created workers.")
    def __del__(self):
        self.teardown()
    def teardown(self):
        if self is threading.current_thread(): return 
        if not self.isAlive: return
        self.isAlive = False
        #    Unfortunately we require time to stop the workers.
        self._logger.debug("Stopping async workers...")
        for _ in range(0, self._maxAsync):
            self._q.put(STOP())
        time.sleep(1)
        for worker in self._workers:
            worker.stop()
        for worker in self._workers:
            if worker.isAlive(): worker.join()
        self._workers = []
        self._q.close()
        time.sleep(1)
        del self._q
        self._q = None
        self._logger.debug("Stopped async workers (all daemon anyway).")
        #    Now un-bind our data-receive listener from the IPC:
        if self._ipc != None:
            self._ipc.setTransportDataReceiveListener(self)
        self._ipc = None
    def _newIpc(self):
        super(ApiBase, self)._newIpc()
        #    Now bind our data-receive listener to the IPC:
        self._ipc.setTransportDataReceiveListener(self)
    def transportDataReceive(self, tId, data):
        r"""
        @summary: Data is received that is NOT part of an existing transaction.
        We need to decide what to do with it...
        Recursively ask each of our sub-api's to decode the data and handle it.
        If no one can, then return UnsupportedApiError() (unless we consume it with:
        self._ignoreUnhandled==True).
        The handlers will have previously been set by the controlling entity, ie:
        ExecutionOrganiser, Head.
        This method always returns NoResponseRequired, making the call asynchronous.
        """
        myNsPrefix = self._getNamespacePrefix()
        try:
            count = self._dataRxCount.next()
            if isinstance(data, iApiTransportItem):
                ns = data.ns()
                if self._isInMyNamespace(ns):
                    self._findHandler(ns)
                    args = data.args()
                    kwargs = data.kwargs()
                    synchronous = True
                    self._q.put(KNOWN(ns, tId, synchronous, count, args, kwargs))
                    raise NoResponseRequired(ns)
            else:
                #    Inform our listener about the data that we can't handle:
                handler = self.transportDataReceiveListener
                if handler != None:
                    self._q.put(UNKNOWN(tId, data))
                raise NoResponseRequired(myNsPrefix)
        except UnsupportedApiError, e:
            if self._ignoreUnhandled == False:
                #    Propagate exception directly as before.
                raise
            #    Consume silently:
            self._logger.debug("UnsupportedApiError: %(NS)s" % {"NS":e.ns()})
            raise NoResponseRequired(myNsPrefix, e)
Example #26
class ActionGeneratorDataFormer(object):
    class AuthorAdder(Process):
        def __init__(self, queue, outer):
            super(ActionGeneratorDataFormer.AuthorAdder, self).__init__(name="author_adder")
            self.q = queue
            self.outer = outer

        def run(self):
            log.info("author adder started")
            while 1:
                try:
                    author = self.q.get()
                    r_author = self.outer._get_author_object(author)
                    c_karma = r_author.__dict__.get("comment_karma", 0)
                    l_karma = r_author.__dict__.get("link_karma", 0)
                    if c_karma > AE_MIN_COMMENT_KARMA and l_karma > AE_MIN_LINK_KARMA:
                        log.info("will add [%s] for action engine" % (author))
                        self.outer._add_author_data(r_author)
                except Exception as e:
                    log.exception(e)

    def __init__(self):
        self._storage = AuthorsStorage("author_generator_data_former")
        self._r = praw.Reddit(user_agent=choice(USER_AGENTS))
        self._queue = Queue()

        adder = ActionGeneratorDataFormer.AuthorAdder(self._queue, self)
        adder.start()

    def is_author_added(self, author):
        found = self._storage.steps.find_one({"author": author})
        return found is not None

    def save_action(self, author, action_type, time, end_time=None):
        q = {"author": author, "action_type": action_type}
        if isinstance(time, datetime):
            q["time"] = time_hash(time)
        elif isinstance(time, int):
            q["time"] = time

        if end_time:
            q["end_time"] = end_time

        found = self._storage.steps.find_one(q)
        if found:
            self._storage.steps.update_one(q, {"$inc": {"count": 1}})
        else:
            q["count"] = 1
            self._storage.steps.insert_one(q)

    def revert_sleep_actions(self, group_id=None):
        q = {'end_time': {'$exists': True}}
        if group_id:
            q["used"] = group_id
        self._storage.steps.delete_many(q)

    def fill_consume_and_sleep(self, authors_min_actions_count=AE_AUTHOR_MIN_ACTIONS, min_sleep=AE_MIN_SLEEP_TIME,
                               max_sleep=AE_MAX_SLEEP_TIME):
        for author in self._storage.get_interested_authors(authors_min_actions_count):
            start_time, end_time = 0, 0
            actions = self._storage.steps.find({"author": author}).sort("time", 1)
            for i, action in enumerate(actions):
                if i == 0:
                    start_time = action.get("time")
                    continue

                end_time = action.get("time")
                delta = (end_time - start_time)
                if delta > min_sleep and delta < max_sleep:
                    self.save_action(author, A_SLEEP, start_time, end_time)
                start_time = end_time

            log.info("Was update consume and sleep steps for %s" % author)

    def _get_author_object(self, author_name):
        r_author = self._r.get_redditor(author_name, fetch=True)
        return r_author

    def _get_data_of(self, r_author):
        try:
            cb = list(r_author.gets(sort="new", limit=1000))
            sb = list(r_author.get_submitted(sort="new", limit=1000))
            return cb, sb
        except Exception as e:
            log.exception(e)
        return [], []

    def _add_author_data(self, r_author):
        log.info("will retrieve comments of %s" % r_author.name)
        _comments, _posts = self._get_data_of(r_author)
        for comment in _comments:
            self.save_action(r_author.name, A_COMMENT, datetime.fromtimestamp(comment.created_utc))

        for submission in _posts:
            self.save_action(r_author.name, A_POST, datetime.fromtimestamp(submission.created_utc))

        log.info("fill %s comments and %s posts" % (len(_comments), len(_posts)))

    def add_author_data(self, author):
        if not self.is_author_added(author):
            self._queue.put(author)
Example #27
class Channel(object):

    # Maximum size of shared memory array, just a safety check it's not set to something ridiculous
    MAX_BUFFER_SIZE = 256 * 1024 * 1024

    def __init__(self, buffer_size):
        # type: (int) -> None

        self.buffer_size = buffer_size

        if self.buffer_size > Channel.MAX_BUFFER_SIZE:
            raise Exception(
                "{} exceeds allowable size ({}) of shared memory to allocate".
                format(self.buffer_size, Channel.MAX_BUFFER_SIZE))
        else:
            self.__queue = Queue()

            # Create the shared memory array
            self.__shared_array = RawArray(c_char, self.buffer_size)

            # Tracks who has been given permission to write to the shared array
            self.__owner = None

    def put(self, sending_worker_name, packet):
        # type: (str, PacketBase) -> None

        # if sending_worker_name is None:
        #     warnings.warn("Put packet with no sender {}".format(packet))

        packet.set_sender(sending_worker_name)

        if not isinstance(packet, PacketBase):
            raise Exception("Packet {} is not of type {}".format(
                packet, PacketBase))

        self.__pickle_and_put(packet)

    def get(self):
        packet = self.__get_and_unpickle()

        # if packet.sender_name is None:
        #     warnings.warn("Get packet with no sender {}".format(packet))

        return packet

    def __pickle_and_put(self, packet):
        # type: (PacketBase) -> None
        pickled_packet = self.__pickle(packet)
        self.__queue.put(pickled_packet)

    @staticmethod
    def __pickle(packet):
        return cPickle.dumps(packet, cPickle.HIGHEST_PROTOCOL)

    def available(self):
        return self.__owner is None

    def release(self):
        self.__owner = None

    def acquire(self, worker_name):
        if self.__owner is None or self.__owner == worker_name:
            self.__owner = worker_name
        else:
            raise Exception(
                "Shared array is owned by '{}'. '{}' cannot acquire.".format(
                    self.__owner, worker_name))

    def copy_array_to_buffer(self, ndarray):
        # type: (np.ndarray) -> None

        total_bytes = ndarray.nbytes
        if total_bytes > self.buffer_size:
            raise Exception(
                "Numpy array ({} bytes) exceeds capacity of shared memory ({} bytes)."
                .format(total_bytes, self.buffer_size))
        else:
            # Reshape the source array into 1 row, wrap the shared array and copy the ndarray into the wrapper
            ndarray = ndarray.reshape(1, ndarray.shape[0] * ndarray.shape[1])
            shared_array_wrapper = np.frombuffer(self.__shared_array,
                                                 dtype=ndarray.dtype,
                                                 count=ndarray.size)
            shared_array_wrapper[0:ndarray.shape[0] *
                                 ndarray.shape[1]] = ndarray

    def copy_array_from_buffer(self, df_shape, dtype):
        # type: (tuple, str) -> pd.DataFrame

        num_elements = df_shape[0] * df_shape[1]
        num_bytes = num_elements * np.dtype(dtype).itemsize
        if num_bytes > self.buffer_size:
            raise Exception(
                "Array size {} exceeds capacity of shared memory {}.".format(
                    num_bytes, self.buffer_size))
        else:
            # Wrap the shared array, reshape the shared array and reconstruct the dataframe
            shared_array_wrapper = np.frombuffer(self.__shared_array,
                                                 dtype=dtype,
                                                 count=num_elements)
            shaped_array = shared_array_wrapper.reshape(
                df_shape[0], df_shape[1])
            df = pd.DataFrame(shaped_array, copy=False)
            return df

    def __get_and_unpickle(self):
        pickled_msg = self.__queue.get()
        msg = self.__unpickle(pickled_msg)
        return msg

    @staticmethod
    def __unpickle(pickled_msg):
        return cPickle.loads(pickled_msg)

    def close(self):
        self.__queue.close()
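A hedged usage sketch for Channel above (PacketBase and cPickle belong to the original project, so only the shared-array path is shown; the buffer size and shapes are illustrative):

import numpy as np

ch = Channel(buffer_size=8 * 1024 * 1024)
arr = np.arange(12, dtype='float64').reshape(3, 4)

ch.acquire('worker-1')                 # claim the shared array
ch.copy_array_to_buffer(arr)           # copy the 2-D array into shared memory
df = ch.copy_array_from_buffer((3, 4), 'float64')
ch.release()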
Example #28
class YubiGuard:
    def __init__(self, scrlck_mode=False):
        self.scrlck_mode = scrlck_mode

        self.id_q = Queue()
        self.on_q = Queue()
        self.pi_q = Queue()

        # init processes
        gi_proc = Process(target=self.get_ids)
        gi_proc.daemon = True

        cs_proc = Process(target=self.change_state)
        # no daemon, or main program will terminate before Keys can be unlocked
        cs_proc.daemon = False

        zmq_lis = ZmqListener(
            self.on_q)  # somehow works only with threads, not processes
        zmq_lis_thr = Thread(target=zmq_lis.start_listener)
        zmq_lis_thr.setDaemon(True)

        pi = PanelIndicator(self.pi_q, self.on_q)

        # starting processes and catching exceptions:
        try:
            gi_proc.start()
            cs_proc.start()
            zmq_lis_thr.start()

            pi.run_pi()  # main loop of root process

        except (KeyboardInterrupt, SystemExit):
            print('Caught exit event.')

        finally:
            # send exit signal, will reactivate YubiKey slots
            print('Sending EXIT_SIGNAL')
            self.on_q.put(EXIT_SIGNAL)

    def get_ids(self):
        old_id_l = []
        no_key = True
        pat = re.compile(r"(?:Yubikey.*?id=)(\d+)", re.IGNORECASE)
        while True:
            new_id_l = []
            # get list of xinput device ids and extract those of YubiKeys:

            xinput = shell_this('xinput list')
            matches = re.findall(pat, xinput)
            new_id_l.extend(matches)
            new_id_l.sort()

            if not new_id_l and not no_key:
                self.pi_q.put(NOKEY_SIGNAL)
                print('No YubiKey(s) detected.')
                no_key = True
            elif new_id_l and no_key:
                self.pi_q.put(OFF_SIGNAL)
                print('YubiKey(s) detected.')
                no_key = False
                # notify:
                msg_cmd = """notify-send --expire-time=2000 \
                'YubiKey(s) detected.'"""
                shell_this(msg_cmd)

            if new_id_l != old_id_l:
                print('Change in YubiKey ids detected. From {} to {}.'.format(
                    old_id_l, new_id_l))
                self.id_q.put(new_id_l)

                # lock screen if screenlock and YubiKey is removed:
                if self.scrlck_mode and len(new_id_l) < len(old_id_l):
                    print('Locking screen.')
                    shell_this(get_scrlck_cmd())  # execute screen lock command

            old_id_l = new_id_l

            time.sleep(.1)

    def turn_keys(self, id_l, lock=True
                  ):  # problem of value loss of cs_id_l found in this function
        tk_id_l = id_l
        if lock:
            print('Locking YubiKey(s).')
            state_flag = '0'
            self.pi_q.put(OFF_SIGNAL)
        else:
            print('Unlocking YubiKey(s).')
            state_flag = '1'
            self.pi_q.put(ON_SIGNAL)

        shell_this('; '.join(["xinput set-int-prop {} \"Device Enabled\" 8 {}".
                              format(tk_id, state_flag) for tk_id in tk_id_l]))

    def check_state(self, check_id_l):
        # check if all states have indeed changed:
        pat = re.compile(r"(?:Device Enabled.+?:).?([01])", re.IGNORECASE)
        # check if state has indeed changed:

        for tk_id in check_id_l:
            sh_out = shell_this('xinput list-props {}'.format(tk_id))
            match = re.search(pat, sh_out)
            if match:
                if match.group(1) != '0':
                    return False

    def change_state(self):
        cs_id_l = []
        cs_signal = ''

        while True:
            # retrieve input from queues
            while self.id_q.qsize() > 0:
                cs_id_l = self.id_q.get()

            while self.on_q.qsize() > 0:
                cs_signal = self.on_q.get()
                # not accepting any more signals
                if cs_signal == EXIT_SIGNAL:
                    self.turn_keys(cs_id_l, lock=False)
                    sys.exit(0)

            # lock/unlock
            if cs_id_l:
                if cs_signal == ON_SIGNAL:
                    self.turn_keys(cs_id_l, lock=False)

                    mon_thread = Thread(
                        target=self.yk_monitor, args=(cs_id_l, ))
                    mon_thread.start()
                    mon_thread.join()

                    # putting in separator, nullifying all preceding ON_SIGNALS
                    # to prevent possible over-triggering:
                    self.on_q.put('')

                elif self.check_state(
                        cs_id_l) is False:  # lock keys if they are unlocked
                    self.turn_keys(cs_id_l, lock=True)
            # reset state to prevent continued unlocking/locking
            cs_signal = ''
            time.sleep(.01)

    def yk_monitor(self, mon_l):
        # forming command to run parallel monitoring processes
        mon_cmd = ' & '.join(["xinput test {}".format(y_id) for y_id in mon_l])
        monitor = subprocess.Popen(mon_cmd, shell=True, stdout=subprocess.PIPE)

        stdout_queue = Queue()
        stdout_reader = AsynchronousFileReader(monitor.stdout, stdout_queue)
        stdout_reader.start()

        triggered = False
        timestamp = time.time()
        while not stdout_reader.eof and time.time() - timestamp < TIMEOUT:
            while stdout_queue.qsize() > 0:
                stdout_queue.get()  # emptying queue
                triggered = True
                time.sleep(.01)
            if triggered:
                print('YubiKey triggered. Now disabling.')
                break

            time.sleep(.001)
        if not triggered:
            print('No YubiKey triggered. Timeout.')
Example #29
class ComponentTestCase(TestCase):
    def setUp(self):
        self.notif_queue = Queue(1)
        self.error_queue = Queue()
        self.component = Component(self.notif_queue._reader,
                                   CommonErrorStrategy(),
                                   self.error_queue,
                                   PostgresConnector(_POSTGRES_DSN))
        self.component.log = MagicMock()

    def tearDown(self):
        # Component can have an attribute error on _parent_pid due to the fact
        # that we defer super.__init__() until start()
        if self.component.is_alive():
            self.component.terminate()
            self.component.join()

    @patch('hermes.components.select.select', side_effect=InterfaceError)
    def test_component_interface_error(self, select_module):
        # Start the component and let it settle
        self.component.start()
        sleep(1)
        self.assertFalse(self.component.is_alive())

    def test_component_select_error(self):
        """
        Due to process memory isolation we must mock the cleanup to put a
        pre-defined string into the error queue.
        """
        with patch('hermes.components.select.select',
                   side_effect=select.error):
            self.component.start()
            sleep(1)

            # Ensure the string PUT by the process is the same as what
            # was returned by the queue.
            self.assertFalse(self.component.is_alive())

    def test_not_implemented_exception_on_execute(self):
        exception_occurred = False
        try:
            self.component.execute(None)
        except NotImplementedError:
            exception_occurred = True
        self.assertTrue(exception_occurred)

    def test_exits_on_terminate(self):
        self.component.start()
        sleep(1)
        self.component.terminate()
        sleep(1)
        self.assertFalse(self.component.is_alive())

    def test_execute_called_on_notification(self):
        error_string = util.rand_string(10)

        def mock_func(*args, **kwargs):
            """
            The process will have isolated this function, as well as the
            error queue.
            """
            self.component.error_queue.put(error_string)

        with patch('hermes.components.Component.execute') as mock_execute:
            mock_execute.side_effect = mock_func

            self.component.start()
            sleep(2)
            self.assertTrue(self.component.is_alive())

            self.notif_queue.put(1)

            return_string = self.error_queue.get(timeout=2)
            self.assertEqual(error_string, return_string)

    def test_execute_done_called_on_notification(self):
        error_string = util.rand_string(10)

        def mock_func(*args, **kwargs):
            """
            The process will have isolated this function, as well as the
            error queue.
            """
            self.component.error_queue.put(error_string)

        self.component.post_execute = MagicMock()
        self.component.post_execute.side_effect = mock_func

        self.component.execute = MagicMock()

        self.component.start()
        self.notif_queue.put(1)

        return_string = self.error_queue.get()
        self.assertEqual(error_string, return_string)

    def test_error_received_on_exception_in_execute(self):
        mock_exception_return = (False, util.rand_string(10))

        error_strat = AbstractErrorStrategy()
        error_strat.handle_exception = MagicMock(
            return_value=mock_execption_return
        )

        with patch('hermes.components.Component.execute',
                   side_effect=Exception):
            self.component.error_strategy = error_strat

            # Start the component and let it settle
            self.component.start()
            sleep(1)
            self.notif_queue.put(True)

            exception = self.error_queue.get(timeout=1)

        self.assertEqual(mock_exception_return, exception)

    def test_component_process_reuse(self):
        self.component.start()
        sleep(1)
        self.component.terminate()
        self.component.join()
        sleep(1)
        self.component.start()
        sleep(1)
        self.assertTrue(self.component.is_alive())

    def test_isalive_is_false_on_attr_error(self):
        self.assertRaises(AttributeError, lambda: self.component._popen)
        return_value = self.component.is_alive()
        self.assertFalse(return_value)

    def test_ident_is_none_on_attr_error(self):
        self.assertRaises(AttributeError, lambda: self.component._popen)
        return_value = self.component.ident
        self.assertIsNone(return_value)

    def test_join_returns_on_no_process(self):
        self.assertRaises(AttributeError, lambda: self.component._popen)
        return_value = self.component.join()
        self.assertIsNone(return_value)

    def test_execute_gets_notification_and_calls_execute_funcs(self):
        self.component._should_run = LimitedTrueBool(1)
        self.component._backoff_time = randint(1, 10000)

        self.component.execute = MagicMock()
        self.component.post_execute = MagicMock()
        self.component.pre_execute = MagicMock()

        # Put the notification so it will immediately return from select
        self.notif_queue.put(True)

        self.component._execute()

        self.assertEqual(self.component.post_execute.call_count, 1)
        self.assertEqual(self.component.execute.call_count, 1)
        self.assertEqual(self.component.pre_execute.call_count, 1)

        self.assertEqual(self.component.__backoff_time__, 0)

    def test_main_loop_is_called(self):
        with patch('hermes.log.get_logger'):
            with patch('multiprocessing.Process.start'):
                self.component.set_up = MagicMock()
                self.component._execute = MagicMock()
                self.component.tear_down = MagicMock(side_effect=Exception)
                self.component.start()

                self.assertRaises(Exception, self.component.run)

                self.assertEqual(self.component.set_up.call_count, 1)
                self.assertEqual(self.component._execute.call_count, 1)
                self.assertEqual(self.component.tear_down.call_count, 1)

    def test_breaks_on_interrupt(self):
        with patch('hermes.log.get_logger'):
            with patch('multiprocessing.Process.start'):
                self.component.set_up = MagicMock(side_effect=select.error)
                self.component.tear_down = MagicMock(side_effect=Exception)
                self.component.start()

                self.assertRaises(Exception, self.component.run)

                self.assertEqual(self.component.set_up.call_count, 1)
                self.assertEqual(self.component.tear_down.call_count, 1)
Example #30
def run_net_sim((net, args, kwargs)):
    print('process id:' + str(os.getpid()) + " Starting...")
    Y = net.sim(*args, **kwargs)
    print('process id:' + str(os.getpid()) + " Done!")
    return Y

if __name__ == '__main__':
    queue = Queue()
    w = Worker(queue)
    w.start()
    # To trigger the problem, any non-pickleable object is to be passed here.
    args = ['hej', 'hopp']
    kwargs = {}
    kwargs['one'] = 1
    queue.put((car(), args, kwargs))
    w.join()
    
    #Now try with a neural network
    
    P1, T1 = loadsyn1(100000)
    P2, T2 = loadsyn1(100000)
    P3, T3 = loadsyn1(100000)
    P4, T4 = loadsyn1(100000)
    P5, T5 = loadsyn1(100000)
    P6, T6 = loadsyn1(100000)
    P7, T7 = loadsyn1(100000)
    P8, T8 = loadsyn1(100000)
                
    net = build_feedforward(2, 1, 1)
    
Example #31
class Agent(object):
    def __init__(self, pull_interval=5):

        self.input = None
        self.filter = None
        self.output = None

        # for input write and filter read
        self.iqueue = Queue()
        # for filter write and output read
        self.oqueue = Queue()

        self.pull_interval = pull_interval

        self.__init_all()

    def __init_all(self):
        self.__set_filter()
        self.__set_output()
        self.__set_input()

    def __set_input(self):
        input_ins.set_res_queue(self.iqueue)

        def target():
            while True:
                # pull_data must be realized in input handler
                task_list = Task.create_from_conf(input_ins, AGENT_INPUT,
                                                  'pull_data')
                if not task_list:
                    time.sleep(self.pull_interval)
                    continue
                list_task = []
                for task in task_list:
                    t = threading.Thread(target=task)
                    t.setDaemon(True)
                    t.start()
                    list_task.append(t)
                for task in list_task:
                    task.join()
                time.sleep(self.pull_interval)

        p = multiprocessing.Process(target=target)
        p.daemon = True
        p.start()

        logging.debug('{0} start input handlers ...'.format(
            self.__class__.__name__))

    def __set_output(self):
        def target():
            while True:
                data = self.oqueue.get()
                # push_data must be realized in output handler
                task_list = Task.create_from_conf(output_ins, AGENT_OUTPUT,
                                                  'push_data')
                if not task_list:
                    continue
                list_task = []
                for task in task_list:
                    t = threading.Thread(target=task, args=(data, ))
                    t.setDaemon(True)
                    t.start()
                    list_task.append(t)  # track threads so the join below waits on them
                for task in list_task:
                    task.join()

        p = multiprocessing.Process(target=target)
        p.daemon = True
        p.start()

        logging.debug('{0} start out handlers ...'.format(
            self.__class__.__name__))

    def __set_filter(self):
        def target():
            while True:
                data = self.iqueue.get()
                task_list = Task.create_from_conf(filter_ins, AGENT_FILTER,
                                                  'filter_data')
                filtered_data = data
                for task in task_list:
                    filtered_data = task(filtered_data)

                self.oqueue.put(filtered_data)

        p = multiprocessing.Process(target=target)
        p.daemon = True
        p.start()

        logging.debug('{0} start filter handlers ...'.format(
            self.__class__.__name__))

    def loop(self):
        logging.debug('{0} start successfully!'.format(
            self.__class__.__name__))
        # as main block process
        while True:
            time.sleep(1)
Example #32
 def put(self, obj, block=True, timeout=None):
     """Used when a task is put first time"""
     Queue.put(self, obj, block, timeout)
     self._tasks_lock.acquire()
     self._tasks.value += 1
     self._tasks_lock.release()
Example #33
 def put_back(self, obj, block=True, timeout=None):
     """Used when a task is put back to be processed more"""
     Queue.put(self, obj, block, timeout)
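A hedged sketch of the idea behind the put()/put_back() pair in the last two examples: count a task only on its first put, so callers can tell how many distinct tasks entered the queue. Composition is used here to sidestep the pickling details of subclassing multiprocessing queues.

from multiprocessing import Lock, Queue, Value

class TaskQueue:
    def __init__(self):
        self.q = Queue()
        self._tasks = Value('i', 0)
        self._lock = Lock()

    def put(self, obj, block=True, timeout=None):
        # first-time put: count the task
        self.q.put(obj, block, timeout)
        with self._lock:
            self._tasks.value += 1

    def put_back(self, obj, block=True, timeout=None):
        # re-queue for more processing without recounting
        self.q.put(obj, block, timeout)

    def pending(self):
        return self._tasks.value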
Example #34
class BulbControl(Process):
    def __init__(self, my_id, bpm, host, leader_id, state_q, bulb_objects_list, turned_on_list):
        """
        Initialize BulbControl process, set environment variables.
        
        :param my_id: id ranging from 0-12, specifying bulb position
        :type my_id: int 
        :param bpm: pulse rate in beats per minute
        :type bpm: int
        :param host: ip address of Ubiquiti strip with relevant relay
        :type host: string (e.x. "192.168.1.20")
        :param leader_id: array of length 1, value of leader id ranging from 0-12
        :type leader_id: array of 1 integer
        :param state_q: Queue of bulb states corresponding to on/off messages
        :type state_q: BulbQueue (see BulbQueue.py)
        :param turned_on_list: List of bulbs turned on (process-safe) 
        :type turned_on_list: Process safe list of c_type ints 
        """
        super(BulbControl, self).__init__()
        self.id = my_id
        self.bpm = bpm
        self.host = host
        self.leader_id = leader_id
        self.state_q = state_q
        self.bulb_objects_list = bulb_objects_list
        self.turned_on_list = turned_on_list

        # used to send messages to BulbBlinker
        self.adjustment = Queue()

        # used to track message receipt times
        self.time_of_last_blink = None
        self.time_of_neighbor_below = None
        self.time_of_neighbor_above = None

        self.above_bulb_id = (self.id + 1) % 13
        self.below_bulb_id = (self.id - 1) % 13

    def check_ordering(self):
        """
        Receive messages from self and neighbors. Decide which neighbor to trust. 
        Calculate necessary adjustment to time of bulb blinking. 
        Send adjustment message to child process, BulbBlinker
        
        :return: None
        """
        while True:
            if os.getppid() == 1: 
                print "TERMINATE"
                os.kill(os.getpid(), 9)
            if self.id == self.leader_id[0]:
                # I am the leader.
                # Go to sleep for a bit; don't adjust synchronization.
                time.sleep(5)
            else:
                # I am not the leader.
                # I want to find the time difference between myself 
                # and the leader, through my neighbors.
                #
                # Find which neighbor of mine is closer to the leader.
                steps_to_above = bulb_id_distance( self.leader_id[0], self.above_bulb_id )
                steps_to_below = bulb_id_distance( self.leader_id[0], self.below_bulb_id )

                # if my +1 neighbor is closer, set neighbor = 1; otherwise, -1
                neighbor = 1 if steps_to_above < steps_to_below else -1
                
                # Loop until I have a message from my leading neighbor and a message from myself
                tPrev = datetime.now()
                relevant_neighbor_time = tPrev
                
                while True:
                    
                    # This is the point at which this process first pauses
                    # It waits for all bulbs to have started blinking at least once
                    # Then, self.state_q begins to be filled...
                    
                    if not self.state_q.empty():
                        # Get bulb uuid of either my self or my neighbors
                        message = self.state_q.get()
                        time_received_message = datetime.now()

                        # If I 'sent' the message
                        if message == str(self.id):
                            self.time_of_last_blink = time_received_message
                            if self.id == 1:
                                print "Received self message " + str(time_received_message)
                            if relevant_neighbor_time != tPrev:
                                break
                                
                        # If my trusted neighbor sent the message
                        elif message == str((self.id + neighbor) % 13):
                            relevant_neighbor_time = time_received_message
                            if self.id == 1:
                                print "Received trusted neighbor message " + str(time_received_message)
                            
                tBreak = datetime.now()

                if self.id == 1:
                    print "I, " + str(self.id) + " broke out after " + str((tBreak-tPrev).total_seconds())
                
                # I now have:
                # - The last time that I received a message from my trusted neighbor that it pulsed.
                # - The last time that I pulsed
                

                # Compare last received message from neighbor to neighbor's expected future tick
                # Set my time difference based on which one I'm closer to
                halfTimePhase = (60.0 / float(self.bpm)) * 2
                diff = self.time_of_last_blink - relevant_neighbor_time
                
                if self.id == 1:
                    print "diff.total_seconds " + str( diff.total_seconds() )
                assert diff.total_seconds() >= 0

                # Nudge by a fixed quarter second rather than by the exact
                # phase difference, easing the system toward alignment:
                if halfTimePhase < diff.total_seconds() < (2 * halfTimePhase - 0.125):
                    # Closer to the neighbor's next pulse: wait a little longer
                    waitTime = 0.25
                elif 0.125 < diff.total_seconds() < halfTimePhase:
                    # Closer to the neighbor's previous pulse: wait a little less
                    waitTime = -0.25
                else:
                    # Within an eighth of a second of phase: hold steady
                    waitTime = 0
                
                # Slow down the adjustment to 'ease' the system into a stable state.
                adjustmentFactor = 1.0
                self.adjustment.put( waitTime * adjustmentFactor )
                
                if self.id == 1:
                    print ("I, " + str(self.id) + " had times: self: " + 
                           str(self.time_of_last_blink) + " / " + str(relevant_neighbor_time) + 
                           "  Adjustment: " + str(waitTime * adjustmentFactor) + 
                           " at " + str(datetime.now()))
                
                time.sleep(0.00001)
                
                sys.stdout.flush()
      

    def run(self):  
        """
        Create a child process to turn bulb on and off
        Start self continually checking ordering of bulbs 
        """
        my_bulb = BulbBlinker(  my_id = self.id,
                                bpm = self.bpm, 
                                host = self.host,
                                adjustment = self.adjustment,
                                bulb_objects_list = self.bulb_objects_list, 
                                above_neighbor = (self.id + 1) % 13, 
                                below_neighbor = (self.id - 1) % 13, 
                                turned_on_list = self.turned_on_list)
        my_bulb.daemon = True 
        my_bulb.start()
        self.check_ordering()
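
Below is a minimal, hypothetical sketch of the adjustment-queue pattern this example relies on: the ordering process computes a signed correction in seconds and puts it on a Queue, and the blinker child drains the queue and shifts its next sleep. The blinker function and its timings are illustrative, not from the source.

import time
from multiprocessing import Process, Queue


def blinker(period, adjustment):
    # Child process: blink on a fixed period, shifted by any pending correction.
    while True:
        shift = 0.0
        while not adjustment.empty():
            shift = adjustment.get()  # keep only the latest correction
        time.sleep(max(0.0, period + shift))
        print("blink", time.time())


if __name__ == '__main__':
    adj = Queue()
    p = Process(target=blinker, args=(0.5, adj))
    p.daemon = True
    p.start()
    time.sleep(2)
    adj.put(-0.25)  # ask the child to blink sooner on its next cycle
    time.sleep(2)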
예제 #35
0
파일: engine.py 프로젝트: zoulianmp/HQCMeas
class ProcessEngine(BaseEngine):
    """ An engine executing the measurement it is sent in a different process.

    """

    # --- Public API ----------------------------------------------------------

    #: Reference to the workbench got at __init__
    workbench = Typed(Workbench)

    def prepare_to_run(self, name, root, monitored_entries, build_deps):

        runtime_deps = root.run_time

        # Get ConfigObj describing measure.
        root.update_preferences_from_members()
        config = root.task_preferences

        # Make infos tuple to send to the subprocess.
        self._temp = (name, config, build_deps, runtime_deps,
                      monitored_entries)

        # Clear all the flags.
        self._meas_pause.clear()
        self._meas_paused.clear()
        self._meas_stop.clear()
        self._stop.clear()
        self._force_stop.clear()
        self._stop_requested = False

        # If the process does not exist or is dead create a new one.
        if not self._process or not self._process.is_alive():
            self._pipe, process_pipe = Pipe()
            self._process = TaskProcess(process_pipe,
                                        self._log_queue,
                                        self._monitor_queue,
                                        self._meas_pause,
                                        self._meas_paused,
                                        self._meas_stop,
                                        self._stop)
            self._process.daemon = True

            self._log_thread = QueueLoggerThread(self._log_queue)
            self._log_thread.daemon = True

            self._monitor_thread = ThreadMeasureMonitor(self,
                                                        self._monitor_queue)
            self._monitor_thread.daemon = True

            self._pause_thread = None

        self.measure_status = ('PREPARED', 'Engine ready to process.')

    def run(self):
        if not self._process.is_alive():
            # Starting monitoring threads.
            self._log_thread.start()
            self._monitor_thread.start()

            # Start process.
            self._process.start()
            self.active = True

            # Start main communication thread.
            self._com_thread = Thread(group=None,
                                      target=self._process_listener)
            self._com_thread.start()

        self._starting_allowed.set()

        self.measure_status = ('RUNNING', 'Measure running.')

    def pause(self):
        self.measure_status = ('PAUSING', 'Waiting for measure to pause.')
        self._meas_pause.set()

        self._pause_thread = Thread(target=self._wait_for_pause)
        self._pause_thread.start()

    def resume(self):
        self._meas_pause.clear()
        self.measure_status = ('RUNNING', 'Measure has been resumed.')

    def stop(self):
        self._stop_requested = True
        self._meas_stop.set()

    def exit(self):
        self._stop_requested = True
        self._meas_stop.set()
        self._stop.set()
        # Everything else handled by the _com_thread and the process.

    def force_stop(self):
        self._stop_requested = True
        # Just in case the user calls this directly. Will signal all threads to
        # stop (save _com_thread).
        self._stop.set()
        self._log_queue.put(None)
        self._monitor_queue.put((None, None))

        # Set _force_stop to stop _com_thread.
        self._force_stop.set()

        # Terminate the process and make sure all threads stopped properly.
        self._process.terminate()
        self._log_thread.join()
        self._monitor_thread.join()
        self._com_thread.join()
        self.active = False
        if self._processing.is_set():
            self.done = ('INTERRUPTED', 'The user forced the system to stop')
            self._processing.clear()

        # Discard the queues as they may have been corrupted when the process
        # was terminated.
        self._log_queue = Queue()
        self._monitor_queue = Queue()

    def force_exit(self):
        self.force_stop()

    # --- Private API ---------------------------------------------------------

    #: Flag indicating that the user requested the measure to stop.
    _stop_requested = Bool()

    #: Interprocess event used to pause the subprocess current measure.
    _meas_pause = Typed(Event, ())

    #: Interprocess event signaling the subprocess current measure is paused.
    _meas_paused = Typed(Event, ())

    #: Interprocess event used to stop the subprocess current measure.
    _meas_stop = Typed(Event, ())

    #: Interprocess event used to stop the subprocess.
    _stop = Typed(Event, ())

    #: Flag signaling that a forced exit has been requested
    _force_stop = Value(tEvent())

    #: Flag set while the subprocess is processing a measure.
    _processing = Value(tEvent())

    #: Flag telling the communication thread it can send the next measure.
    _starting_allowed = Value(tEvent())

    #: Temporary tuple to store the data to be sent to the process when a
    #: new measure is ready.
    _temp = Tuple()

    #: Current subprocess.
    _process = Typed(TaskProcess)

    #: Connection used to send and receive messages about execution (the
    #: concrete type depends on the OS)
    _pipe = Value()

    #: Thread in charge of transferring measure to the process.
    _com_thread = Typed(Thread)

    #: Inter-process queue used by the subprocess to transmit its log records.
    _log_queue = Typed(Queue, ())

    #: Thread in charge of collecting the log message coming from the
    #: subprocess.
    _log_thread = Typed(Thread)

    #: Inter-process queue used by the subprocess to send the values of the
    #: observed database entries.
    _monitor_queue = Typed(Queue, ())

    #: Thread in charge of collecting the values of the observed database
    #: entries.
    _monitor_thread = Typed(Thread)

    #: Thread in charge to notify the engine that the measure did pause after
    #: being asked to do so.
    _pause_thread = Typed(Thread)

    def _process_listener(self):
        """ Handle the communications with the worker process.

        Executed by the _com_thread.

        """
        logger = logging.getLogger(__name__)
        logger.debug('Starting listener')

        while not self._pipe.poll(2):
            if not self._process.is_alive():
                logger.critical('Subprocess was found dead unexpectedly')
                self._stop.set()
                self._log_queue.put(None)
                self._monitor_queue.put((None, None))
                self._cleanup(process=False)
                self.done = ('FAILED', 'Subprocess failed to start')
                return

        mess = self._pipe.recv()
        if mess != 'READY':
            logger.critical('Subprocess was found dead unexpectedly')
            self.done = ('FAILED', 'Subprocess failed to start')
            self._cleanup()
            return

        # Infinite loop waiting for measure.
        while not self._stop.is_set():

            # Wait for measure and check for stopping.
            while not self._starting_allowed.wait(1):
                if self._stop.is_set():
                    self._cleanup()
                    return

            self._processing.set()

            # Send the measure.
            self._pipe.send(self._temp)
            logger.debug('Measure {} sent'.format(self._temp[0]))

            # Empty _temp and reset flag.
            self._temp = tuple()
            self._starting_allowed.clear()

            # Wait for the process to finish the measure and check it has not
            # been killed.
            while not self._pipe.poll(1):
                if self._force_stop.is_set():
                    self._cleanup()
                    return

            # Here get message from process and react
            meas_status, int_status, mess = self._pipe.recv()
            logger.debug('Subprocess done performing measure')

            if int_status == 'STOPPING':
                self._cleanup()

            if meas_status == 'INTERRUPTED' and not self._stop_requested:
                meas_status = 'FAILED'
                mess = mess.replace('was stopped', 'failed')

            # This event should be handled in the main thread so that this one
            # can stay responsive otherwise the engine will be unable to
            # shutdown.
            deferred_call(setattr, self, 'done', (meas_status, mess))

            self._processing.clear()

        self._cleanup()

    def _cleanup(self, process=True):
        """ Helper method taking care of making sure that everybody stops.

        Parameters
        ----------
        process : bool
            Whether to join the worker process. Set to False when the process
            has been terminated abruptly.

        """
        logger = logging.getLogger(__name__)
        logger.debug('Cleaning up')
        self._pipe.close()
        if process:
            self._process.join()
            logger.debug('Subprocess joined')
        self._log_thread.join()
        logger.debug('Log thread joined')
        self._monitor_thread.join()
        logger.debug('Monitor thread joined')
        if self._pause_thread:
            self._pause_thread.join()
            logger.debug('Pause thread joined')
        self.active = False

    def _wait_for_pause(self):
        """ Wait for the task paused event to be set.

        """
        stop_sig = self._stop
        paused_sig = self._meas_paused

        while not stop_sig.is_set():
            if paused_sig.wait(0.1):
                status = ('PAUSED', 'Measure execution is paused')
                deferred_call(setattr, self, 'measure_status', status)
                break
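
QueueLoggerThread is not shown in this excerpt; below is a minimal guess at that log-forwarding pattern, assuming the subprocess sends standard logging records and that the None sentinel pushed by force_stop() ends the loop.

import logging
from threading import Thread


class QueueLoggerSketch(Thread):
    # Drain log records sent by the subprocess until the None sentinel arrives.
    def __init__(self, queue):
        Thread.__init__(self)
        self.daemon = True
        self.queue = queue

    def run(self):
        while True:
            record = self.queue.get()
            if record is None:  # sentinel pushed by force_stop()/_process_listener
                break
            logging.getLogger(record.name).handle(record)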
예제 #36
0
파일: handler.py 프로젝트: ischoegl/ctwrap
def _worker(module: str, strategy: Dict[str, Any], output: Dict[str, Any],
            tasks_to_accomplish: mpq.Queue, lock: mps.Lock,
            verbosity: int) -> bool:
    """
    Worker function running simulation queues.

    Arguments:
        module: Name of simulation module to be run
        tasks_to_accomplish: Queue of remaining tasks
        lock: Multiprocessing lock (used for parallel simulations only)
        output: Dictionary containing output information
        verbosity: Verbosity level

    Returns:
        True when tasks are completed
    """
    # pylint: disable=no-member

    parallel = isinstance(lock, mp.synchronize.Lock)
    if parallel:
        this = mp.current_process().name
    else:
        this = 'main'

    if verbosity > 1 and parallel:
        print(indent2 + 'starting ' + this)

    # create local copy of strategy object
    strategy = Strategy.load(strategy)
    variations = strategy.variations  # pylint: disable=no-member
    tasks = set(variations.keys())

    # create local copy of output object
    if isinstance(output, dict):
        out = Output.from_dict(output)
    else:
        out = None

    other = None
    reschedule = 0
    while reschedule < len(tasks):
        try:
            # retrieve next simulation task
            task, config = tasks_to_accomplish.get_nowait()

        except queue.Empty:
            # no tasks left
            if verbosity > 1 and parallel:
                print(indent2 + 'terminating ' + this)
            break

        try:
            # create object
            obj = Simulation.from_module(module)

            base = strategy.base(task)
            restart = None
            if obj.has_restart and out and base:
                if parallel:
                    with lock:
                        done = set(out.dir())
                        restart = out.load_like(base, other)
                else:
                    done = set(out.dir())
                    restart = out.load_like(base, other)
                invalid = done - tasks  # empty if labels follow task names
                if restart is None and not invalid:
                    # need to pick another task
                    if verbosity > 1:
                        print(indent2 +
                              'rescheduling {} ({})'.format(task, this))
                    tasks_to_accomplish.put((task, config))
                    reschedule += 1
                    continue

            if verbosity > 0:
                msg = indent1 + 'running `{}` ({})'
                print(msg.format(task, this))

            # run task
            if restart is None:
                obj.run(config)
            else:
                obj.restart(restart, config)

            data = obj.data
            errored = False

        except Exception as err:
            # Convert exception to warning
            msg = "Simulation of '{}' for '{}' failed with error message:\n{}".format(
                type(obj).__name__, task, err)
            warnings.warn(msg, RuntimeWarning)

            data = {task: (type(err).__name__, str(err))}
            errored = True

        # save output
        if out and obj.data:
            if parallel:
                with lock:
                    out.save(data,
                             entry=task,
                             variation=variations[task],
                             errored=errored)
            else:
                out.save(data,
                         entry=task,
                         variation=variations[task],
                         errored=errored)

            other = obj.data

    return True
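
The get_nowait / queue.Empty / put-back dance above is the core of the worker loop. Here is a stripped-down sketch of it; the task tuples and the readiness flag are invented for illustration.

import queue
import time
from multiprocessing import Queue


def drain(tasks, limit):
    # Pull tasks until the queue is empty; bound put-backs so deferred
    # work cannot spin forever, mirroring the reschedule counter above.
    rescheduled = 0
    while rescheduled < limit:
        try:
            task, ready = tasks.get_nowait()
        except queue.Empty:
            break  # no tasks left for this worker
        if not ready:
            tasks.put((task, True))  # reschedule, as _worker does on a restart miss
            rescheduled += 1
            time.sleep(0.05)  # give the feeder thread a moment to flush
            continue
        print("running", task)


if __name__ == '__main__':
    q = Queue()
    q.put(("a", True))
    q.put(("b", False))
    time.sleep(0.1)  # let the feeder thread flush before get_nowait()
    drain(q, limit=3)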
예제 #37
0
import os
import sys
import time
from time import strftime
from optparse import OptionParser, OptionGroup
from multiprocessing import Queue

# Consumer and PHYML are project classes not shown in this excerpt.


class MultiProcess(object):
    '''
    Class MultiProcess
    An object that can perform multiprocesses
    '''
    def __init__(self, ncpus=1):
        self.ncpus = int(ncpus)
        # Parallelization
        self._parallel = None
        self._paralleltasks = Queue()
        self._parallelresults = Queue()

    def initiateParallel(self):
        self._parallel = [Consumer(self._paralleltasks, self._parallelresults)
                          for x in range(self.ncpus)]
        for consumer in self._parallel:
            consumer.start()

    def addPoison(self):
        for consumer in self._parallel:
            self._paralleltasks.put(None)

    def isTerminated(self):
        for consumer in self._parallel:
            if consumer.is_alive():
                return False
        return True

    def killParallel(self):
        for consumer in self._parallel:
            consumer.terminate()

    def doPHYML(self, indir, tree):
        i = 0
        dres = {}
        redo = open('phymlfail.txt', 'w')
        self.initiateParallel()
        for f in os.listdir(indir):
            if not f.endswith('.phy'):
                continue
            align = f
            obj = PHYML(indir, align, tree)
            self._paralleltasks.put(obj)
        # Poison pill to stop the workers
        self.addPoison()
        while True:
            while not self._parallelresults.empty():
                result = self._parallelresults.get()
                if result[1] != 0:
                    msg(result[0], 'ERR')
                    redo.write('%s\n' % result[0])
                else:
                    msg('%s %d' % (result[0], i), 'IMP')
                    i += 1
            if self.isTerminated():
                break
            time.sleep(0.1)
        # Get the last messages
        while not self._parallelresults.empty():
            result = self._parallelresults.get()
            if result[1] != 0:
                msg(result[0], 'ERR')
                redo.write('%s\n' % result[0])
            else:
                msg('%s %d' % (result[0], i), 'IMP')
                i += 1
        self.killParallel()
        return dres


class Highlighter:
    def __init__(self):
        self._msgTypes = {'INF': '\033[0m',
                          'IMP': '\033[1;32m',
                          'DEV': '\033[1;34m',
                          'ERR': '\033[1;31m',
                          'WRN': '\033[1;33m'}
        self._reset = '\033[0m'
        self._default = 'INF'

    def ColorMsg(self, msg, msgLevel='INF'):
        try:
            s = self._msgTypes[msgLevel] + msg + self._reset
        except KeyError:
            s = self._msgTypes[self._default] + msg + self._reset
        return s


def msg(message, msgLevel='INF', sameline=False):
    o = Highlighter()
    if sameline:
        sys.stderr.write('\r')
    else:
        sys.stderr.write(strftime("%H:%M:%S") + ' ')
    sys.stderr.write(o.ColorMsg(message, msgLevel))
    if not sameline:
        sys.stderr.write('\n')


def creturn():
    sys.stderr.write('\n')


def getOptions():
    '''Retrieve the options passed from the command line'''

    usage = "usage: python parallelPHYML.py [options]"
    parser = OptionParser(usage)

    group1 = OptionGroup(parser, "Inputs")
    group1.add_option('-a', '--aligndir', action="store", dest='align',
                      default='OUT',
                      help='Alignment directory')
    group1.add_option('-t', '--tree', action="store", dest='tree',
                      default='TREE.nwk',
                      help='Tree file')
    group1.add_option('-r', '--threads', action="store", dest='threads',
                      default=1,
                      help='Threads [Default: 1]')
    parser.add_option_group(group1)
    # Parse the options
    return parser.parse_args()


(options, args) = getOptions()

dres = MultiProcess(options.threads).doPHYML(options.align, options.tree)
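
Consumer is not shown in this snippet; a plausible minimal version follows, assuming tasks are callables (like the PHYML objects above) that return a (name, returncode) pair, with None as the poison pill.

from multiprocessing import Process


class ConsumerSketch(Process):
    def __init__(self, task_queue, result_queue):
        Process.__init__(self)
        self.task_queue = task_queue
        self.result_queue = result_queue

    def run(self):
        while True:
            task = self.task_queue.get()
            if task is None:  # poison pill from addPoison()
                break
            self.result_queue.put(task())  # e.g. ('alignment.phy', 0)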
예제 #38
0
class TestDLTMessageHandler(unittest.TestCase):
    def setUp(self):
        if six.PY2:
            self.filter_queue = Queue()
            self.message_queue = Queue()
        else:
            self.ctx = get_context()
            self.filter_queue = Queue(ctx=self.ctx)
            self.message_queue = Queue(ctx=self.ctx)
        self.client_cfg = {
            "ip_address": "127.0.0.1",
            "filename": "/dev/null",
            "verbose": 0,
            "port": "1234"
        }
        self.stop_event = Event()
        self.handler = DLTMessageHandler(self.filter_queue, self.message_queue,
                                         self.stop_event, self.client_cfg)

    def test_init(self):
        self.assertFalse(self.handler.mp_stop_flag.is_set())
        self.assertFalse(self.handler.is_alive())
        self.assertTrue(self.handler.filter_queue.empty())
        self.assertTrue(self.handler.message_queue.empty())

    def test_run_basic(self):
        self.assertFalse(self.handler.is_alive())
        self.handler.start()
        self.assertTrue(self.handler.is_alive())
        self.assertNotEqual(self.handler.pid, os.getpid())
        self.stop_event.set()
        self.handler.join()
        self.assertFalse(self.handler.is_alive())

    def test_handle_add_new_filter(self):
        self.handler.filter_queue.put(("queue_id", [("SYS", "JOUR")], True))
        time.sleep(0.01)
        self.handler.handle(None)
        self.assertIn(("SYS", "JOUR"), self.handler.context_map)
        self.assertEqual(self.handler.context_map[("SYS", "JOUR")],
                         ["queue_id"])

    def test_handle_remove_filter_single_entry(self):
        self.handler.filter_queue.put(("queue_id", [("SYS", "JOUR")], True))
        time.sleep(0.01)
        self.handler.handle(None)
        self.assertIn(("SYS", "JOUR"), self.handler.context_map)
        self.assertEqual(self.handler.context_map[("SYS", "JOUR")],
                         ["queue_id"])

        self.handler.filter_queue.put(("queue_id", [("SYS", "JOUR")], False))
        time.sleep(0.01)
        self.handler.handle(None)
        self.assertNotIn(("SYS", "JOUR"), self.handler.context_map)

    def test_handle_remove_filter_multiple_entries(self):
        self.handler.filter_queue.put(("queue_id1", [("SYS", "JOUR")], True))
        self.handler.filter_queue.put(("queue_id2", [("SYS", "JOUR")], True))
        time.sleep(0.01)
        self.handler.handle(None)
        self.assertIn(("SYS", "JOUR"), self.handler.context_map)
        self.assertEqual(self.handler.context_map[("SYS", "JOUR")],
                         ["queue_id1", "queue_id2"])

        self.handler.filter_queue.put(("queue_id1", [("SYS", "JOUR")], False))
        time.sleep(0.01)
        self.handler.handle(None)
        self.assertIn(("SYS", "JOUR"), self.handler.context_map)
        self.assertEqual(self.handler.context_map[("SYS", "JOUR")],
                         ["queue_id2"])

    def test_handle_multiple_similar_filters(self):
        self.handler.filter_queue.put(("queue_id0", [("SYS", "JOUR")], True))
        self.handler.filter_queue.put(("queue_id1", [("SYS", "JOUR")], True))
        time.sleep(0.01)
        self.handler.handle(None)
        self.assertIn(("SYS", "JOUR"), self.handler.context_map)
        self.assertEqual(self.handler.context_map[("SYS", "JOUR")],
                         ["queue_id0", "queue_id1"])

    def test_handle_multiple_different_filters(self):
        self.filter_queue.put(("queue_id0", [("SYS", "JOUR")], True))
        self.filter_queue.put(("queue_id1", [("DA1", "DC1")], True))
        time.sleep(0.01)
        self.handler.handle(None)
        self.assertIn(("SYS", "JOUR"), self.handler.context_map)
        self.assertIn(("DA1", "DC1"), self.handler.context_map)
        self.assertEqual(self.handler.context_map[("SYS", "JOUR")],
                         ["queue_id0"])
        self.assertEqual(self.handler.context_map[("DA1", "DC1")],
                         ["queue_id1"])

    def test_handle_message_tag_and_distribute(self):
        self.filter_queue.put(("queue_id0", [("SYS", "JOUR")], True))
        self.filter_queue.put(("queue_id1", [("DA1", "DC1")], True))
        self.filter_queue.put(("queue_id2", [("SYS", None)], True))
        self.filter_queue.put(("queue_id3", [(None, "DC1")], True))
        self.filter_queue.put(("queue_id4", [(None, None)], True))
        time.sleep(0.01)

        # - simulate receiving of messages
        for _ in range(10):
            for message in create_messages(stream_multiple, from_file=True):
                self.handler.handle(message)

        self.assertIn(("SYS", "JOUR"), self.handler.context_map)
        self.assertIn(("DA1", "DC1"), self.handler.context_map)
        self.assertIn((None, None), self.handler.context_map)
        self.assertIn(("SYS", None), self.handler.context_map)
        self.assertIn((None, "DC1"), self.handler.context_map)
        try:
            # 60 == 10 messages each for queue_id0..queue_id3 (their specific
            #       filters) + 20 for queue_id4, which matches everything
            messages = [
                self.message_queue.get(timeout=0.01) for _ in range(60)
            ]

            # these queues should not get any messages from other queues
            self.assertEqual(
                len([msg for qid, msg in messages if qid == 'queue_id0']), 10)
            self.assertEqual(
                len([msg for qid, msg in messages if qid == 'queue_id1']), 10)
            self.assertEqual(
                len([msg for qid, msg in messages if qid == 'queue_id2']), 10)
            self.assertEqual(
                len([msg for qid, msg in messages if qid == 'queue_id3']), 10)
            # this queue should get all messages
            self.assertEqual(
                len([msg for qid, msg in messages if qid == 'queue_id4']), 20)
        except Empty:
            # - the queue should hold exactly 60 messages, so Empty means failure
            self.fail()
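
The six.PY2 branch in setUp above exists because the low-level multiprocessing.queues.Queue class grew a required ctx argument in Python 3; a minimal demonstration:

from multiprocessing import get_context
from multiprocessing.queues import Queue

ctx = get_context()  # platform default start method
q = Queue(ctx=ctx)   # Python 3 requires the explicit context here
q.put(1)
print(q.get())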
예제 #39
0
 def put(self, obj, block=True, timeout=None):
     Queue.put(self, (self._id, obj), block=block, timeout=timeout)
예제 #40
0
import threading
from multiprocessing.queues import Queue

q = Queue()
for s in ["First", "Second", "Third"]: q.put(s)

class MyThread(threading.Thread):
    def run(self):
        while True:
            job_text = q.get()
            if (job_text == "quit"):
                return
            print "Job:" + job_text
        
thread = MyThread()
thread.start()

q.put("Another One")
q.put("quit")
thread.join()
print "Done"
예제 #41
0
class NonBlockSubprocess(object):
    CHUNK_SIZE_DEFAULT = 4096
    STOP_THREAD = b"STOP_THREAD_SYNTAX"

    """
    NonBlockSubprocess support read, write data via Queue.

    Parameters
    ----------
    process: Subprocess
        Sub process still alive

    chunk_size: int
        Size of chunk of reader process. (Default) CHUNK_SIZE_DEFAULT=4096

    Raises
    ---------
    ValueError:
        chunk size <= 0. Because read process will blocking if chunk_size <= 0.

    TypeError:
        process wrong type.

    RuntimeError:
        Process haven't any IO.
    """

    def __init__(self, process: Subprocess, chunk_size=CHUNK_SIZE_DEFAULT):
        if not isinstance(process, Subprocess):
            raise TypeError("process must be Subprocess")

        if not process.is_alive():
            raise ValueError("Process isn't running.")

        if chunk_size <= 0:
            raise ValueError("Chunk size must be > 0.")

        if process.stdout is None and process.stdin is None:
            raise RuntimeError("Process IO is unavailable.")

        self.process = process
        self.chunk_size = chunk_size
        self.read_buffer_cache = b""

        if self.process.stdin is not None:
            self.queue_write = Queue()
            self.thread_write = Thread(target=self._write)
            self.thread_write.start()
        else:
            self.queue_write = None
            self.thread_write = None

        if self.process.stdout is not None:
            self.queue_read = Queue()
            self.thread_read = Thread(target=self._read)
            self.thread_read.start()
        else:
            self.queue_read = None
            self.thread_read = None

    def _write(self):
        if self.queue_write is None:
            return

        while 1:
            data = self.queue_write.get()
            if data == self.STOP_THREAD:
                break

            self.process.write(data)

    def write(self, data):
        if self.queue_write is None:
            raise AttributeError("Write data unavailable!")

        self.queue_write.put(data)

    def _read(self):
        if self.queue_read is None:
            return

        while 1:
            data = self.process.read(self.chunk_size)
            self.queue_read.put(data)

    def read(self, chunk_size=-1, timeout=None):
        """
        Read exactly chunk_size bytes, buffering any excess for the next call.
        :param chunk_size: number of bytes to return
        :param timeout: per-chunk timeout passed to Queue.get
        :return: bytes
        """
        if self.queue_read is None:
            raise AttributeError("Read data unavailable!")

        chunk = self.read_buffer_cache
        while len(chunk) < chunk_size:
            chunk += self.queue_read.get(timeout=timeout)

        self.read_buffer_cache = chunk[chunk_size:]
        return chunk[:chunk_size]

    def stop(self):
        """
        Stop read/write via queue. Not handle process.
        :return:
        """
        if self.queue_read is not None:
            self.queue_read.put(self.STOP_THREAD)

        if self.queue_write is not None:
            self.queue_read.put(self.STOP_THREAD)

        self.process.terminate()
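
The same non-blocking idea, adapted as a sketch to the standard library's subprocess (the Subprocess class used above is not shown here; this assumes a Unix-like system with cat available):

import subprocess
from queue import Queue
from threading import Thread

proc = subprocess.Popen(["cat"], stdin=subprocess.PIPE, stdout=subprocess.PIPE)
out_q = Queue()


def pump():
    while True:
        data = proc.stdout.read1(4096)  # at most one read; b"" means EOF
        out_q.put(data)
        if not data:
            break


Thread(target=pump, daemon=True).start()
proc.stdin.write(b"hello\n")
proc.stdin.flush()
print(out_q.get())  # b'hello\n' echoed back by cat
proc.stdin.close()
proc.wait()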
예제 #42
0
파일: Output.py 프로젝트: allista/BioUtils
 def put(self, obj, block=True, timeout=None):
     Queue.put(self, (self._id, obj), block=block, timeout=timeout)
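
A self-contained sketch of the tagging pattern in this snippet: wrap items as (id, obj) so one consumer can demultiplex several producers. The _id attribute is assumed to be set at construction; note that extra attributes like these are not preserved when a queue is inherited by a child process.

from multiprocessing import get_context
from multiprocessing.queues import Queue


class TaggedQueue(Queue):
    def __init__(self, qid, ctx=None):
        ctx = ctx or get_context()
        Queue.__init__(self, ctx=ctx)
        self._id = qid

    def put(self, obj, block=True, timeout=None):
        Queue.put(self, (self._id, obj), block=block, timeout=timeout)


if __name__ == '__main__':
    q = TaggedQueue('sensor-1')
    q.put({'t': 21.5})
    print(q.get())  # ('sensor-1', {'t': 21.5})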
예제 #43
0
파일: smp.py 프로젝트: jucabot/polymr
class MultiCoreEngine():
    
    _mapred = None
    
    _out_queue = None
    _in_queue = None
    _log_queue = None
        
    _processes = None
    
        
    def __init__(self,mapred):
        self._mapred = mapred
            
    def _start(self,name,cpu, module_name, class_name, params):
        fn = None
        
        self._processes = []
        self._in_queue = Queue()
        self._out_queue = Queue()
        self._log_queue = Queue()
        
        if name == "mapper":
            fn = q_run_mapper
        elif name == "reducer":
            fn = q_run_reducer
        
        for i in range(cpu):
            process = Process(target=fn,args=(module_name, class_name ,params, self._in_queue, self._out_queue, self._log_queue))
            self._processes.append(process)
            process.start()
    
    def _stop(self):
        
        for process in self._processes:
            self._in_queue.put("STOP")
        
        while not self._log_queue.empty():
            print self._log_queue.get()
    
    def _get_data_chunks(self):
        chunks = []
        for process in self._processes:
            chunks.append(self._out_queue.get())
        
        return chunks
    
    def _set_data_chunks(self, chunks):
        
        map(self._in_queue.put,chunks)
        
                        
    def _send_lines(self,lines, cpu, lines_len ):
        line_splits = [lines[i* lines_len / cpu : (i+1)* lines_len / cpu] for i in range(cpu) ]
                    
        for i in range(cpu): 
            self._in_queue.put(line_splits[i])
    
    def _terminate(self):
        for process in self._processes:
            process.join()
            process.terminate()
                
        self._in_queue.close()
        self._out_queue.close()
        self._processes = None
        
    def _force_terminate(self):
        for process in self._processes:
            process.terminate()
            
    def _merge_data(self, data):
       
        self._mapred.data = merge_kv_dict(self._mapred.data,data)
                
    def _merge_reduced_data(self, data):
       
        self._mapred.data_reduced = merge_kv_dict(self._mapred.data_reduced,data)
                
    def _split_data(self, num_splits):
        splits = []
        index = 0
        
        len_data = len(self._mapred.data)
        
        chunk_len = int(math.ceil(len_data / float(num_splits)))
        
        if chunk_len == 0:
            splits.append(self._mapred.data)
        else:        
            for i in range(int(math.ceil(len_data/float(chunk_len)))):
                splits.append({})
                
            for (key, value) in self._mapred.data.items():
                
                i = int(math.floor(index / float(chunk_len)))
                       
                splits[i][key]=value
                
                index = index + 1
        
        return splits
    
    
    def _run_map(self,cpu,cache_line,input_reader ):
        
        self._start("mapper",cpu, self._mapred.__class__.__module__,self._mapred.__class__.__name__ ,self._mapred.params)
    
        try:
            map_len = 0
            lines = []
            lines_len = 0
            f = input_reader.read()
              
            for line in f:
                if  lines_len > 0 and lines_len % cache_line == 0:
                    self._send_lines(lines, cpu, lines_len)        
                    lines = []
                    lines_len = 0
               
                lines.append(line)
                lines_len += 1
                map_len += 1
                 
            input_reader.close()
            
            self._send_lines(lines, cpu, lines_len)
            
            self._stop()
            
            map(self._merge_data, self._get_data_chunks())
                
                
            self._terminate()
            
        
        except Exception,e:
            print "ERROR: Exception while mapping : %s\n%s" % (e,traceback.print_exc())
            self._force_terminate()
             
        return map_len
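
q_run_mapper and q_run_reducer are defined elsewhere in the project; below is a guess at the worker loop they implement, consuming chunks until the "STOP" sentinel and emitting one result dict per process.

def q_run_sketch(in_queue, out_queue, log_queue):
    data = {}
    while True:
        chunk = in_queue.get()
        if chunk == "STOP":
            break
        for line in chunk:
            data[line] = data.get(line, 0) + 1  # stand-in for the map step
    out_queue.put(data)
    log_queue.put("worker done")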
예제 #44
0
class YubiGuard:
    def __init__(self, scrlck_mode=False):
        self.scrlck_mode = scrlck_mode

        self.id_q = Queue()
        self.on_q = Queue()
        self.pi_q = Queue()

        # init processes
        gi_proc = Process(target=self.get_ids)
        gi_proc.daemon = True

        cs_proc = Process(target=self.change_state)
        # no daemon, or main program will terminate before Keys can be unlocked
        cs_proc.daemon = False

        zmq_lis = ZmqListener(
            self.on_q)  # somehow works only with threads, not processes
        zmq_lis_thr = Thread(target=zmq_lis.start_listener)
        zmq_lis_thr.setDaemon(True)

        pi = PanelIndicator(self.pi_q, self.on_q)

        # starting processes and catching exceptions:
        try:
            gi_proc.start()
            cs_proc.start()
            zmq_lis_thr.start()

            pi.run_pi()  # main loop of root process

        except (KeyboardInterrupt, SystemExit):
            print('Caught exit event.')

        finally:
            # send exit signal, will reactivate YubiKey slots
            print('Sending EXIT_SIGNAL')
            self.on_q.put(EXIT_SIGNAL)

    def get_ids(self):
        old_id_l = []
        no_key = True
        pat = re.compile(r"(?:Yubikey.*?id=)(\d+)", re.IGNORECASE)
        while True:
            new_id_l = []
            # get list of xinput device ids and extract those of YubiKeys:

            xinput = shell_this('xinput list')
            matches = re.findall(pat, xinput)
            new_id_l.extend(matches)
            new_id_l.sort()

            if not new_id_l and not no_key:
                self.pi_q.put(NOKEY_SIGNAL)
                print('No YubiKey(s) detected.')
                no_key = True
            elif new_id_l and no_key:
                self.pi_q.put(OFF_SIGNAL)
                print('YubiKey(s) detected.')
                no_key = False
                # notify:
                msg_cmd = """notify-send --expire-time=2000 \
                'YubiKey(s) detected.'"""
                shell_this(msg_cmd)

            if new_id_l != old_id_l:
                print('Change in YubiKey ids detected. From {} to {}.'.format(
                    old_id_l, new_id_l))
                self.id_q.put(new_id_l)

                # lock screen if screenlock and YubiKey is removed:
                if self.scrlck_mode and len(new_id_l) < len(old_id_l):
                    print('Locking screen.')
                    shell_this(get_scrlck_cmd())  # execute screen lock command

            old_id_l = new_id_l

            time.sleep(.1)

    def turn_keys(self, id_l, lock=True):
        # problem of value loss of cs_id_l was found in this function
        tk_id_l = id_l
        if lock:
            print('Locking YubiKey(s).')
            state_flag = '0'
            self.pi_q.put(OFF_SIGNAL)
        else:
            print('Unlocking YubiKey(s).')
            state_flag = '1'
            self.pi_q.put(ON_SIGNAL)

        shell_this('; '.join(["xinput set-int-prop {} \"Device Enabled\" 8 {}".
                              format(tk_id, state_flag) for tk_id in tk_id_l]))

    def check_state(self, check_id_l):
        # check if all states have indeed changed:
        pat = re.compile(r"(?:Device Enabled.+?:).?([01])", re.IGNORECASE)

        for tk_id in check_id_l:
            sh_out = shell_this('xinput list-props {}'.format(tk_id))
            match = re.search(pat, sh_out)
            if match:
                if match.group(1) != '0':
                    return False

    def change_state(self):
        cs_id_l = []
        cs_signal = ''

        while True:
            # retrieve input from queues
            while self.id_q.qsize() > 0:
                cs_id_l = self.id_q.get()

            while self.on_q.qsize() > 0:
                cs_signal = self.on_q.get()
                # not accepting any more signals
                if cs_signal == EXIT_SIGNAL:
                    self.turn_keys(cs_id_l, lock=False)
                    sys.exit(0)

            # lock/unlock
            if cs_id_l:
                if cs_signal == ON_SIGNAL:
                    self.turn_keys(cs_id_l, lock=False)

                    mon_thread = Thread(
                        target=self.yk_monitor, args=(cs_id_l, ))
                    mon_thread.start()
                    mon_thread.join()

                    # putting in separator, nullifying all preceding ON_SIGNALS
                    # to prevent possible over-triggering:
                    self.on_q.put('')

                elif self.check_state(
                        cs_id_l) is False:  # lock keys if they are unlocked
                    self.turn_keys(cs_id_l, lock=True)
            # reset state to prevent continued unlocking/locking
            cs_signal = ''
            time.sleep(.01)

    def yk_monitor(self, mon_l):
        # forming command to run parallel monitoring processes
        mon_cmd = ' & '.join(["xinput test {}".format(y_id) for y_id in mon_l])
        monitor = subprocess.Popen(mon_cmd, shell=True, stdout=subprocess.PIPE)

        stdout_queue = Queue()
        stdout_reader = AsynchronousFileReader(monitor.stdout, stdout_queue)
        stdout_reader.start()

        triggered = False
        timestamp = time.time()
        while not stdout_reader.eof and time.time() - timestamp < TIMEOUT:
            while stdout_queue.qsize() > 0:
                stdout_queue.get()  # emptying queue
                triggered = True
                time.sleep(.04)
            if triggered:
                print('YubiKey triggered. Now disabling.')
                break

            time.sleep(.001)
        if not triggered:
            print('No YubiKey triggered. Timeout.')
예제 #45
0
 def put(self, *args, **kwargs):
     # If the put fails, the exception will prevent us from incrementing the counter
     Queue.put(self, *args, **kwargs)
     with self._lock:
         self._counter.value += 1
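
Filling in the context around that method as a sketch: a queue that counts successful puts in a shared Value. The _lock and _counter names come from the snippet; everything else is assumed.

from multiprocessing import get_context
from multiprocessing.queues import Queue


class CountingQueue(Queue):
    def __init__(self, ctx=None):
        ctx = ctx or get_context()
        Queue.__init__(self, ctx=ctx)
        self._lock = ctx.Lock()
        self._counter = ctx.Value('i', 0)

    def put(self, *args, **kwargs):
        # If the put raises, we never reach the increment
        Queue.put(self, *args, **kwargs)
        with self._lock:
            self._counter.value += 1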
예제 #46
0
import threading
import trace
import WorkerClass
import multiprocessing
from multiprocessing import Queue
from typing import List, Any
import audioop
"""-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*"""
frames = []
global frames2
frames2 = []

is_on = False
q = Queue()
q2 = Queue()
q2.put(True)
q3 = Queue()
q3.put(True)


def main():
    def proc_start():
        p_to_start = multiprocessing.Process(target=Worker.NoiseGate,
                                             args=(q, q3))
        p_to_start.start()
        return p_to_start

    def proc_stop(p_to_stop):
        p_to_stop.terminate()
        print("after Termination")