Example #1
    def __init__(self, config, event_queue, debug, rib, policy_handler, test,
                 no_notifications, rib_timing, notification_timing):
        super(LoopDetector, self).__init__(config, event_queue, debug)

        self.config = config

        self.cib = CIB(self.config.id)
        self.rib = rib
        self.policy_handler = policy_handler

        # mapping of participant and prefix to list of forbidden forward participants
        self.forbidden_paths = defaultdict(lambda: defaultdict(list))

        self.run = False
        self.listener = Listener(
            (self.config.sdx.address, self.config.loop_detector.port),
            authkey=None)
        self.msg_in_queue = Queue(1000)
        self.msg_out_queue = Queue(1000)

        self.no_notifications = no_notifications

        self.rib_timing = rib_timing
        if self.rib_timing:
            self.rib_timing_file = 'rib_timing_' + str(int(time())) + '.log'

        self.notification_timing = notification_timing
        if self.notification_timing:
            self.notification_timing_file = 'notification_timing_' + str(
                int(time())) + '.log'
Example #2
 def compute_sp(self):
     from queue import Queue
     queue = Queue()
     datalen = len(self.D['coords'])
     self(queue,0,datalen, True, False)
     self(queue,0,datalen, False, False)
     return queue.get() + queue.get()
Example #3
class TestBlockingMethods(unittest.TestCase):
    def setUp(self):
        self.quiet=True
        self.random = Random()
        self._timers = []
        self.namespaces = []
        self.iface = PyRQIface(quiet=self.quiet, ref="test")
        self.dummyQueue = Queue()
        self.marshaller = MarshallerFactory.get(MarshallerFactory.DEFAULT, quiet=self.quiet)
        desiredPort = "19001"
        self.r = SubprocessQueueServer(
                                       desiredPort=desiredPort,
                                       handlerClazz=Linkage.create(MockHandler),
                                       quiet=self.quiet
#           includePydevd="/home/francis/.eclipse/org.eclipse.platform_3.7.0_155965261/plugins/org.python.pydev.debug_2.5.0.2012040618/pysrc"
           )
        PyRQIface.setGlobalPYRQ(self.r.details())
        self.r.start().waitUntilRunning()
        pass
    def tearDown(self):
        try:
            self.dummyQueue.close()
            del self.dummyQueue
        except Exception as _e:
            pass
        for namespace in self.namespaces:
            self.iface.setNamespace(namespace)
        try:    self.iface.close()
        except ClosedError as _e:
            pass
Example #4
def main():
    result_queue = Queue()
    crawler = CrawlerWorker(CanberraWealtherSpider(), result_queue)
    crawler.start()
    for item in result_queue.get():
        #print datetime.datetime.now(),item
        print(item)
Example #5
class Multiprocess(object):
    # THE COMPLICATION HERE IS CONNECTING THE DISPARATE LOGGING TO
    # A CENTRAL POINT


    def __init__(self, functions):
        self.outbound = Queue()
        self.inbound = Queue()
        self.inbound = Queue()

        #MAKE

        #MAKE THREADS
        self.threads = []
        for t, f in enumerate(functions):
            thread = worker(
                "worker " + unicode(t),
                f,
                self.inbound,
                self.outbound,
            )
            self.threads.append(thread)


    def __enter__(self):
        return self

    #WAIT FOR ALL QUEUED WORK TO BE DONE BEFORE RETURNING
    def __exit__(self, a, b, c):
        try:
            self.inbound.close() # SEND STOPS TO WAKE UP THE WORKERS WAITING ON inbound.pop()
        except Exception as e:
            Log.warning("Problem adding to inbound", e)

        self.join()
Example #6
def parallel_process_by_queue(num_p,
                              data_iter,
                              target,
                              args,
                              ctx: BaseContext = None,
                              task_unit_size=5000,
                              print_out=__debug__):
    if isinstance(target, MPTarget):
        target.use_queue = True
    if ctx is None:
        ctx = get_context('spawn')
    iq = Queue(ctx=ctx)
    oq: Manager = ctx.Manager().Queue()

    tic(f"Creating input queue with task unit size {task_unit_size}",
        verbose=print_out)
    cnt_task_unit = 0
    for item in tqdm(slices__(data_iter, task_unit_size)):
        iq.put(item)
        cnt_task_unit += 1
    jobs = [None] * num_p
    for i in range(num_p):
        jobs[i] = ctx.Process(target=target, args=(i, iq, oq) + args)
    toc()

    tic(f"Working on {cnt_task_unit} task units with {num_p} processes",
        verbose=print_out)
    start_and_wait_jobs(jobs)

    out = []
    while not oq.empty():
        out.append(oq.get_nowait())
    toc()
    return out
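A minimal stdlib-only sketch of the same input-queue/output-queue fan-out shown above; the names here (fan_out, _double) are hypothetical and stand in for MPTarget, slices__ and the tic/toc timers, which are not standard-library helpers:

from multiprocessing import get_context

def _double(wid, iq, oq):
    # Worker: consume items until a None "poison pill" arrives.
    while True:
        item = iq.get()
        if item is None:
            break
        oq.put(item * 2)

def fan_out(data, num_p=4):
    ctx = get_context('spawn')
    iq, oq = ctx.Queue(), ctx.Queue()
    data = list(data)
    for item in data:
        iq.put(item)
    for _ in range(num_p):
        iq.put(None)                      # one pill per worker
    procs = [ctx.Process(target=_double, args=(i, iq, oq)) for i in range(num_p)]
    for p in procs:
        p.start()
    out = [oq.get() for _ in range(len(data))]   # drain before joining
    for p in procs:
        p.join()
    return out

if __name__ == '__main__':
    print(sorted(fan_out(range(10))))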
Example #7
 def put(self, contents: QT, block=True, timeout=None):
     self.lock.acquire()
     while not self.empty():
         Queue.get(self, block=False)
     # NOTE/TODO: this, because multiprocessing Queues are stupid, is
     # necessary. Explained in short, if you try to q.put_nowait() too
     # quickly, it breaks. For example, say you were in ipython,
     # and you typed the following
     # - q = MonoQueue()
     # - q.put_nowait(2)
     # - q.put_nowait(3)
     # - q.put_nowait(4)
     # - q.put_nowait(5)
     # EVEN THOUGH there is a Lock() to atomize the access to the Queue,
     # one of the non-first 'put_nowait()' calls will acquire the lock,
     # but the 'self.empty()' call apparently returns True even though
     # something is actually in the queue, so it does not '.get()' it, and
     # trying to put something into the queue then raises a 'Full' exception.
     # So basically, apparently if something tries to put in the queue too
     # quickly, everything breaks. And yes, I made a pytest to test this,
     # guess what, if you try to run a trace (debugger), aka you just step
     # through, it works fine, but as soon as you just run it, it breaks.
     # UGH, maybe I'm dumb and am doing something wrong
     with suppress(Full):
         Queue.put(self, contents, block=block, timeout=timeout)
     self.lock.release()
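As the comment above notes, multiprocessing.Queue.empty() is only approximate (the documentation describes qsize/empty/full as not reliable), so a drain loop that trusts it can race with an in-flight put. Below is a minimal sketch of the same keep-only-the-latest idea that avoids empty() entirely; put_latest is a hypothetical free function that only mirrors the put() method above:

from contextlib import suppress
from multiprocessing import Queue
from queue import Empty, Full

def put_latest(q: Queue, item) -> None:
    # Discard whatever is already queued, then publish the newest value.
    with suppress(Empty):
        while True:
            q.get_nowait()
    with suppress(Full):
        q.put_nowait(item)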
Example #8
 def compute_sp(self):
     from Queue import Queue
     queue = Queue()
     datalen = len(self.D['coords'])
     self(queue, 0, datalen, True, False)
     self(queue, 0, datalen, False, False)
     return queue.get() + queue.get()
Example #9
    def _calculate_rmse_mp(self, population, process_count):
        i = 0
        process_pop = dict()
        while i < len(population):
            for j in range(process_count):
                if str(j) not in process_pop.keys():
                    process_pop[str(j)] = []

                if i < len(population):
                    process_pop[str(j)].append(population[i])
                    i += 1

        final_population = []
        queue = Queue()
        processes = []
        for i in range(process_count):
            pop = process_pop[str(i)]

            process = Process(target=self._calculate_rmse,
                              name="%d" % i,
                              args=(pop, queue))
            process.start()
            processes.append(process)

        for i in range(process_count):
            final_population += queue.get()

        for process in processes:
            process.join()

        return final_population
Example #10
 def __init__(self, name=None):
     self.name = name
     self._state = None
     ctx = SpawnContext()
     self.alive = Value('b', True)
     self.in_queue = Queue(ctx=ctx, maxsize=120)
     self.out_queue = Queue(ctx=ctx, maxsize=110)
Example #11
def main():
    result_queue = Queue()
    crawler = CrawlerWorker(CanberraWealtherSpider(), result_queue)
    crawler.start()
    for item in result_queue.get():
        #print datetime.datetime.now(),item
        print(item)
Example #12
class Multiprocess(object):
    # THE COMPLICATION HERE IS CONNECTING THE DISPARATE LOGGING TO
    # A CENTRAL POINT

    def __init__(self, functions):
        self.outbound = Queue()
        self.inbound = Queue()
        self.inbound = Queue()

        #MAKE

        #MAKE THREADS
        self.threads = []
        for t, f in enumerate(functions):
            thread = worker(
                "worker " + unicode(t),
                f,
                self.inbound,
                self.outbound,
            )
            self.threads.append(thread)

    def __enter__(self):
        return self

    #WAIT FOR ALL QUEUED WORK TO BE DONE BEFORE RETURNING
    def __exit__(self, a, b, c):
        try:
            self.inbound.close()  # SEND STOPS TO WAKE UP THE WORKERS WAITING ON inbound.pop()
        except Exception as e:
            Log.warning("Problem adding to inbound", e)

        self.join()
Example #13
    def process_init(self):
        self.event_queue = Queue(1000)
        self.event_queue_name = str(id(self))
        PmakeManager.queues[self.event_queue_name] = self.event_queue

        # info('Starting %d processes' % self.num_processes)

        self.subs = {}  # name -> sub
        # available + processing + aborted = subs.keys
        self.sub_available = set()
        self.sub_processing = set()
        self.sub_aborted = set()

        db = self.context.get_compmake_db()
        storage = db.basepath  # XXX:
        logs = os.path.join(storage, 'logs')

        #self.signal_queue = Queue()

        for i in range(self.num_processes):
            name = 'parmake_sub_%02d' % i
            write_log = os.path.join(logs, '%s.log' % name)
            make_sure_dir_exists(write_log)
            signal_token = name

            self.subs[name] = PmakeSub(name=name,
                                       signal_queue=None,
                                       signal_token=signal_token,
                                       write_log=write_log)
        self.job2subname = {}
        # all are available
        self.sub_available.update(self.subs)

        self.max_num_processing = self.num_processes
Example #14
    def process_init(self):
        self.event_queue = Queue()
        self.event_queue_name = str(id(self))
        from compmake.plugins.backend_pmake.pmake_manager import PmakeManager
        PmakeManager.queues[self.event_queue_name] = self.event_queue

        # info('Starting %d processes' % self.num_processes)

        self.subs = {}  # name -> sub
        # available + processing + aborted = subs.keys
        self.sub_available = set()
        self.sub_processing = set()
        self.sub_aborted = set()

        self.signal_queue = Queue()

        db = self.context.get_compmake_db()
        storage = db.basepath  # XXX:
        logs = os.path.join(storage, 'logs')
        for i in range(self.num_processes):
            name = 'w%02d' % i
            write_log = os.path.join(logs, '%s.log' % name)
            make_sure_dir_exists(write_log)
            signal_token = name
            self.subs[name] = PmakeSub(name, 
                                       signal_queue=self.signal_queue,
                                       signal_token=signal_token,
                                       write_log=write_log)
        self.job2subname = {}
        self.subname2job = {}
        # all are available at the beginning
        self.sub_available.update(self.subs)

        self.max_num_processing = self.num_processes
Example #15
 def _setup(self):
     if isinstance(self._instruction, I.ATM):
         self._code = self._instruction.code
         self._instruction_pipelines.append(Pipeline(self._code.instructions, self._tables))
         self._atomic_process = Process(target=self.run_atomic)
         self._is_atomic_enabled = True
     elif isinstance(self._instruction, I.SEQ):
         self._code = self._instruction.code
         self._instruction_pipelines.append(Pipeline(self._code.instructions, self._tables))
         self._sequential_ingress_process = Process(target=self.run_sequential_ingress)
         self._sequential_egress_process = Process(target=self.run_sequential_egress)
         self._metadata_queue = Queue()
         self._is_sequential_enabled = True
     elif isinstance(self._instruction, I.CNC):
         # Note: CNC can't have PUSH/POP instructions in its code blocks. They violate the concurrency invariant.
         self._codes = self._instruction.codes
         self._modified_locations = []
         self._modified_reserved_fields = []
         self._modified_fields = []
         for code in self._codes:
             self._instruction_pipelines.append(Pipeline(code.instructions, self._tables))
             self._modified_locations.append(get_modified_locations(code.instructions))
             self._modified_reserved_fields.append(get_modified_reserved_fields(code.instructions))
             self._modified_fields.append(get_modified_fields(code.instructions, code.argument_fields))
         self._concurrent_ingress_process = Process(target=self.run_concurrent_ingress)
         self._concurrent_egress_process = Process(target=self.run_concurrent_egress)
         self._metadata_queue = Queue()
         self._is_concurrent_enabled = True
     else:
         raise RuntimeError()
Example #16
    def __init__(self, process: Subprocess, chunk_size=CHUNK_SIZE_DEFAULT):
        if not isinstance(process, Subprocess):
            raise TypeError("process must be Subprocess")

        if not process.is_alive():
            raise ValueError("Process wasn't working.")

        if chunk_size <= 0:
            raise ValueError("Chunk size must be > 0.")

        if process.stdout is None and process.stdin is None:
            raise RuntimeError("Process IO are unavailable.")

        self.process = process
        self.chunk_size = chunk_size
        self.read_buffer_cache = b""

        if self.process.stdin is not None:
            self.queue_write = Queue()
            self.thread_write = Thread(target=self._write)
            self.thread_write.start()
        else:
            self.queue_write = None
            self.thread_write = None

        if self.process.stdout is not None:
            self.queue_read = Queue()
            self.thread_read = Thread(target=self._read)
            self.thread_read.start()
        else:
            self.queue_read = None
            self.thread_read = None
Example #17
    def force_stop(self):
        self._stop_requested = True
        # Just in case the user calls this directly. Will signal all threads to
        # stop (save _com_thread).
        self._stop.set()
        self._log_queue.put(None)
        self._monitor_queue.put((None, None))

        # Set _force_stop to stop _com_thread.
        self._force_stop.set()

        # Terminate the process and make sure all threads stopped properly.
        self._process.terminate()
        self._log_thread.join()
        self._monitor_thread.join()
        self._com_thread.join()
        self.active = False
        if self._processing.is_set():
            self.done = ('INTERRUPTED', 'The user forced the system to stop')
            self._processing.clear()

        # Discard the queues as they may have been corrupted when the process
        # was terminated.
        self._log_queue = Queue()
        self._monitor_queue = Queue()
Example #18
class WorkerThreads(object):
    def __init__(self, threads=1):
        """
        Initialize the thread pool and queues.
        """
        self.pools = ThreadPool(processes=threads)
        self.updater_queue = Queue()

    def get_updater_queue(self):
        return self.updater_queue

    def updater(self, ident, state, meta):
        """
        Updater function: This just posts a message to a queue.
        """
        self.updater_queue.put({'id': ident, 'state': state, 'meta': meta})

    def pull(self, request, updater, testmode=0):
        try:
            pull(request, updater, testmode=testmode)
        except Exception as err:
            resp = {'error_type': str(type(err)),
                    'message': str(err)}

            updater.update_status('FAILURE', 'FAILURE', response=resp)

    def expire(self, request, updater):
        try:
            remove_image(request, updater)
        except Exception as err:
            resp = {'error_type': str(type(err)),
                    'message': str(err)}
            updater.update_status('FAILURE', 'FAILURE', response=resp)

    def wrkimport(self, request, updater, testmode=0):
        try:
            img_import(request, updater, testmode=testmode)
        except Exception as err:
            resp = {'error_type': str(type(err)),
                    'message': str(err)}
            updater.update_status('FAILURE', 'FAILURE', response=resp)

    def dopull(self, ident, request, testmode=0):
        """
        Kick off a pull operation.
        """
        updater = Updater(ident, self.updater)
        self.pools.apply_async(self.pull, [request, updater],
                               {'testmode': testmode})

    def doexpire(self, ident, request, testmode=0):
        updater = Updater(ident, self.updater)
        self.pools.apply_async(self.expire, [request, updater])

    def dowrkimport(self, ident, request, testmode=0):
        logging.debug("wrkimport starting")
        updater = Updater(ident, self.updater)
        self.pools.apply_async(self.wrkimport, [request, updater],
                               {'testmode': testmode})
Example #19
class WorkerThreads(object):
    def __init__(self, threads=1):
        """
        Initialize the thread pool and queues.
        """
        self.pools = ThreadPool(processes=threads)
        self.updater_queue = Queue()

    def get_updater_queue(self):
        return self.updater_queue

    def updater(self, ident, state, meta):
        """
        Updater function: This just posts a message to a queue.
        """
        self.updater_queue.put({'id': ident, 'state': state, 'meta': meta})

    def pull(self, request, updater, testmode=0):
        try:
            pull(request, updater, testmode=testmode)
        except Exception as err:
            resp = {'error_type': str(type(err)),
                    'message': str(err)}

            updater.update_status('FAILURE', 'FAILURE', response=resp)

    def expire(self, request, updater):
        try:
            remove_image(request, updater)
        except Exception as err:
            resp = {'error_type': str(type(err)),
                    'message': str(err)}
            updater.update_status('FAILURE', 'FAILURE', response=resp)

    def wrkimport(self, request, updater, testmode=0):
        try:
            img_import(request, updater, testmode=testmode)
        except Exception as err:
            resp = {'error_type': str(type(err)),
                    'message': str(err)}
            updater.update_status('FAILURE', 'FAILURE', response=resp)

    def dopull(self, ident, request, testmode=0):
        """
        Kick off a pull operation.
        """
        updater = Updater(ident, self.updater)
        self.pools.apply_async(self.pull, [request, updater],
                               {'testmode': testmode})

    def doexpire(self, ident, request, testmode=0):
        updater = Updater(ident, self.updater)
        self.pools.apply_async(self.expire, [request, updater])

    def dowrkimport(self, ident, request, testmode=0):
        logging.debug("wrkimport starting")
        updater = Updater(ident, self.updater)
        self.pools.apply_async(self.wrkimport, [request, updater],
                               {'testmode': testmode})
Example #20
 def setUp(self):
     pg_connector = PostgresConnector(_POSTGRES_DSN)
     self.notif_queue = Queue(1)
     self.listener = PostgresNotificationListener(
         pg_connector, _NOTIF_CHANNEL, self.notif_queue,
         CommonErrorStrategy(), Queue(), fire_on_start=False
     )
     self.listener.log = MagicMock()
Example #21
def empty_queue(queue_: Queue) -> None:
    while True:
        try:
            queue_.get(block=False)
        except queue.Empty:
            break

    queue_.close()
Example #22
 def setUp(self):
     self.notif_queue = Queue(1)
     self.error_queue = Queue()
     self.component = Component(self.notif_queue._reader,
                                CommonErrorStrategy(),
                                self.error_queue,
                                PostgresConnector(_POSTGRES_DSN))
     self.component.log = MagicMock()
Example #23
 def __init__(self, name, machine, ready_func, workers=1):
     self.id = name
     self.machine = machine
     self.ready_func = ready_func
     self.name = name
     self.readq = Queue(maxsize=-1, ctx=multiprocessing.get_context())
     self.num_workers = max(1, workers)
     self.workers = []
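The ctx= keyword seen here (and in several other examples) is needed because multiprocessing.queues.Queue, the class, takes its context explicitly; the multiprocessing.Queue() factory normally supplies it for you. A small sketch of the two equivalent spellings, for illustration only:

import multiprocessing
from multiprocessing.queues import Queue as QueueClass

q1 = multiprocessing.Queue(maxsize=-1)                           # factory picks the default context
q2 = QueueClass(maxsize=-1, ctx=multiprocessing.get_context())   # the class needs ctx= passed in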
Example #24
    def __enter__(self):
        self.smtp_process_queue = Queue()
        self.smtp_process = Process(
            target=get_otp_mail, args=(self.smtp_process_queue, self.timeout))
        self.smtp_process.start()
        self.port = self.smtp_process_queue.get(True, 5)
        self._do_lintop_config()

        return self
Example #25
 def __init__(self, maxsize=0):
     Queue.__init__(self, maxsize, ctx=multiprocessing)
     self.__timer_ctrl_func = None
     self._flow_ctrl = icom_flow_ctrl.flow_ctrl(current_thread().ident)
     self._win_msg = None
     self.__buffer_msg = []
     self._flow_ctrl.set_timer_req_function(self.req_timer_ctrl_Q_cmd)
     self._flow_ctrl.set_flow_ctrl_req_function(self.req_flow_ctrl_Q_cmd)
     self._flow_ctrl.set_enable(True)
Example #26
 def __init__(self, maxsize=0):
     """Constructor
     
     Parameters:
      maxsize -- Maximum size of this Pile
     """
     Queue.__init__(self, maxsize)
     self._tasks = RawValue('i',0)
     self._tasks_lock = Lock()
Example #27
    def put(self, obj, block=True, timeout=None):
        Queue.put(self, obj, block, timeout)
        self._put_counter.value += 1

        if self.qsize() != 0:
            self.cond_notempty.acquire()
            try:
                self.cond_notempty.notify_all()
            finally:
                self.cond_notempty.release()
Example #28
 def setUp(self):
     self.filter_queue = Queue()
     self.message_queue = Queue()
     self.client_cfg = {"ip_address": b"127.0.0.1",
                        "filename": b"/dev/null",
                        "verbose": 0,
                        "port": "1234",
                        }
     self.stop_event = Event()
     self.handler = DLTMessageHandler(self.filter_queue, self.message_queue, self.stop_event, self.client_cfg)
Example #29
 def start(self, url):
     # raise BadFormatError
     items = []
     # The part below can be called as often as you want        
     results = Queue()
     crawler = CrawlerWorker(LinkedinSpider(url), results)
     crawler.start()
     for item in results.get():
         items.append(dict(item))
     return items
Example #30
def put_while(queue: Queue,
              task: Any,
              predicate: Callable[[], bool],
              timeout: int = 3):
    while predicate():
        try:
            queue.put(task, block=True, timeout=timeout)
            break
        except Full:
            pass
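A hypothetical usage sketch for put_while(): keep retrying the put until a shared stop event is set, so a producer never blocks forever on a full queue (stop and q are assumptions, not part of the example above):

from multiprocessing import Event, Queue

stop = Event()
q = Queue(maxsize=10)
put_while(q, {"job": 1}, predicate=lambda: not stop.is_set(), timeout=1)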
Example #31
class ServerSink(iMockDebuggerSink):
    def __init__(self, peerName, theTime, details, quiet):
        self._peerName = peerName
        self._methods = []
        methods = iMockDebuggerSink()._getMethods()
        self._methods = methods
        self._terminate = False
        self._details = details
        self._qw = None
        self._startMutex = Semaphore(0)
        self._q = Queue()
        self.quiet= quiet
        self._marshaller = MarshallerFactory.get(MarshallerFactory.DEFAULT, quiet=quiet)
        self._qw = QueueWriter(target=details, autoConnect=True, marshaller=self._marshaller, quiet=quiet)
        self._qw.start()
        self.thread = None
    def start(self):
        t = threading.Thread(target=self.run, args=[self._startMutex])
        t.setName("ServerSink.%(P)s"%{"P":self._peerName})
        t.setDaemon(True)
        self.thread = t
        self.thread.start()
        return "server.sink.started"
    def close(self):
        self._terminate = True
        try:    self.thread.join()
        except: pass
        try:    self._qw.close()
        except: pass
        try:    self._q.close()
        except: pass
        return "server.sink.closed"
    def waitUntilRunning(self, block=True, timeout=None):
        self._startMutex.acquire(block=block, timeout=timeout)
        return self
    def __getattribute__(self, name):
        if name in object.__getattribute__(self, "_methods"):
            q = self._q
            def wrapper(self, *args, **kwargs):
                ServerSink._testPickleability((name, args, kwargs))
                q.put((name, args, kwargs))
            return wrapper
        return object.__getattribute__(self, name)
    def run(self, startMutex):
        startMutex.release()
        while self._terminate==False:
            try:
                data = self._q.get(block=True, timeout=1)
            except Empty:   pass
            else:
                ServerSink._testPickleability(data)
                try:
                    self._qw.put(data, block=True, timeout=10)
                except Exception as _e:
                    break
Example #32
def run(worker_fn: Callable[[], DataWorker], max_tasks: int, in_queue: Queue,
        out_queue: Queue, cancel_process: Value):
    # This is the worker function of a spawned process.
    # While the task does not receive a Finished flag, it fetches an input sample which is yielded in the worker.
    # The outputs are then written to the output queue.
    # Each thread is stopping if max_tasks is reached.
    logger.debug("Worker starting")
    worker = worker_fn()
    worker.initialize_thread()

    def generator():
        # Transform the input queue to a generator
        # Note that other processes read from the sample in_queue
        while True:
            try:
                s = in_queue.get(timeout=0.01)
            except Empty:
                logger.debug("In queue empty.")
                if cancel_process.value:
                    logger.debug("Canceling working generator.")
                    break
            else:
                if isinstance(s, Finished):
                    logger.debug(
                        "Received Finished. Stopping working generator")
                    break
                yield s

    for out in worker.process(generator()):
        # Process the data and write it to the queue
        while True:
            if cancel_process.value:
                logger.debug("Canceling working processor (inner).")
                break
            try:
                out_queue.put(out, timeout=0.01)
            except Full:
                logger.debug("Out queue Full.")
                continue
            else:
                break

        if cancel_process.value:
            logger.debug("Canceling working processor (outer).")
            break

        max_tasks -= 1
        if max_tasks == 0:
            logger.debug("Max tasks reached for this worker. Stopping.")
            break

    logger.debug("Worker finished")
    if cancel_process.value:
        out_queue.cancel_join_thread(
        )  # this prevents a deadlock if the generator is stopped prematurely
Example #33
class BasicActor(ABC):
    name: Optional[str]
    in_queue: Queue
    out_queue: Queue
    alive: Value
    _loop_task: Optional[Task]

    def __init__(self, name=None):
        self.name = name
        self._state = None
        ctx = SpawnContext()
        self.alive = Value('b', True)
        self.in_queue = Queue(ctx=ctx, maxsize=120)
        self.out_queue = Queue(ctx=ctx, maxsize=110)

    async def runner(self):
        loop = get_running_loop()
        self._state = await self.handle_started()
        self._loop_task = loop.create_task(self._do_loop(loop))

    async def _do_loop(self, loop: AbstractEventLoop):
        while loop.is_running():
            try:
                sent_from, message = self.in_queue.get(timeout=0.1)
                loop.create_task(
                    self._handle_message(message, sent_from, self._state))
            except Empty:
                pass
            await asyncio.sleep(0)

    def send_message(self, to, message):
        if self.out_queue.qsize() > 100:
            logger.warning("Shedding excess outgoing message")
            return
        self.out_queue.put((to, message))

    async def _handle_message(self, message, sent_from, state):
        try:
            self._state = await self.handle_message(message, sent_from, state)
        except:
            self.stop()
            raise

    def stop(self):
        self.alive.value = False
        if self._loop_task:
            self._loop_task.cancel()

    @abstractmethod
    async def handle_message(self, message, sent_from, state) -> Any:
        pass

    @abstractmethod
    async def handle_started(self) -> Any:
        pass
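A minimal hedged sketch of subclassing BasicActor; EchoActor is hypothetical and relies only on the abstract methods declared above:

class EchoActor(BasicActor):
    async def handle_started(self):
        return {}                                   # initial state

    async def handle_message(self, message, sent_from, state):
        self.send_message(sent_from, message)       # echo the message back
        return state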
Example #34
    def __put(self, obj, block=True, timeout=None):
        if win_gui_Queue.__sync_flow_ctrl_count <= win_gui_Queue.__SAMPLE_SIZE:
            Queue.put(self, obj, block, timeout)
            if win_gui_Queue.__real_proc_data_msg_count > win_gui_Queue.__MAX_MSG_RATE_THREDHOLD:
                self.flow_ctrl(win_gui_Queue.__real_proc_data_msg_count *
                               win_gui_Queue.__SAMPLE_SIZE //
                               (win_gui_Queue.__MAX_MSG_RATE_THREDHOLD))
            return self.do_send_ctrl_msg(ICOM_CTRL_MSG.ID_PROC_DATA_MSG,
                                         win_gui_Queue.__SAMPLE_SIZE)

        self.send_data_sync_msg(obj)
Example #35
 def __init__(
     self,
     name: str = "UdderQueue",
     num_feeders: int = 1,
     context: str = "spawn",
 ):
     self.name = name
     self.parse_arguments(num_feeders, name, context)
      Queue.__init__(self, maxsize=0, ctx=multiprocessing.get_context(context))
     self.num_feeders = num_feeders
      self.lock = Lock()
Example #36
 def launchSequentialProcess(worldRunner, observableQueue, updateQueue,
                             worldNumber, runNumber, useGUI):
     """ Run world in the same process since processes run sequentially. """
     forkQueue = Queue()
     worldRunner(manager, observableQueue, updateQueue, forkQueue,
                 worldNumber, runNumber, useGUI)
     # Empty fork queue
     try:
         while True:
             forkQueue.get(False)
     except:
         pass
Example #37
    def getResult(self,key):
        "This method return the result in imdb for given key"
        spider = ImdbSpider(key)
        result_queue = Queue()
        crawler = CrawlerWorker(spider, result_queue)
        crawler.start()
        results = result_queue.get()

        if len(results)>self.maxResult :
            del results[self.maxResult:]
        logging.debug('%s results', len(results))
        return results
Example #38
    def __init__(self, settings=None):
        """
        Params:
          settings (scrapy.settings.Settings) - settings to apply.  Defaults
        to Scrapy default settings.
        """
        kwargs = {"ctx": __import__("billiard.synchronize")}

        self.results = Queue(**kwargs)
        self.items = []
        self.settings = settings or Settings()
        dispatcher.connect(self._item_scraped, signals.item_scraped)
Example #39
    def __init__(self, pull_interval=5):

        self.input = None
        self.filter = None
        self.output = None

        # for input write and filter read
        self.iqueue = Queue()
        # for filter write and output read
        self.oqueue = Queue()

        self.pull_interval = pull_interval

        self.__init_all()
Example #40
def findCalibrationChessboard(image):
    findTimeout = 10
    patternSize = (7, 7)  # Internal corners of 8x8 chessboard
    grayImg = cv.CreateMat(image.rows, image.cols, cv.CV_8UC1)
    cv.CvtColor(image, grayImg, cv.CV_RGB2GRAY)
    cv.AddWeighted(grayImg, -1, grayImg, 0, 255, grayImg)
    cornerListQueue = Queue()

    def getCorners(idx, inImg, cornersQueue):
        """Search for corners in image and put them in the queue"""
        print "{} Searching".format(idx)
        _, corners = cv.FindChessboardCorners(inImg,
                                        patternSize)
        print "{} found {} corners".format(idx, len(corners))
        saveimg(inImg, name="Chessboard_Search_{}".format(idx))
        cornersQueue.put(corners)

    for i in range(0, 12, 3):
        img = cv.CloneMat(grayImg)
        cv.Erode(img, img, iterations=i)
        cv.Dilate(img, img, iterations=i)

        p = multiprocessing.Process(target=lambda: getCorners(i, img, cornerListQueue))
        p.daemon = True
        p.start()

    corners = []
    while len(corners) != 49 and i > 0:
        corners = cornerListQueue.get(True)
        print "Got Result {}".format(i)
        i -= 1
    if len(corners) == 49:
        # Debug Image
        debugImg = cv.CreateMat(grayImg.rows, grayImg.cols, cv.CV_8UC3)
        cv.CvtColor(grayImg, debugImg, cv.CV_GRAY2RGB)
        for pt in corners:
            pt = (int(pt[0]), int(pt[1]))
            cv.Circle(debugImg, pt, 4, (255, 0, 0))
        saveimg(debugImg, name="Corners_Found")
        # //Debug Image
        # Figure out the correct corner mapping
        points = sorted([corners[42], corners[0], corners[6], corners[48]], key=lambda pt: pt[0] + pt[1])
        if points[1][0] < points[2][0]:
            points[1], points[2] = points[2], points[1]  # swap tr/bl as needed
        (tl, tr, bl, br) = points
        warpCorners = [tl, tr, br, bl]
    else:
        print "Could not find corners"
        warpCorners = []
    return warpCorners
Example #41
 def testDodgyActor(self):
     queue = Queue()
     yield self.spawn(actor_class=DodgyActor, max_requests=1,
                      ioqueue=queue, on_event=on_event)
     proxy = pulsar.get_actor().get_actor(self.a.aid)
     self.assertEqual(proxy.name, 'dodgyactor')
     queue.put(('request', 'Hello'))
     c = 0
     while c < 20:
         if not proxy.is_alive():
             break
         else:
             c += 1
             yield pulsar.NOT_DONE
     self.assertFalse(proxy.is_alive())
Example #42
    def __init__(self):
        self._storage = AuthorsStorage("author_generator_data_former")
        self._r = praw.Reddit(user_agent=choice(USER_AGENTS))
        self._queue = Queue()

        adder = ActionGeneratorDataFormer.AuthorAdder(self._queue, self)
        adder.start()
Example #43
 def _start(self,name,cpu, module_name, class_name, params):
     fn = None
     
     self._processes = []
     self._in_queue = Queue()
     self._out_queue = Queue()
     self._log_queue = Queue()
     
     if name == "mapper":
         fn = q_run_mapper
     elif name == "reducer":
         fn = q_run_reducer
     
     for i in range(cpu):
         process = Process(target=fn,args=(module_name, class_name ,params, self._in_queue, self._out_queue, self._log_queue))
         self._processes.append(process)
         process.start()
Example #44
def test_spy():
    """Test the measure spy working.

    """
    q = Queue()
    data = TaskDatabase()
    spy = MeasureSpy(queue=q, observed_database=data,
                     observed_entries=('test',))

    data.notifier(('test', 1))
    assert q.get()

    data.notifier(('test2', 1))
    assert q.empty()

    spy.close()
    assert q.get() == ('', '')
Example #45
    def __enter__(self):
        self.smtp_process_queue = Queue()
        self.smtp_process = Process(
            target=get_otp_mail, args=(self.smtp_process_queue, self.timeout))
        self.smtp_process.start()
        self.port = self.smtp_process_queue.get(True, 5)
        self._do_lintop_config()

        return self
Example #46
 def __init__(self, solvers, lock):
     self._solvers = solvers
     self._lock = lock
     self._q = Queue()
     self._qDistributor = Queue()
     self._queues = {}
     self._type = SolverImplType.THREADED
     super(AsynchronousSolver, self).__init__()
     self._go()
Example #47
class UDPServer(Process):
    def __init__(self, queue):
        Process.__init__(self, name = "UDPServer")
        #self.daemon = True
        self.queue = queue
        self.shutdownQueue = Queue()
        self.start()

    def __checkShutdown(self):
        try:
            self.shutdownQueue.get(block = False)
        except Empty as _e:
            return self.reactor.callLater(1, self.__checkShutdown)
        self.shutdownQueue.close()

        if self.reactor.running:
            self.reactor.stop()
            self.queue.close()
Example #48
    def get(self, block=True, timeout=None):
        ret = Queue.get(self, block, timeout)
        if self.qsize() == 0:
            self.cond_empty.acquire()
            try:
                self.cond_empty.notify_all()
            finally:
                self.cond_empty.release()

        return ret
Example #49
 def start_work(self, worker, work, num_jobs, *args, **kwargs):
      '''work should be an indexable sequence'''
     wlen = len(work)
     if not wlen: return
     if self._counter is not None: self._counter.set_work(wlen)
     #determine number of jobs to start
     if not num_jobs: num_jobs = cpu_count
     #prepare jobs
     in_queue = Queue(wlen+num_jobs)
     self._jobs = [None]*num_jobs
     for j in xrange(num_jobs):
         queue = Queue()
         job   = UProcess(target=worker, args=(queue, self._abort_event, 
                                               in_queue, work)+args, kwargs=kwargs)
         job.daemon = self._daemonic
         self._jobs[j] = Job(job,queue)
     self.start_jobs()
     for i in xrange(wlen): in_queue.put(i, False)
     for j in xrange(num_jobs): in_queue.put(None, False)
Example #50
def test_multiprocess_tasks():
    wait_until_convenient()
    TAG = "message_q"
    def fetch_task(queue):
        pid = os.getpid()
        count = 0
        for dq in q.listen(TAG, timeout=1):
            s = { 'pid': pid, 'data': dq }
            if dq:
                count += 1
                queue.put(s)
                sleep(uniform(0.1, 0.5)) # sleep 0.1~0.5 seconds randomly
            elif q.count(TAG) == 0:
                return count # the number of tasks done by this process


    test_items = range(0, 10000) # enqueue 10000 tasks
    for i in test_items:
        q.enqueue(TAG, i + 1)

    while q.count(TAG) != len(test_items): # wait until test data is ready
        wait_until_convenient()

    jobs = []
    wait_until_convenient()
    queue = Queue()
    start = timer()
    num_p = 30 # the number of processes to use
    for i in range(0, num_p):
        job = Process(target=fetch_task, args=(queue,))
        jobs.append(job)
        job.start() # start task process

    remaining = q.count(TAG)
    while remaining > 0: # wait until the queue is consumed completely
        remaining = q.count(TAG)
        sys.stdout.write('\rRunning test_multiprocess_tasks - remaining %5d/%5d' % (remaining, len(test_items),))
        sys.stdout.flush()
        wait_until_convenient()

    processed_data = set()
    qsize = 0
    while not queue.empty():
        item = queue.get()
        data = item.get('data')
        qsize += 1
        assert data not in processed_data, "failed test_multiprocess_tasks - data %s has been processed already" % (data, )
        processed_data.add(item.get('data'))

    queue.close()
    queue.join_thread()
    for j in jobs:
        j.join()

    assert qsize == len(test_items), "failed test_multiprocess_tasks - tasks are not complete %d/%d" % (qsize, len(test_items), )
    end = timer()
    print("\rOK test_multiprocess_tasks - %d done in %5d seconds" % (qsize, end - start))
Example #51
 def put(self, element):
     '''
     Put the element in the queue
     Raises an exception if too many errors are
     encountered
     '''
     dt = 1e-3
     while dt < 1:
         try:
             Queue.put(self,element)
             return
         except IOError:
             logger.warning('IOError encountered in SafeQueue put()')
             try:
                 time.sleep(dt)
             except:pass
             dt *= 2
             
     e = IOError('Unrecoverable error')
     raise e
Example #52
 def __init__(self, name, ns="", solicited=True, ignoreUnhandled=False, maxAsync=None):
     super(ApiBase, self).__init__(ns=ns, solicited=solicited, name=name)
     self._setup(ns=self._getNamespace(), solicited=self.solicited, ipc=self.ipc)
     self._dataRxCount = itertools.count(0)
     self._ignoreUnhandled = ignoreUnhandled
     if (maxAsync == None) or (maxAsync < 1):
         maxAsync = ApiBase.DEFAULT_MAX_ASYNC_HANDLERS
     self._maxAsync = maxAsync
     self._q = Queue()
     self._workers = []
     self._createAsyncWorkers()
     self.isAlive = True
Example #53
 def __init__(self, peerName, theTime, filename, quiet):
     self._peerName = peerName
     self._fp = open(filename, "w")
     self.fp.write("File debugger started at: %(T)s for client: %(C)s"%{"T":theTime, "C":peerName})
     self.fp.flush()
     self._methods = []
     methods = iMockDebuggerSink()._getMethods()
     self._methods = methods
     self._terminate = False
     self.quiet=  quiet
     self._startMutex = Semaphore(0)
     self._q = Queue()
     self.thread = None
Example #54
def parallel_sort(bam, out, n_workers):
    lb = BGZFReader(bam)
    mem = lb.uncompressed_size
    buf = RawArray(ctypes.c_char, mem)
    q = Queue()
    procs = []

    block_allocs = chunk(lb.blocks, n_workers)
    offsets = [0] + list(accumulate(sum(b.offset for b in blocks) 
                                    for blocks in block_allocs))[:-1]
    ary_szs = [sum([b.size_u for b in blocks]) for blocks in block_allocs]
    bufs = [RawArray(ctypes.c_char,mem) for mem in ary_szs]
    z = zip(chunk(lb.blocks, n_workers), offsets, bufs)
    for i,(blocks,off,buf) in enumerate(z):
        args = (i, bam, blocks, off, buf, q)
        p = Process(target=sort_read_ary, args=args)
        procs.append(p)
        p.start()

    combined = []
    for _ in procs:
        combined += q.get(True)
    logging.debug("Starting combined sort on %i reads" % len(combined))
    combined.sort()
    logging.debug("Finished combined sort")

    for p in procs:
        p.join()
        logging.debug("Returned from " + str(p))

    hdr = RawBAM(gzip.GzipFile(bam), header=True).rawheader
    with open(out, 'wb') as f:
        write_bgzf_block(f, hdr)
        for creads in grouper(READS_PER_BLOCK, combined):
            data = ""
            for i,cr in enumerate(creads):
                data += bufs[cr.worker_num][cr.ptr:(cr.ptr+cr.bs+4)] 
            write_bgzf_block(f, data)
        write_bam_eof(f)
Example #55
    def yk_monitor(self, mon_l):
        # forming command to run parallel monitoring processes
        mon_cmd = ' & '.join(["xinput test {}".format(y_id) for y_id in mon_l])
        monitor = subprocess.Popen(mon_cmd, shell=True, stdout=subprocess.PIPE)

        stdout_queue = Queue()
        stdout_reader = AsynchronousFileReader(monitor.stdout, stdout_queue)
        stdout_reader.start()

        triggered = False
        timestamp = time.time()
        while not stdout_reader.eof and time.time() - timestamp < TIMEOUT:
            while stdout_queue.qsize() > 0:
                stdout_queue.get()  # emptying queue
                triggered = True
                time.sleep(.04)
            if triggered:
                print('YubiKey triggered. Now disabling.')
                break

            time.sleep(.001)
        if not triggered:
            print('No YubiKey triggered. Timeout.')
Example #56
    def __init__(self, scrlck_mode=False):
        self.scrlck_mode = scrlck_mode

        self.id_q = Queue()
        self.on_q = Queue()
        self.pi_q = Queue()

        # init processes
        gi_proc = Process(target=self.get_ids)
        gi_proc.daemon = True

        cs_proc = Process(target=self.change_state)
        # no daemon, or main program will terminate before Keys can be unlocked
        cs_proc.daemon = False

        zmq_lis = ZmqListener(
            self.on_q)  # somehow works only with threads not processes
        zmq_lis_thr = Thread(target=zmq_lis.start_listener)
        zmq_lis_thr.setDaemon(True)

        pi = PanelIndicator(self.pi_q, self.on_q)

        # starting processes and catching exceptions:
        try:
            gi_proc.start()
            cs_proc.start()
            zmq_lis_thr.start()

            pi.run_pi()  # main loop of root process

        except (KeyboardInterrupt, SystemExit):
            print('Caught exit event.')

        finally:
            # send exit signal, will reactivate YubiKey slots
            print('Sending EXIT_SIGNAL')
            self.on_q.put(EXIT_SIGNAL)