Example #1
    def __init__(self,
                 table,
                 workers=cpu_count(),
                 maxconn=cpu_count(),
                 maxbuff=50000,
                 batchsize=5000,
                 *args,
                 **kwargs):

        self.table = table
        self.maxbuff = maxbuff
        self.maxconn = maxconn
        self.batchsize = batchsize
        self._args = args
        self._kwargs = kwargs
        self._queue = Queue()
        self._buffer_notifier = Condition()
        self._conn_notifier = Condition()
        self._conns = Value('i', 0)
        self._buffsize = Value('i', 0)
        self._sent = Value('i', 0)
        self._workers = 0
        self._buffer = []
        self._procs = []
        self._spawn(workers)
        self._progress()
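The writer half of that handshake is not shown; a minimal sketch, assuming a hypothetical put method on the same class, of how _buffer_notifier and _buffsize typically pair up: callers block while the buffer is full and are woken once a worker drains it.

    def put(self, row):
        # Hypothetical method, not part of the original snippet.
        with self._buffer_notifier:
            while self._buffsize.value >= self.maxbuff:
                self._buffer_notifier.wait()
            self._queue.put(row)
            with self._buffsize.get_lock():
                self._buffsize.value += 1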
Example #2
    def start_cache_process(self, rules=URLFrontierRules()):
        """
        Starts the child process that maintains the URL cache.

        Arguments:
            rules (URLFrontierRules): The rules to be applied to the cache.
        """
        with self._start_term_lock:
            cs = rules.checksum
            if cs not in self._cache_procs:
                self._url_queues[cs] = Queue(maxsize=self._max_size)
                self._job_queues[cs] = Queue()
                self._next_url_locks[cs] = RLock()
                self._fill_conds[cs] = Condition()
                self._empty_conds[cs] = Condition()
                self._mid_empty_conds[cs] = Condition()
                self._job_conds[cs] = Condition()
                self._cache_procs[cs] = Process(target=_monitor_cache,
                                                args=(self.dao,
                                                      self._max_size,
                                                      self._url_queues[cs],
                                                      self._job_queues[cs],
                                                      self._job_conds[cs],
                                                      self._fill_conds[cs],
                                                      self._empty_conds[cs],
                                                      rules.required_domains,
                                                      rules.blocked_domains,
                                                      rules.sort_list,
                                                      self._logger_lock))
                self._proc_counts[cs] = 0
            if not self._cache_procs[cs].is_alive():
                with self._logger_lock:
                    logger.info('Starting the cache process for rule=%s' % cs)
                self._cache_procs[cs].start()
            self._proc_counts[cs] += 1
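The matching teardown is not shown; a minimal sketch of a hypothetical release_cache_process counterpart that decrements the per-checksum reference count kept in _proc_counts and stops the child once no users remain:

    def release_cache_process(self, rules=URLFrontierRules()):
        # Hypothetical counterpart, not part of the original snippet.
        with self._start_term_lock:
            cs = rules.checksum
            self._proc_counts[cs] -= 1
            if self._proc_counts[cs] <= 0 and self._cache_procs[cs].is_alive():
                self._cache_procs[cs].terminate()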
Example #3
    def __init__(self, name='unknown', seed=None, trumps='all'):
        super().__init__(name, seed, trumps)
        self.action_received = Condition()
        self.observation_received = Condition()

        self.action = {}
        self.observation = {}
Example #4
    def _make_producer(self, index):
        set_q = Queue()
        get_q = Queue()
        set_cond = Condition()
        get_cond = Condition()
        producer = _Producer(self._init_fn, self._split_dat[index],
                             self._proc_fn, [set_q, get_q],
                             [set_cond, get_cond])
        producer.start()
        return producer, set_q, set_cond, get_q, get_cond
Example #5
    def __init__(self, d, name, year):
        super().__init__()
        self.d = d
        self.name = name
        self.year = year
        self.r = []
        self._mutex = RLock()
        self._empty = Condition(self._mutex)
        self._full = Condition(self._mutex)
Example #6
    def __init__(self, bufferSize):
        # Shared Data
        self.buffer = Array('i', bufferSize)
        self.bufferSize = bufferSize
        self.freePositions = Value('i', bufferSize)

        # Local Data
        self.nextRead = 0
        self.nextWrite = 0

        # Control Data
        self.mutex = Lock()
        self.items = Condition(self.mutex)
        self.positions = Condition(self.mutex)
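The deposit/fetch pair these fields support is not shown; a minimal sketch (an assumption, not the original snippet): producers wait on positions until a slot frees up, consumers wait on items until data arrives, and both Conditions share the same underlying mutex.

    def deposit(self, value):
        # Hypothetical method: block while the buffer is full.
        with self.positions:                  # acquires self.mutex
            while self.freePositions.value == 0:
                self.positions.wait()
            self.buffer[self.nextWrite] = value
            self.nextWrite = (self.nextWrite + 1) % self.bufferSize
            self.freePositions.value -= 1
            self.items.notify()

    def fetch(self):
        # Hypothetical method: block while the buffer is empty.
        with self.items:                      # same underlying mutex
            while self.freePositions.value == self.bufferSize:
                self.items.wait()
            value = self.buffer[self.nextRead]
            self.nextRead = (self.nextRead + 1) % self.bufferSize
            self.freePositions.value += 1
            self.positions.notify()
            return value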
Example #7
    def __init__(self, runner, fn, sig, args, kwargs):
        self.name = fn.__name__
        self.id = None
        self.graph = None
        self._runner = runner
        self._fn = fn
        self._sig = sig
        self._args = {}

        # Runtime control
        self._latch = Value('i', 0)
        self.triggered = Condition()

        # I/O
        self.inputs = {}
        self.outputs = {}
        self.edges = []

        # Flags
        self.is_fusee = False
        self.is_fused = False
        self.is_source = False
        self.is_sink = False
        self.is_staging = False
        self.is_transform = False

        self._set_inputs(fn, args, kwargs)
        self._set_outputs(fn, args)
Example #8
def run():
    global task_process
    global task_condition
    task_condition = Condition()
    # Process.start() returns None, so keep the Process object and start
    # it in a separate statement instead of chaining the calls.
    task_process = Process(target=wikiconnector.task_processor,
                           args=(dbconfig, task_condition))
    task_process.start()
    application.run(host='0.0.0.0', port=8000)
Example #9
    def __call__(self, cv_iterator, evaluator, fold_callback=None,
                 n_jobs=None):
        """
        """
        condvar = Condition()
        results = []

        def _signal_cb(result):
            condvar.acquire()
            results.append(result)
            condvar.notify()
            condvar.release()
        folds = list(cv_iterator)

        pool, deferreds = self.async(folds, evaluator,
                                     fold_callback=_signal_cb, n_jobs=n_jobs)
        pool.close()
        while len(results) < len(folds):
            condvar.acquire()
            condvar.wait()
            fold_estimator, result = results[-1]
            fold_callback(fold_estimator, result)
            condvar.release()
        pool.join()
        return results
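Two caveats in the loop above: each wakeup only inspects results[-1], so a callback is skipped whenever two workers append between wakeups, and the length check happens before the lock is taken. Note also that the self.async call stops parsing on Python 3.7+, where async became a reserved keyword. A tighter variant of the loop using the Condition as a context manager (a sketch, not the original code):

        with condvar:
            seen = 0
            while seen < len(folds):
                while len(results) == seen:
                    condvar.wait()
                for fold_estimator, result in results[seen:]:
                    fold_callback(fold_estimator, result)
                seen = len(results)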
Example #10
    def run(self, tasks, build_config, parallel_threads):
        semaphore = Semaphore(parallel_threads)
        process_finished_notify = Condition(Lock())
        while tasks.count_buildable_tasks() > 0:
            task = tasks.get_next()

            if task is None:
                self.wait_tasks_to_complete(parallel_threads,
                                            process_finished_notify, semaphore)
                continue

            semaphore.acquire()
            task.state = Task.State.RUNNING
            logging.debug("Starting task %s", task.name)
            self.start_new_process(process_finished_notify, semaphore,
                                   self.process_job, task, build_config)

        self.wait_tasks_to_complete(parallel_threads, process_finished_notify,
                                    semaphore)

        if tasks.count(Task.State.FAILED) > 0:
            logging.error('Some packages failed to build.')
            logging.error("  %s", tasks.print_name(Task.State.FAILED))
            return 1
        if tasks.count(Task.State.RUNNING) > 0:
            logging.error(
                'Something went wrong, there are still some running tasks.')
            return 1
        if tasks.count(Task.State.NEW) > 0:
            logging.error(
                'Something went wrong, there are still unprocessed tasks.')
            return 1

        logging.info("Build completed successfully.")
        return 0
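Neither helper used above is shown; a minimal sketch of what they might do (assumptions, not the original code): the child releases the semaphore and notifies the Condition when it finishes, and the waiter blocks on that notification.

    def start_new_process(self, notify, semaphore, job, task, build_config):
        # Hypothetical helper; assumes a fork-style start method so the
        # closure can be used as a Process target.
        def _wrapper():
            try:
                job(task, build_config)
            finally:
                semaphore.release()
                with notify:
                    notify.notify_all()
        Process(target=_wrapper).start()

    def wait_tasks_to_complete(self, parallel_threads, notify, semaphore):
        # Hypothetical helper: block until some child signals completion.
        with notify:
            notify.wait(timeout=1.0)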
Example #11
def main_multi_prc(seq_iter, cpu_count):
    # execute task using parallelism
    from multiprocessing import Process, Queue, Value, Condition
    from ctypes import c_int

    q, c, v = (Queue(), Condition(), Value(c_int, 0))

    # TODO improve design
    global data
    data = tuple(seq_iter)

    # creates min(len(data), cpu_count) processes
    processes = [
        Process(target=reverse_and_print_task, args=(q, c, v))
        for _ in range(min(len(data), cpu_count))
    ]

    for p in processes:
        p.start()
    for i in range(len(data)):
        q.put(i)  # mark entries in queue
    for p in processes:
        q.put(None)
    for p in processes:
        p.join()
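reverse_and_print_task is not shown; a minimal sketch consistent with the driver (an assumption): pull indices until the None sentinel arrives, and use the Condition plus the shared counter to keep output in input order. It assumes the fork start method so the module-level data tuple is inherited.

def reverse_and_print_task(q, c, v):
    while True:
        i = q.get()
        if i is None:              # poison pill: no more work
            break
        line = data[i][::-1]       # `data` is the module-level tuple set above
        with c:
            while v.value != i:    # wait until it is this entry's turn
                c.wait()
            print(line)
            v.value += 1
            c.notify_all()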
Example #12
def getDataSet(time0, users, rgp):
    start = time.time()
    plt_queue = Queue()
    plt_cond = Condition()
    pool = []
    max_process = 2
    i = 0
    n_user = len(users.index)
    while i < n_user:
        if len(pool) >= max_process:
            plt_cond.acquire()
            if plt_queue.empty():
                plt_cond.wait()
            while not plt_queue.empty():
                pos = -1
                uq = plt_queue.get()
                for pos in range(len(pool)):
                    if pool[pos].u == uq:
                        break
                pool[pos].join()
                del pool[pos]
            plt_cond.release()

        u = users.index[i]
        m = users.loc[u, "memberSince"]
        p = MultipRecord(rgp, u, m, time0, plt_queue, plt_cond)
        pool.append(p)
        p.start()
        i += 1
    print "subProcess start....."
    [p.join() for p in pool]
    end = time.time()
    print "time for make Reg trainSet: %.3f s" % (end - start)
Example #13
def fork_child(request, comms):
    val = Value('i', 0)
    lock = RLock()
    cond = Condition(lock)

    pid = os.fork()
    if pid:
        # parent
        with lock:
            val.value = 1
            cond.notify_all()
            cond.wait_for(lambda: val.value == 2)
        return pid
    else:
        # child
        # noinspection PyBroadException
        try:
            handler = CaptureHTTPHandler(request, comms)
            with lock:
                cond.wait_for(lambda: val.value == 1)
                val.value = 2
                cond.notify_all()
            handler.serve()
        except Exception:
            request.server.handle_error(request.req, request.client_address)
            with lock:
                cond.wait_for(lambda: val.value == 1)
                val.value = 2
                cond.notify_all()
        finally:
            request.server.shutdown_request(request.req)
            comms.close()
            # child does not exit normally
            import signal
            os.kill(os.getpid(), signal.SIGKILL)
Example #14
    def filter(self, items: Iterable[Any]) -> Iterable[Any]:

        try:

            with Manager() as manager:

                stdlog = QueueIO(Queue())
                stderr = CobaMultiprocessor.PipeStderr()

                log_thread = Thread(target=Pipes.join(
                    stdlog, Foreach(CobaContext.logger.sink)).run)
                log_thread.daemon = True
                log_thread.start()

                logger = CobaContext.logger
                cacher = ConcurrentCacher(CobaContext.cacher, manager.dict(),
                                          Lock(), Condition())
                store = {"srcsema": Semaphore(2)}

                filter = CobaMultiprocessor.ProcessFilter(
                    self._filter, logger, cacher, store, stdlog)

                for item in Multiprocessor(filter, self._processes,
                                           self._maxtasksperchild,
                                           stderr).filter(items):
                    yield item

                stdlog.write(None)  # attempt to shut down the logging process gracefully by sending the poison pill

        except RuntimeError as e:  #pragma: no cover
            #This happens when importing main causes this code to run again
            coba_exit(str(e))
Example #15
def stage_2(num, cond, v):
    print('Starting', num)
    with cond:
        while v.value != True:
            cond.wait()
    print('stage_2_1:', num)

def stage_3(num, cond, v):
    print('Starting', num)
    with cond:
        while v.value != True:
            cond.wait()
    print('stage_3_1:', num)

if __name__ == '__main__':
    v = Value('i', False)
    cond = Condition()
    # Create the processes
    s1 = Process(target=stage_1, args=(1, cond, v))
    s2 = Process(target=stage_2, args=(2, cond, v))
    s3 = Process(target=stage_3, args=(3, cond, v))
    procs = [s1, s2, s3]

    # Add them to the run queue; they are started in order
    for p in procs:
        p.start()
    for p in procs:
        p.join()
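stage_1 is referenced above but not defined in the snippet; a minimal sketch consistent with stage_2 and stage_3 (an assumption): flip the shared flag and wake the waiting stages.

def stage_1(num, cond, v):
    print('Starting', num)
    with cond:
        v.value = True
        cond.notify_all()
    print('stage_1_1:', num)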
Example #16
    def __init__(self):
        super().__init__()
        self.lock = Condition()
        self.done = Value("H", 0)
        self.file = Value(c_wchar_p, "")
        self.progress = Value("d", 0.0)
        self.callbacks = []
Example #17
    def __init__(self, *args):
        TServer.__init__(self, *args)
        self.numWorkers = 10
        self.workers = []
        self.isRunning = Value('b', False)
        self.stopCondition = Condition()
        self.postForkCallback = None
Example #18
    def __init__(self, device_id, setup_sim, update_sim, output_var_shape):
        """
        Args:
        - device_id (int): GPU device to use for rendering (0-indexed)
        - setup_sim (callback): callback that is given a device_id and
            returns a MjSim. It is responsible for making MjSim render
            to given device.
        - update_sim (callback): callback given a sim and device_id, and
            should return a numpy array of shape `output_var_shape`.
        - output_var_shape (tuple): shape of the synchronized output
            array from `update_sim`.
        """
        self.device_id = device_id
        self.setup_sim = setup_sim
        self.update_sim = update_sim

        # Create a synchronized output variable (numpy array)
        self._shared_output_var = Array(ctypes.c_double,
                                        int(np.prod(output_var_shape)))
        self._output_var = np.frombuffer(self._shared_output_var.get_obj())

        # Number of variables used to communicate with process
        self._cv = Condition()
        self._ready = Value('b', 0)
        self._start = Value('b', 0)
        self._terminate = Value('b', 0)

        # Start the actual process
        self._process = Process(target=self._run)
        self._process.start()
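_run is not shown; a minimal sketch of the worker loop these flags suggest (an assumption, not the original implementation): build the sim once, then render on demand until asked to terminate, signalling readiness through _cv each time.

    def _run(self):
        sim = self.setup_sim(self.device_id)
        with self._cv:
            self._ready.value = 1
            self._cv.notify()
        while True:
            with self._cv:
                while not (self._start.value or self._terminate.value):
                    self._cv.wait()
                if self._terminate.value:
                    break
                self._start.value = 0
            out = self.update_sim(sim, self.device_id)
            self._output_var[:] = out.ravel()
            with self._cv:
                self._ready.value = 1
                self._cv.notify()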
Example #19
    def __init__(self, storageType, manager=None):
        """
        Initialize the storage.

        :type storageType: StorageType
        :param storageType: The kind of storage. All supported kinds are defined by StorageType.
        :param manager: Optional parameter. Use it to force a different multiprocessing.Manager.
        """
        if manager is None:
            manager = Manager()
        # Type checking
        if not isinstance(storageType, self.StorageType):
            raise TypeError('storageType must be an instance of StorageType')

        # The data will be stored here
        if storageType == self.StorageType.LIST:
            self._storage = SharedList(manager)
        elif storageType == self.StorageType.DICT:
            self._storage = SharedDict(manager)
        elif storageType == self.StorageType.DICT_SIMPLE:
            self._storage = manager.dict()
        else:
            raise ValueError('Unknown kind of storage (storageType).')

        self.__usedManager = manager
        # Shared lock for synchronizing processes
        self.__sharedLock = Lock()

        # number of stored classifiers
        self._numOfData = Value(c_ulong, 0)

        self.__waitForChange = Condition()

        self.acquiredStorage = False
Example #20
def initialize():
    global g_job_queue, g_done_event, g_image_id
    global g_is_cache_raw_enabled, g_delete_lock
    g_job_queue = Queue()
    g_done_event = Event()
    g_image_id = Array(c_char, 100)
    g_is_cache_raw_enabled = Value(c_bool, False)
    g_delete_lock = Condition()
Example #21
def test_hostapd(prompt=True):

    q_adv = Queue()
    c_bt2wifi = Condition()
    c_wifi2dsc = Condition()
    p_adv_tcp = Process(name='p_adv_tcp',
                        target=t_adv_tcp,
                        args=(c_bt2wifi, c_wifi2dsc, q_adv))
    p_hostapd = Process(name='p_hostapd',
                        target=t_hostapd,
                        args=(c_bt2wifi, c_wifi2dsc, q_adv))

    p_adv_tcp.start()
    p_hostapd.start()

    p_hostapd.join()
    p_adv_tcp.join()
Example #22
def test():
    count = Value('i', 0)
    con = Condition()
    for i in range(2):
        Process(target=producer, args=(count, con)).start()

    for i in range(5):
        Process(target=consumer, args=(count, con)).start()
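producer and consumer are not defined in the snippet; a minimal sketch of the usual pattern behind such a test (an assumption): producers bump the shared counter and notify, consumers wait until there is something to take.

def producer(count, con):
    for _ in range(10):
        with con:
            count.value += 1
            con.notify_all()

def consumer(count, con):
    for _ in range(4):
        with con:
            while count.value == 0:
                con.wait()
            count.value -= 1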
Example #23
    def __init__(self):
        """Initialises the RWLock."""
        self._condition = Condition()
        self._readers = Value(c_uint64, 0, lock=False)
        self._writers_waiting = Value(c_uint64, 0, lock=False)

        self.for_reading = self.ReadLock(self)
        self.for_writing = self.WriteLock(self)
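The ReadLock/WriteLock helpers are not shown; a minimal sketch of the reader side (an assumption): readers proceed unless a writer is waiting, and the last one out wakes everyone. Using Value(..., lock=False) is safe here because every mutation happens while _condition is held.

    class ReadLock:
        # Hypothetical inner class, not part of the original snippet.
        def __init__(self, rwlock):
            self._rw = rwlock

        def __enter__(self):
            with self._rw._condition:
                while self._rw._writers_waiting.value > 0:
                    self._rw._condition.wait()
                self._rw._readers.value += 1

        def __exit__(self, *exc):
            with self._rw._condition:
                self._rw._readers.value -= 1
                self._rw._condition.notify_all()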
Example #24
def main(argv):

    myListFile = argv[0]
    print(myListFile)
    inputDir = argv[1]
    print(inputDir)
    outputDir = argv[2]
    print(outputDir)

    cond1 = Condition()
    cond2 = Condition()
    cond3 = Condition()

    q = Queue()
    qcm = Queue()
    qrm = Queue()
    qaws_size = 0
    #    Thread(target=copyS3ToEc2,args=(cond1,)).start()
    #    Thread(target=hailthread,args=(cond1,cond2,)).start()
    paws = Process(target=copyS3ToEc2,
                   args=(cond1, q, myListFile, inputDir, outputDir,
                         qaws_size))
    paws.start()
    phail = Process(target=hailthread,
                    args=(cond1, q, cond2, qcm, inputDir, outputDir,
                          qaws_size))
    phail.start()

    paws.join()
    print("paws joined")
    phail.join()
    print("phail joined")
Example #25
    def __init__(self, maxsize):
        self.queue = Queue(maxsize=maxsize)
        self.lock = Lock()
        self.getlock = Lock()
        self.putcounter = Value('i', -1)
        self.getcounter = Value('i', 0)
        self.cond = Condition(self.lock)
        self.manager = Manager()
        self.getlist = self.manager.list()
Example #26
    def test_condition_with_normal_value(self):
        from multiprocessing.synchronize import Condition as synchronize_Condition
        from multiprocessing import Condition

        Globalize.condition(condition=Condition())
        from multirunnable.api.manage import Running_Condition
        assert isinstance(
            Running_Condition, synchronize_Condition
        ) is True, "It should save instance to the target global variable *Running_Condition*."
Example #27
    def __init__(self, manager=None):
        """
        Creates a new shared queue.

        :param manager: Optional parameter. Use it to force a different multiprocessing.Manager.
        """
        super().__init__(manager)

        self.__waitForChange = Condition()
Example #28
    def __init__(self, num_processor, batch_size, phase,
                 batch_idx_init=0, data_ids_init=train_ids, capacity=10):
        self.num_processor = num_processor
        self.batch_size = batch_size
        self.data_load_capacity = capacity
        self.manager = Manager()
        self.batch_lock = Lock()
        self.mutex = Lock()
        self.cv_full = Condition(self.mutex)
        self.cv_empty = Condition(self.mutex)
        self.data_load_queue = self.manager.list()
        self.cur_batch = self.manager.list([batch_idx_init])
        self.processors = []
        if phase == 'train':
            self.data_ids = self.manager.list(data_ids_init)
        elif phase == 'test':
            self.data_ids = self.manager.list(test_ids)
        else:
            raise ValueError('Could not set phase to %s' % phase)
Example #29
    def __init__(self, prod_end, fname, SHARED_QUEUE_SIZE_LIMIT):
        super(Producer, self).__init__()
        self.prod_end = prod_end
        self.fp = open(fname, 'r')
        self.SHARED_QUEUE_SIZE_LIMIT = SHARED_QUEUE_SIZE_LIMIT
        self.batch_queue = []
        self.condition = Condition()
        self.pipe_out_thread = PipeOutThread(prod_end, self.condition,
                                             self.SHARED_QUEUE_SIZE_LIMIT,
                                             self.batch_queue)
Example #30
def main():

    queue = Queue()
    condition = Condition()
    prod = Producer('prod', queue, condition)
    cons = Consumer('cons', queue, condition)

    prod.start()
    cons.start()
    prod.join()
    cons.join()
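Producer and Consumer are not shown; a minimal sketch of classes that fit this driver (an assumption): both are Processes sharing the queue and the Condition, with a None sentinel to stop the consumer.

class Producer(Process):
    # Hypothetical class, consistent with the driver above.
    def __init__(self, name, queue, condition):
        super().__init__(name=name)
        self.queue = queue
        self.condition = condition

    def run(self):
        for i in range(5):
            self.queue.put(i)
            with self.condition:
                self.condition.notify_all()  # wake anyone waiting for data
        self.queue.put(None)                 # poison pill

class Consumer(Process):
    # Hypothetical class, consistent with the driver above.
    def __init__(self, name, queue, condition):
        super().__init__(name=name)
        self.queue = queue
        self.condition = condition

    def run(self):
        while True:
            item = self.queue.get()          # blocks until data arrives
            if item is None:
                break
            print('consumed', item)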