Example #1
class ThreeFM(Process):
    def __init__(self, d, name, year):
        super().__init__()
        self.d = d
        self.name = name
        self.year = year
        self.r = []
        self._mutex = RLock()
        self._empty = Condition(self._mutex)
        self._full = Condition(self._mutex)

    def run(self):
        with ThreadPoolExecutor(max_workers=2) as pool:
            q = {
                pool.submit(self.put, self.name, dat(self.year, m, 1)): m
                for m in range(1, 13)
            }

    def __str__(self):
        return str(self.d.values()[0])

    def put(self, name, date):
        with self._full:
            while len(self.r) >= 12:
                self._full.wait()
            self.r.append(stat(date, name))
            self.d[0] = self.r
            self._empty.notify()

    def get(self):
        return self.d.values()[0]
Example #2
    def __init__(self, device_id, setup_sim, update_sim, output_var_shape):
        """
        Args:
        - device_id (int): GPU device to use for rendering (0-indexed)
        - setup_sim (callback): callback that is given a device_id and
            returns a MjSim. It is responsible for making MjSim render
            to given device.
        - update_sim (callback): callback given a sim and device_id, and
            should return a numpy array of shape `output_var_shape`.
        - output_var_shape (tuple): shape of the synchronized output
            array from `update_sim`.
        """
        self.device_id = device_id
        self.setup_sim = setup_sim
        self.update_sim = update_sim

        # Create a synchronized output variable (numpy array)
        self._shared_output_var = Array(ctypes.c_double,
                                        int(np.prod(output_var_shape)))
        self._output_var = np.frombuffer(self._shared_output_var.get_obj())

        # Variables used to communicate with the process
        self._cv = Condition()
        self._ready = Value('b', 0)
        self._start = Value('b', 0)
        self._terminate = Value('b', 0)

        # Start the actual process
        self._process = Process(target=self._run)
        self._process.start()
Example #3
 def __init__(self, *args):
     TServer.__init__(self, *args)
     self.numWorkers = 10
     self.workers = []
     self.isRunning = Value('b', False)
     self.stopCondition = Condition()
     self.postForkCallback = None
Example #4
 def __init__(self):
     super().__init__()
     self.lock = Condition()
     self.done = Value("H", 0)
     self.file = Value(c_wchar_p, "")
     self.progress = Value("d", 0.0)
     self.callbacks = []
Example #5
    def __init__(self, name='unknown', seed=None, trumps='all'):
        super().__init__(name, seed, trumps)
        self.action_received = Condition()
        self.observation_received = Condition()

        self.action = {}
        self.observation = {}
Example #6
class ServerProcess(Process):
    def __init__(self):
        super().__init__()
        self.url = f"opc.tcp://127.0.0.1:{port_num}"
        self.cond = Condition()
        self.stop_ev = Event()

    async def run_server(self, url):
        srv = Server()
        srv.set_endpoint(url)
        await srv.init()
        await add_server_methods(srv)
        await add_server_custom_enum_struct(srv)
        async with srv:
            with self.cond:
                self.cond.notify_all()
            while not self.stop_ev.is_set():
                await asyncio.sleep(1)
        await srv.stop()

    def stop(self):
        self.stop_ev.set()

    async def wait_for_start(self):
        with ThreadPoolExecutor() as pool:
            result = await asyncio.get_running_loop().run_in_executor(
                pool, self.wait_for_start_sync)

    def wait_for_start_sync(self):
        with self.cond:
            self.cond.wait()

    def run(self):
        loop = asyncio.new_event_loop()
        loop.run_until_complete(self.run_server(self.url))
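From the parent side the pieces above compose as follows; a usage sketch, assuming port_num is defined at module level and eliding the actual client work:

import asyncio

async def main():
    proc = ServerProcess()
    proc.start()                   # runs run() -> run_server() in the child
    await proc.wait_for_start()    # returns once run_server() calls notify_all()
    # ... interact with the OPC UA server at proc.url ...
    proc.stop()                    # sets stop_ev; the server loop then exits
    proc.join()

if __name__ == '__main__':
    asyncio.run(main())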
Example #7
    def __init__(self, storageType, manager=None):
        """
        Inicializace uložiště.
        :type storageType: StorageType
        :param storageType: Druh uložiště. Všechny podporované druhy vymezuje StorageType.
        :param manager: Volitelný parametr. Pokud chceme vnutit použití jiného multiprocessing.Manager.
        """
        if manager is None:
            manager = Manager()
        # Type checking
        if not isinstance(storageType, self.StorageType):
            raise TypeError('storageType must be an instance of StorageType')

        # The data will be stored here.
        if storageType == self.StorageType.LIST:
            self._storage = SharedList(manager)
        elif storageType == self.StorageType.DICT:
            self._storage = SharedDict(manager)
        elif storageType == self.StorageType.DICT_SIMPLE:
            self._storage = manager.dict()
        else:
            raise ValueError('Unknown kind of storage (storageType).')

        self.__usedManager = manager
        # Shared lock for synchronizing the processes
        self.__sharedLock = Lock()

        # number of stored classifiers
        self._numOfData = Value(c_ulong, 0)

        self.__waitForChange = Condition()

        self.acquiredStorage = False
Example #8
    def start_cache_process(self, rules=URLFrontierRules()):
        """
        Starts the child process that maintains the URL cache.

        Arguments:
            rules (URLFrontierRules): The rules to be applied to the cache.
        """
        with self._start_term_lock:
            cs = rules.checksum
            if cs not in self._cache_procs.keys():
                self._url_queues[cs] = Queue(maxsize=self._max_size)
                self._job_queues[cs] = Queue()
                self._next_url_locks[cs] = RLock()
                self._fill_conds[cs] = Condition()
                self._empty_conds[cs] = Condition()
                self._mid_empty_conds[cs] = Condition()
                self._job_conds[cs] = Condition()
                self._cache_procs[cs] = Process(target=_monitor_cache,
                                                args=(self.dao,
                                                      self._max_size,
                                                      self._url_queues[cs],
                                                      self._job_queues[cs],
                                                      self._job_conds[cs],
                                                      self._fill_conds[cs],
                                                      self._empty_conds[cs],
                                                      rules.required_domains,
                                                      rules.blocked_domains,
                                                      rules.sort_list,
                                                      self._logger_lock))
                self._proc_counts[cs] = 0
            if not self._cache_procs[cs].is_alive():
                with self._logger_lock:
                    logger.info('Starting the cache process for rule=%s' % cs)
                self._cache_procs[cs].start()
            self._proc_counts[cs] += 1
Example #9
    def __init__(self, runner, fn, sig, args, kwargs):
        self.name = fn.__name__
        self.id = None
        self.graph = None
        self._runner = runner
        self._fn = fn
        self._sig = sig
        self._args = {}

        # Runtime control
        self._latch = Value('i', 0)
        self.triggered = Condition()

        # I/O
        self.inputs = {}
        self.outputs = {}
        self.edges = []

        # Flags
        self.is_fusee = False
        self.is_fused = False
        self.is_source = False
        self.is_sink = False
        self.is_staging = False
        self.is_transform = False

        self._set_inputs(fn, args, kwargs)
        self._set_outputs(fn, args)
Example #10
    def __init__(self,
                 table,
                 workers=cpu_count(),
                 maxconn=cpu_count(),
                 maxbuff=50000,
                 batchsize=5000,
                 *args,
                 **kwargs):

        self.table = table
        self.maxbuff = maxbuff
        self.maxconn = maxconn
        self.batchsize = batchsize
        self._args = args
        self._kwargs = kwargs
        self._queue = Queue()
        self._buffer_notifier = Condition()
        self._conn_notifier = Condition()
        self._conns = Value('i', 0)
        self._buffsize = Value('i', 0)
        self._sent = Value('i', 0)
        self._workers = 0
        self._buffer = []
        self._procs = []
        self._spawn(workers)
        self._progress()
Example #11
    def __init__(self):
        """Initialises the RWLock."""
        self._condition = Condition()
        self._readers = Value(c_uint64, 0, lock=False)
        self._writers_waiting = Value(c_uint64, 0, lock=False)

        self.for_reading = self.ReadLock(self)
        self.for_writing = self.WriteLock(self)
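The ReadLock/WriteLock inner classes are not shown here; a plausible sketch of them as context managers over the shared condition (an assumption, not the original implementation) that gives waiting writers priority over new readers:

    class ReadLock:
        # Hypothetical inner class; the original is not shown.
        def __init__(self, rwlock):
            self._rw = rwlock

        def __enter__(self):
            with self._rw._condition:
                # Queue up behind any waiting writer, then count ourselves in.
                self._rw._condition.wait_for(
                    lambda: self._rw._writers_waiting.value == 0)
                self._rw._readers.value += 1

        def __exit__(self, *exc):
            with self._rw._condition:
                self._rw._readers.value -= 1
                self._rw._condition.notify_all()

    class WriteLock:
        # Hypothetical inner class; the original is not shown.
        def __init__(self, rwlock):
            self._rw = rwlock

        def __enter__(self):
            self._rw._condition.acquire()   # held for the entire write
            self._rw._writers_waiting.value += 1
            self._rw._condition.wait_for(
                lambda: self._rw._readers.value == 0)
            self._rw._writers_waiting.value -= 1

        def __exit__(self, *exc):
            self._rw._condition.notify_all()
            self._rw._condition.release()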
Example #12
 def _make_producer(self, index):
     set_q = Queue()
     get_q = Queue()
     set_cond = Condition()
     get_cond = Condition()
     producer = _Producer(self._init_fn, self._split_dat[index], self._proc_fn, [set_q, get_q], [set_cond, get_cond])
     producer.start()
     return producer, set_q, set_cond, get_q, get_cond
Example #13
    def __init__(self):

        self.pcb = None

        self.__mutex = RLock()
        self.__pcb_not_set = Condition(self.__mutex)
        self.__mem_not_allocated = Condition(self.__mutex)
        self.__round_robin_policy_on = False
Example #14
 def __init__(self, maxsize):
     self.queue = Queue(maxsize=maxsize)
     self.lock = Lock()
     self.getlock = Lock()
     self.putcounter = Value('i', -1)
     self.getcounter = Value('i', 0)
     self.cond = Condition(self.lock)
     self.manager = Manager()
     self.getlist = self.manager.list()
Example #15
 def __init__(self, d, name, year):
     super().__init__()
     self.d = d
     self.name = name
     self.year = year
     self.r = []
     self._mutex = RLock()
     self._empty = Condition(self._mutex)
     self._full = Condition(self._mutex)
Example #16
 def __init__(self, a_device, a_kernel, std_in=StandardInput(), std_out=StandardOutput()):
     Thread.__init__(self)
     self.set_device(a_device)
     self.set_kernel(a_kernel)
     self.set_input(std_in)
     self.set_output(std_out)
     self.set_mutex(RLock())
     self.set_queue(SoQueue())
     self.device_is_in_use = Condition(self.get_mutex())
     self.the_queue_is_empty = Condition(self.get_mutex())
Example #17
 def __init__(self, prod_end, fname, SHARED_QUEUE_SIZE_LIMIT):
     super(Producer, self).__init__()
     self.prod_end = prod_end
     self.fp = open(fname, 'r')
     self.SHARED_QUEUE_SIZE_LIMIT = SHARED_QUEUE_SIZE_LIMIT
     self.batch_queue = []
     self.condition = Condition()
     self.pipe_out_thread = PipeOutThread(prod_end, self.condition,
                                          self.SHARED_QUEUE_SIZE_LIMIT,
                                          self.batch_queue)
Example #18
 def __init__(self):
     super(CountBucket, self).__init__()
     self.matches = set([])
     self.runtime_stats_query_fun = None
     self.outstanding_switches = []
     self.packet_count = 0
     self.byte_count = 0
     self.packet_count_persistent = 0
     self.byte_count_persistent = 0
     self.in_update_cv = Condition()
     self.in_update = False
Example #19
 def __init__(self, engine, max_working=1):
     self.condition = Condition(RLock())
     self.engine = engine
     self.max_working = max_working
     self.running = False
     self.paused = False
     self.metadata = sa.MetaData(self.engine)
     self._table_prefix = 'exscript_pipeline_'
     self._table_map = {}
     self.__update_table_names()
     self.clear()
Example #20
    def update_cells(
            self,
            cond_looking_for_positions: multiprocessing.Condition,
            event_compute_results: multiprocessing.Event,
            positions
    ):
        """
        :param cond_looking_for_positions:
        :param event_compute_results:
        :param positions:
        """

        while self.__is_alive:
            # print(f"[{os.getpid()}] update_cells")
            duplication = self.universe.copy()
            _positions = np.argwhere(self.universe == 1)
            _positions = np.array_split(_positions, multiprocessing.cpu_count() - 1)

            positions[:] = [position for position in _positions]

            with cond_looking_for_positions:
                cond_looking_for_positions.notify_all()
            event_compute_results.wait()
            event_compute_results.clear()

            _positions = positions[0]
            for position in positions[1:]:
                _positions = np.concatenate((_positions, position))
            # Remove duplicates
            _positions = np.unique(_positions, axis=0)

            for position in _positions:
                x, y = position
                min_y = max(y - 1, 0)
                max_y = min(y + 2, self.max_y)
                min_x = max(x - 1, 0)
                max_x = min(x + 2, self.max_x)
                sum_of_cells = 0
                for i in range(min_x, max_x):
                    for j in range(min_y, max_y):
                        if i == x and j == y:
                            continue
                        sum_of_cells += self.universe[i, j]

                if sum_of_cells == 3:
                    duplication[x][y] = 1
                if sum_of_cells < 2 or sum_of_cells > 3:
                    duplication[x][y] = 0

            self.universe = duplication
            self.__shared_universe[:] = self.universe.flatten()[:]
            time.sleep(self.speed)
Example #21
 def __init__(self, handler, listModelConfig, *args, logger=None, timeout=0.1, batchSize=1):
     TServer.__init__(self, *args)
     self.timeout = timeout
     self.batchSize = batchSize
     if logger is not None:
         self.logger = logger
     else:
         self.logger = logging.getLogger(__name__)
     self.listModelConfig = listModelConfig
     self.handler = handler
     self.workers = []
     self.isRunning = Value('b', False)
     self.stopCondition = Condition()
     self.postForkCallback = None
Example #22
def setter(array: Array, cv: Condition):
    """
    Sets the value of the first (and only) index
    position in array exclusively and notifies the process
    awaiting this action that a valid value is available.
    :param array: a multiprocessing Array of integers of size 1 to which to write a value
    :param cv: a Condition object (a condition variable) to allow the value to be read only when it is ready
    """
    print('In setter.')
    with cv:
        print('Before setting:', array[0])
        array[0] = 43
        print('After setting:', array[0])
        cv.notify()
Example #23
    def __call__(self, cv_iterator, evaluator, fold_callback=None,
                 n_jobs=None):
        """
        """
        condvar = Condition()
        results = []

        def _signal_cb(result):
            with condvar:
                results.append(result)
                condvar.notify()
        folds = list(cv_iterator)

        # `async` became a reserved word in Python 3.7, so the method of that
        # name has to be looked up with getattr.
        pool, deferreds = getattr(self, 'async')(folds, evaluator,
                                                 fold_callback=_signal_cb,
                                                 n_jobs=n_jobs)
        pool.close()
        # Hold the lock across the length check so a notification cannot be
        # lost between testing the condition and calling wait().
        condvar.acquire()
        while len(results) < len(folds):
            condvar.wait()
            fold_estimator, result = results[-1]
            fold_callback(fold_estimator, result)
        condvar.release()
        pool.join()
        return results
Example #24
 def __init__(self, max_working = 1):
     self.condition   = Condition(RLock())
     self.max_working = max_working
     self.running     = True
     self.paused      = False
     self.queue       = None
     self.force       = None
     self.sleeping    = None
     self.working     = None
     self.item2id     = None
     self.id2item     = None # for performance reasons
     self.name2id     = None
     self.id2name     = None
     self.clear()
Example #25
def getDataSet(time0, users, rgp):
    start = time.time()
    plt_queue = Queue()
    plt_cond = Condition()
    pool = []
    max_process = 2
    i = 0
    n_user = len(users.index)
    while i < n_user:
        if len(pool) >= max_process:
            plt_cond.acquire()
            if plt_queue.empty():
                plt_cond.wait()
            while not plt_queue.empty():
                pos = -1
                uq = plt_queue.get()
                for pos in range(len(pool)):
                    if pool[pos].u == uq:
                        break
                pool[pos].join()
                del pool[pos]
            plt_cond.release()

        u = users.index[i]
        m = users.loc[u, "memberSince"]
        p = MultipRecord(rgp, u, m, time0, plt_queue, plt_cond)
        pool.append(p)
        p.start()
        i += 1
    print "subProcess start....."
    [p.join() for p in pool]
    end = time.time()
    print "time for make Reg trainSet: %.3f s" % (end - start)
Example #26
    def __init__(self, bufferSize):
        # Shared Data
        self.buffer = Array('i', bufferSize)
        self.bufferSize = bufferSize
        self.freePositions = Value('i', bufferSize)

        # Local Data
        self.nextRead = 0
        self.nextWrite = 0

        # Control Data
        self.mutex = Lock()
        self.items = Condition(self.mutex)
        self.positions = Condition(self.mutex)
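Here items signals "buffer not empty" and positions signals "free slot available", both sharing the single mutex. A sketch of the produce/consume pair such a buffer is built for (the method names and bodies are assumptions, valid for one producer and one consumer because the cursors are process-local):

    def produce(self, item):
        with self.mutex:
            while self.freePositions.value == 0:
                self.positions.wait()          # wait for a free slot
            self.buffer[self.nextWrite] = item
            self.nextWrite = (self.nextWrite + 1) % self.bufferSize
            self.freePositions.value -= 1
            self.items.notify()                # an item is now available

    def consume(self):
        with self.mutex:
            while self.freePositions.value == self.bufferSize:
                self.items.wait()              # wait for an item
            item = self.buffer[self.nextRead]
            self.nextRead = (self.nextRead + 1) % self.bufferSize
            self.freePositions.value += 1
            self.positions.notify()            # a slot is now free
            return item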
Example #27
class Producer(Process):
    def __init__(self, prod_end, fname, SHARED_QUEUE_SIZE_LIMIT):
        super(Producer, self).__init__()
        self.prod_end = prod_end
        self.fp = open(fname, 'r')
        self.SHARED_QUEUE_SIZE_LIMIT = SHARED_QUEUE_SIZE_LIMIT
        self.batch_queue = []
        self.condition = Condition()
        self.pipe_out_thread = PipeOutThread(prod_end, self.condition,
                                             self.SHARED_QUEUE_SIZE_LIMIT,
                                             self.batch_queue)

    def _preprocess(self, data):
        N = 1000 * 1000 * 10 * 3
        while N > 0:
            N -= 1
        return data

    def _is_shared_queue_full(self):

        if self.pipe_out_thread.get_queue_size(
        ) >= self.SHARED_QUEUE_SIZE_LIMIT:
            return True
        else:
            return False

    def _preprocess_and_put_in_queue(self, data):

        self.condition.acquire()

        #print 'prod acquired'
        if self._is_shared_queue_full():
            #print 'prod: queue is full so waiting'
            self.condition.wait()

        self.batch_queue.append(self._preprocess(data))
        #print 'self.batch_queue', len(self.batch_queue)
        self.condition.notify()
        self.condition.release()

    def _read_data(self, i, dummy=False):
        if dummy:
            return 'soumya'
        else:
            data = None
            offset = random.randint(5, 16) * GB
            #print 'offset is : ' , offset/GB
            self.fp.seek(offset)
            data = self.fp.read(CHUNK_SIZE_TO_READ)
            #print 'len_data ', len(data)
            return data

    def run(self):
        self.pipe_out_thread.start()
        for i in range(BATCHES):
            data = self._read_data(i)
            self._preprocess_and_put_in_queue(data)
            #print 'prod put %s'%(i)
        self.pipe_out_thread.join()
Example #28
def getter(array: Array, cv: Condition):
    """
    Awaits notification through a Condition object
    (condition variable) that a value is available in array.
    Reads the value and prints it.
    :param array: a multiprocessing Array of integers of size 1 from which to read a value
    :param cv: a Condition object (a condition variable) to allow the value to be read only when it is ready
    """
    print('In getter.')
    with cv:
        # wait_for takes a _predicate_, which is a boolean-valued function.
        # A lambda expression creates an anonymous function that returns the result of evaluating the body.
        # This invocation causes the process to block until the value in a is set.
        cv.wait_for(lambda: array[0] != 0)
        print(f'Got {array[0]}.')
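Wiring the two halves together takes one shared Array and one Condition; a minimal driver sketch (the 'i' typecode and the process layout are assumptions):

from multiprocessing import Array, Condition, Process

if __name__ == '__main__':
    arr = Array('i', [0])          # size-1 integer array, initially 0
    cv = Condition()
    g = Process(target=getter, args=(arr, cv))
    s = Process(target=setter, args=(arr, cv))
    g.start()                      # blocks in cv.wait_for(...) until notified
    s.start()                      # writes 43 and notifies
    g.join()
    s.join()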
Example #29
 def __init__(self, dim_i=3, dim_j=1):
     self.jugadores = []
     self.M = Manager()
     self.listenner = Listener(address=('localhost', 6000), authkey=b'secret password')
     self.color = 0

     self.numCasillas = 0
     self.numEspera = self.M.list([0, 0, self.numCasillas])
     self.desocupado = Condition()
     self.cont = 0
     self.turno = self.M.list([0])
     self.dim_i = dim_i
     self.dim_j = dim_j
     self.len = self.dim_i * self.dim_j
     self.tablero = self.M.list([0] * (self.dim_i * self.dim_j))
Example #30
    def __init__(self, world_class, opt, agents):
        self.inner_world = world_class(opt, agents)

        self.queued_items = Semaphore(0)  # counts num exs to be processed
        self.epochDone = Condition()  # notifies when exs are finished
        self.terminate = Value('b', False)  # tells threads when to shut down
        self.cnt = Value('i', 0)  # number of exs that remain to be processed

        self.threads = []
        for i in range(opt['numthreads']):
            self.threads.append(
                HogwildProcess(i, world_class, opt, agents, self.queued_items,
                               self.epochDone, self.terminate, self.cnt))
        for t in self.threads:
            t.start()
Example #31
def run():
    global task_process
    global task_condition
    task_condition = Condition()
    task_process = Process(target=wikiconnector.task_processor,
                           args=(dbconfig, task_condition))
    task_process.start()
    application.run(host='0.0.0.0', port=8000)
Example #32
def stage_2(num, cond, v):
    print('Starting', num)
    with cond:
        while not v.value:
            cond.wait()
    print('stage_2_1:', num)

def stage_3(num, cond, v):
    print('Starting', num)
    with cond:
        while not v.value:
            cond.wait()
    print('stage_3_1:', num)

if __name__ == '__main__':
    v = Value('i', False)
    cond = Condition()
    # Create the processes
    s1 = Process(target=stage_1, args=(1, cond, v))
    s2 = Process(target=stage_2, args=(2, cond, v))
    s3 = Process(target=stage_3, args=(3, cond, v))
    procs = [s1, s2, s3]

    # Add them to the run queue; they begin executing in this order
    for p in procs:
        p.start()
    for p in procs:
        p.join()
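stage_1 is referenced above but not defined in this example; a minimal sketch that matches the waiters (its body is an assumption) would set the shared flag and wake everyone:

def stage_1(num, cond, v):
    # Assumed first stage: do its work, then release the waiting stages.
    print('Starting', num)
    with cond:
        v.value = True      # flip the flag stage_2/stage_3 are polling
        cond.notify_all()   # wake every process blocked in cond.wait()
    print('stage_1_1:', num)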
Example #33
 def __init__(self, *args):
     TServer.__init__(self, *args)
     self.numWorkers = 10
     self.workers = []
     self.isRunning = Value('b', False)
     self.stopCondition = Condition()
     self.postForkCallback = None
Example #34
class Barrier(object):
    def __init__(self, num_threads):
        self.num_threads = num_threads
        self.threads_left = Value('i', num_threads, lock=True)
        self.mutex = Lock()
        self.waitcond = Condition(self.mutex)

    def wait(self):
        self.mutex.acquire()
        self.threads_left.value -= 1
        if self.threads_left.value == 0:
            self.threads_left.value = self.num_threads
            self.waitcond.notify_all()
            self.mutex.release()
        else:
            self.waitcond.wait()
            self.mutex.release()
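A quick demonstration of the barrier; the worker function is illustrative, not part of the original:

from multiprocessing import Process

def worker(barrier, n):
    print('worker %d waiting' % n)
    barrier.wait()                 # blocks until num_threads processes arrive
    print('worker %d released' % n)

if __name__ == '__main__':
    b = Barrier(3)
    ps = [Process(target=worker, args=(b, n)) for n in range(3)]
    for p in ps:
        p.start()
    for p in ps:
        p.join()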
Example #35
    def __init__(self):
        """Initialises the RWLock."""
        self._condition = Condition()
        self._readers = Value(c_uint64, 0, lock=False)
        self._writers_waiting = Value(c_uint64, 0, lock=False)

        self.for_reading = self.ReadLock(self)
        self.for_writing = self.WriteLock(self)
Example #36
 def __init__(self, engine, max_working = 1):
     self.condition     = Condition(RLock())
     self.engine        = engine
     self.max_working   = max_working
     self.running       = False
     self.paused        = False
     self.metadata      = sa.MetaData(self.engine)
     self._table_prefix = 'exscript_pipeline_'
     self._table_map    = {}
     self.__update_table_names()
     self.clear()
Example #37
 def activate_as_parent(self, debug=False):
     assert not self.child_mode
     self.debug_mode = debug
     self.jobs = []
     self.output_lock = Lock()
     self.parent_mode = True
     self.output_queue = Queue()
     self.status_line_cleared = Condition()
     self.thread = Thread(target=self._print_thread)
     self.thread.daemon = True
     self.thread.start()
Example #38
class OrderedQueue(object):
    def __init__(self, maxsize):
        self.queue = Queue(maxsize=maxsize)
        self.lock = Lock()
        self.getlock = Lock()
        self.putcounter = Value('i', -1)
        self.getcounter = Value('i', 0)
        self.cond = Condition(self.lock)
        self.manager = Manager()
        self.getlist = self.manager.list()

    def put(self, index, elem):
        with self.lock:
            while index != self.putcounter.value + 1:
                self.cond.wait()
            self.queue.put((index, elem))
            #sys.stderr.write("right after adding data with SEED %i. Queue size is now %i\n" %(index, self.queue.qsize()))
            self.putcounter.value += 1
            self.cond.notify_all()
        
    def get(self):
        with self.getlock:
            for i, element in enumerate(self.getlist):
                index, elem = element 
                if index == self.getcounter.value:
                    self.getcounter.value += 1
                    del self.getlist[i]
                    return (index, elem)
            while True:
                index, elem = self.queue.get()
                if index == self.getcounter.value:
                    self.getcounter.value += 1
                    return (index, elem)
                else:
                    self.getlist.append((index, elem))

    def close(self):
        return self.queue.close()

    def qsize(self):
        return self.queue.qsize()
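A usage sketch: producers finish in arbitrary order, yet get() always hands items back by sequence index. Threads are used here for brevity, since the multiprocessing primitives inside OrderedQueue work between threads as well:

import random
import time
from threading import Thread

if __name__ == '__main__':
    oq = OrderedQueue(maxsize=4)

    def produce(i):
        time.sleep(random.random())    # finish at a random moment
        oq.put(i, 'item-%d' % i)       # put() blocks until it is index i's turn

    workers = [Thread(target=produce, args=(i,)) for i in range(4)]
    for w in workers:
        w.start()
    for i in range(4):
        print(oq.get())                # (0, 'item-0'), (1, 'item-1'), ...
    for w in workers:
        w.join()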
Example #39
def test_watch_directory():

    def _cleanup(path):
        for f in listdir(path):
            p = join(path, f)
            if isdir(p):
                rmtree(p)
            elif f != '.nothing':
                unlink(p)

    sample_template = ''
    sample_directory = dirname(realpath(__file__)) + '/sample/'
    watch_directory = sample_directory + 'watch/'
    render_directory = sample_directory + 'render/'
    template_directory = sample_directory + 'templates/'
    with open(template_directory + 'haml.tmpl', 'r') as f:
        sample_template = f.read()

    condition = Condition()
    p = Process(target=reloader.watch_directory,
                args=(watch_directory, render_directory, condition))
    condition.acquire()
    p.start()
    condition.wait()

    try:
        with open(watch_directory + 'test.haml', 'w') as f:
            f.write(sample_template)

        subdir = watch_directory + 'test_subdir/'
        try:
            mkdir(subdir)
        except OSError:
            if not isdir(subdir):
                raise
        with open(subdir + 'test_two.haml', 'w') as f:
            f.write(sample_template)

        sleep(1)

        assert_true(exists(render_directory + 'test.html'))
        assert_true(exists(render_directory + 'test_subdir/test_two.html'))
    finally:
        condition.release()
        p.terminate()
        p.join()

        sleep(1)

        _cleanup(watch_directory)
        _cleanup(render_directory)
Example #40
    def __init__(self, world_class, opt, agents):
        self.inner_world = world_class(opt, agents)

        self.queued_items = Semaphore(0)  # counts num exs to be processed
        self.epochDone = Condition()  # notifies when exs are finished
        self.terminate = Value('b', False)  # tells threads when to shut down
        self.cnt = Value('i', 0)  # number of exs that remain to be processed

        self.threads = []
        for i in range(opt['numthreads']):
            self.threads.append(HogwildProcess(i, world_class, opt,
                                               agents, self.queued_items,
                                               self.epochDone, self.terminate,
                                               self.cnt))
        for t in self.threads:
            t.start()
Example #41
class SynchronizingBus(Bus):
    def __init__(self, sync_delay=1):
        Bus.__init__(self)
        self.sync_delay = sync_delay
        self.condition = Condition()

    def start(self):
        import time
        time.sleep(self.sync_delay)
        self.log("Releasing children")
        self.condition.acquire()
        self.condition.notify_all()
        self.condition.release()
        Bus.start(self)
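The children this bus releases would block on the same condition before start() fires; a minimal sketch of that child side (assumed, not shown):

def child_wait(bus):
    # Hypothetical child: block until SynchronizingBus.start() calls notify_all().
    # A robust child would wait on a predicate to tolerate a missed notification.
    with bus.condition:
        bus.condition.wait()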
Example #42
    def start(self):
        if self._started:
            return

        self.manager = manager = Manager()
        self.shared_uuid_fn_dict = manager.dict()
        self.shared_uuid_map_dict = manager.dict()
        self.shared_master_blocks = manager.dict()
        self.download_cond = Condition()

        self._started = True
        self.ctx = zmq.Context()
        self.host = socket.gethostname()
        if GUIDE_ADDR not in env.environ:
            start_guide_manager()

        self.guide_addr = env.get(GUIDE_ADDR)
        self.random_inst = random.SystemRandom()
        self.server_addr, self.server_thread = self.start_server()
        self.uuid_state_dict = {}
        self.uuid_map_dict = {}
        self.master_broadcast_blocks = {}
        env.register(DOWNLOAD_ADDR, self.server_addr)
Example #43
class Pipeline(object):
    """
    A collection that is similar to Python's Queue object, except
    it also tracks items that are currently sleeping or in progress.
    """
    def __init__(self, max_working = 1):
        self.condition   = Condition(RLock())
        self.max_working = max_working
        self.running     = True
        self.paused      = False
        self.queue       = None
        self.force       = None
        self.sleeping    = None
        self.working     = None
        self.item2id     = None
        self.id2item     = None # for performance reasons
        self.name2id     = None
        self.id2name     = None
        self.clear()

    def __len__(self):
        with self.condition:
            return len(self.id2item)

    def __contains__(self, item):
        with self.condition:
            return item in self.item2id

    def _register_item(self, name, item):
        uuid               = uuid4().hex
        self.id2item[uuid] = item
        self.item2id[item] = uuid
        if name is None:
            return uuid
        if name in self.name2id:
            msg = 'an item named %s is already queued' % repr(name)
            raise AttributeError(msg)
        self.name2id[name] = uuid
        self.id2name[uuid] = name
        return uuid

    def get_from_name(self, name):
        """
        Returns the item with the given name, or None if no such item
        is known.
        """
        with self.condition:
            try:
                item_id = self.name2id[name]
            except KeyError:
                return None
            return self.id2item[item_id]

    def has_id(self, item_id):
        """
        Returns True if the queue contains an item with the given id.
        """
        return item_id in self.id2item

    def task_done(self, item):
        with self.condition:
            try:
                self.working.remove(item)
            except KeyError:
                # This may happen if we receive a notification from a
                # thread that was previously enqueued, but then the
                # workqueue was forcefully stopped without waiting for
                # child threads to complete.
                self.condition.notify_all()
                return
            item_id = self.item2id.pop(item)
            self.id2item.pop(item_id)
            try:
                name = self.id2name.pop(item_id)
            except KeyError:
                pass
            else:
                self.name2id.pop(name)
            self.condition.notify_all()

    def append(self, item, name = None):
        """
        Adds the given item to the end of the pipeline.
        """
        with self.condition:
            self.queue.append(item)
            uuid = self._register_item(name, item)
            self.condition.notify_all()
            return uuid

    def appendleft(self, item, name = None, force = False):
        with self.condition:
            if force:
                self.force.append(item)
            else:
                self.queue.appendleft(item)
            uuid = self._register_item(name, item)
            self.condition.notify_all()
            return uuid

    def prioritize(self, item, force = False):
        """
        Moves the item to the very left of the queue.
        """
        with self.condition:
            # If the job is already running (or about to be forced),
            # there is nothing to be done.
            if item in self.working or item in self.force:
                return
            self.queue.remove(item)
            if force:
                self.force.append(item)
            else:
                self.queue.appendleft(item)
            self.condition.notify_all()

    def clear(self):
        with self.condition:
            self.queue    = deque()
            self.force    = deque()
            self.sleeping = set()
            self.working  = set()
            self.item2id  = dict()
            self.id2item  = dict()
            self.name2id  = dict()
            self.id2name  = dict()
            self.condition.notify_all()

    def stop(self):
        """
        Force the next() method to return while in another thread.
        The return value of next() will be None.
        """
        with self.condition:
            self.running = False
            self.condition.notify_all()

    def start(self):
        with self.condition:
            self.running = True
            self.condition.notify_all()

    def pause(self):
        with self.condition:
            self.paused = True
            self.condition.notify_all()

    def unpause(self):
        with self.condition:
            self.paused = False
            self.condition.notify_all()

    def sleep(self, item):
        with self.condition:
            self.sleeping.add(item)
            self.condition.notify_all()

    def wake(self, item):
        assert item in self.sleeping
        with self.condition:
            self.sleeping.remove(item)
            self.condition.notify_all()

    def wait_for_id(self, item_id):
        with self.condition:
            while self.has_id(item_id):
                self.condition.wait()

    def wait(self):
        """
        Waits for all currently running tasks to complete.
        """
        with self.condition:
            while self.working:
                self.condition.wait()

    def wait_all(self):
        """
        Waits for all queued and running tasks to complete.
        """
        with self.condition:
            while len(self) > 0:
                self.condition.wait()

    def with_lock(self, function, *args, **kwargs):
        with self.condition:
            return function(self, *args, **kwargs)

    def set_max_working(self, max_working):
        with self.condition:
            self.max_working = int(max_working)
            self.condition.notify_all()

    def get_max_working(self):
        return self.max_working

    def get_working(self):
        return list(self.working)

    def _popleft_sleeping(self):
        sleeping = []
        while True:
            try:
                node = self.queue[0]
            except IndexError:
                break
            if node not in self.sleeping:
                break
            sleeping.append(node)
            self.queue.popleft()
        return sleeping

    def _get_next(self, pop = True):
        # We need to leave sleeping items in the queue because else we
        # would not know their original position after they wake up.
        # So we need to temporarily remove sleeping items from the top of
        # the queue here.
        sleeping = self._popleft_sleeping()

        # Get the first non-sleeping item from the queue.
        if pop:
            try:
                next = self.queue.popleft()
            except IndexError:
                next = None
        else:
            try:
                next = self.queue[0]
            except IndexError:
                next = None

        # Re-insert sleeping items.
        self.queue.extendleft(sleeping)
        return next

    def try_next(self):
        """
        Like next(), but only returns the item that would be selected
        right now, without locking and without changing the queue.
        """
        with self.condition:
            try:
                return self.force[0]
            except IndexError:
                pass

            return self._get_next(False)

    def next(self):
        with self.condition:
            while self.running:
                if self.paused:
                    self.condition.wait()
                    continue

                # Wait until enough slots are available.
                if len(self.working) - \
                   len(self.sleeping) - \
                   len(self.force) >= self.max_working:
                    self.condition.wait()
                    continue

                # Forced items are returned regardless of how many tasks
                # are already working.
                try:
                    next = self.force.popleft()
                except IndexError:
                    pass
                else:
                    self.working.add(next)
                    return next

                # Return the first non-sleeping task.
                next = self._get_next()
                if next is None:
                    self.condition.wait()
                    continue
                self.working.add(next)
                return next
        return None
Example #44
class TProcessPoolServer(TServer):

    """
    Server with a fixed size pool of worker subprocesses which service requests.
    Note that if you need shared state between the handlers - it's up to you!
    Written by Dvir Volk, doat.com
    """

    def __init__(self, *args):
        TServer.__init__(self, *args)
        self.numWorkers = 10
        self.workers = []
        self.isRunning = Value('b', False)
        self.stopCondition = Condition()
        self.postForkCallback = None

    def setPostForkCallback(self, callback):
        if not callable(callback):
            raise TypeError("This is not a callback!")
        self.postForkCallback = callback

    def setNumWorkers(self, num):
        """Set the number of worker threads that should be created"""
        self.numWorkers = num

    def workerProcess(self):
        """Loop around getting clients from the shared queue and process them."""

        if self.postForkCallback:
            self.postForkCallback()

        while self.isRunning.value == True:
            try:
                client = self.serverTransport.accept()
                self.serveClient(client)
            except (KeyboardInterrupt, SystemExit):
                return 0
            except (Exception) as x:
                logging.exception(x)

    def serveClient(self, client):
        """Process input/output from a client for as long as possible"""
        itrans = self.inputTransportFactory.getTransport(client)
        otrans = self.outputTransportFactory.getTransport(client)
        iprot = self.inputProtocolFactory.getProtocol(itrans)
        oprot = self.outputProtocolFactory.getProtocol(otrans)

        try:
            while True:
                self.processor.process(iprot, oprot)
        except (TTransportException) as tx:
            pass
        except (Exception) as x:
            logging.exception(x)

        itrans.close()
        otrans.close()


    def serve(self):
        """Start a fixed number of worker threads and put client into a queue"""

        #this is a shared state that can tell the workers to exit when set as false
        self.isRunning.value = True

        #first bind and listen to the port
        self.serverTransport.listen()

        #fork the children
        for i in range(self.numWorkers):
            try:
                w = Process(target=self.workerProcess)
                w.daemon = True
                w.start()
                self.workers.append(w)
            except (Exception) as x:
                logging.exception(x)

        #wait until the condition is set by stop()

        while True:

            self.stopCondition.acquire()
            try:
                self.stopCondition.wait()
                break
            except (SystemExit, KeyboardInterrupt):
                break
            except (Exception) as x:
                logging.exception(x)

        self.isRunning.value = False

    def stop(self):
        self.isRunning.value = False
        self.stopCondition.acquire()
        self.stopCondition.notify()
        self.stopCondition.release()
Example #45
class DownloadManager(object):
    def __init__(self):
        self._started = False
        self.server_thread = None
        self.download_threads = {}
        self.uuid_state_dict = None
        self.uuid_map_dict = None
        self.guide_addr = None
        self.server_addr = None
        self.host = None
        self.ctx = None
        self.random_inst = None
        self.master_broadcast_blocks = {}

    def start(self):
        if self._started:
            return

        self.manager = manager = Manager()
        self.shared_uuid_fn_dict = manager.dict()
        self.shared_uuid_map_dict = manager.dict()
        self.shared_master_blocks = manager.dict()
        self.download_cond = Condition()

        self._started = True
        self.ctx = zmq.Context()
        self.host = socket.gethostname()
        if GUIDE_ADDR not in env.environ:
            start_guide_manager()

        self.guide_addr = env.get(GUIDE_ADDR)
        self.random_inst = random.SystemRandom()
        self.server_addr, self.server_thread = self.start_server()
        self.uuid_state_dict = {}
        self.uuid_map_dict = {}
        self.master_broadcast_blocks = {}
        env.register(DOWNLOAD_ADDR, self.server_addr)

    def start_server(self):
        sock = self.ctx.socket(zmq.REP)
        sock.setsockopt(zmq.LINGER, 0)
        port = sock.bind_to_random_port("tcp://0.0.0.0")
        server_addr = 'tcp://%s:%d' % (self.host, port)
        guide_sock = self.ctx.socket(zmq.REQ)
        guide_sock.setsockopt(zmq.LINGER, 0)
        guide_sock.connect(self.guide_addr)

        def run():
            logger.debug("server started at %s", server_addr)

            while self._started:
                if not sock.poll(1000, zmq.POLLIN):
                    continue
                type_, msg = sock.recv_pyobj()
                logger.debug('server recv: %s %s', type_, msg)
                if type_ == SERVER_STOP:
                    sock.send_pyobj(None)
                    break
                elif type_ == SERVER_FETCH:
                    uuid, indices, client_addr = msg
                    if uuid in self.master_broadcast_blocks:
                        block_num = len(self.master_broadcast_blocks[uuid])
                        bls = []
                        for index in indices:
                            if index >= block_num:
                                logger.warning('input index too big %s for '
                                               'len of blocks  %d from host %s',
                                               str(indices), block_num, client_addr)
                                sock.send_pyobj((SERVER_FETCH_FAIL, None))
                            else:
                                bls.append(self.master_broadcast_blocks[uuid][index])
                        sock.send_pyobj((SERVER_FETCH_OK, (indices, bls)))
                    elif uuid in self.uuid_state_dict:
                        fd = os.open(self.uuid_state_dict[uuid][0], os.O_RDONLY)
                        mmfp = mmap.mmap(fd, 0, access=ACCESS_READ)
                        os.close(fd)
                        bitmap = self.uuid_map_dict[uuid]
                        block_num = len(bitmap)
                        bls = []
                        for index in indices:
                            if index >= block_num:
                                logger.warning('input index too big %s for '
                                               'len of blocks  %d from host %s',
                                               str(indices), block_num, client_addr)
                                sock.send_pyobj((SERVER_FETCH_FAIL, None))
                            else:
                                mmfp.seek(bitmap[index][0])
                                block = mmfp.read(bitmap[index][1])
                                bls.append(block)
                        mmfp.close()
                        sock.send_pyobj((SERVER_FETCH_OK, (indices, bls)))
                    else:
                        logger.warning('server fetch failed for uuid %s '
                                       'not exists in server %s from host %s',
                                       uuid, socket.gethostname(), client_addr)
                        sock.send_pyobj((SERVER_FETCH_FAIL, None))
                elif type_ == DATA_GET:
                    uuid, compressed_size = msg
                    if uuid not in self.uuid_state_dict or not self.uuid_state_dict[uuid][1]:
                        if uuid not in self.download_threads:
                            sources = self._get_sources(uuid, guide_sock)
                            if not sources:
                                logger.warning('get sources from guide server failed in host %s',
                                               socket.gethostname())
                                sock.send_pyobj(DATA_GET_FAIL)
                                continue
                            self.download_threads[uuid] = spawn(self._download_blocks,
                                                                *[sources, uuid, compressed_size])
                            sock.send_pyobj(DATA_DOWNLOADING)
                        else:
                            sock.send_pyobj(DATA_DOWNLOADING)
                    else:
                        sock.send_pyobj(DATA_GET_OK)
                elif type_ == SERVER_CLEAR_ITEM:
                    uuid = msg
                    self.clear(uuid)
                    sock.send_pyobj(None)
                else:
                    logger.error('Unknown server message: %s %s', type_, msg)
                    sock.send_pyobj(None)

            sock.close()
            logger.debug("stop Broadcast server %s", server_addr)
            for uuid in list(self.uuid_state_dict.keys()):
                self.clear(uuid)

        return server_addr, spawn(run)

    def get_blocks(self, uuid):
        if uuid in self.master_broadcast_blocks:
            return self.master_broadcast_blocks[uuid]
        if uuid in self.shared_master_blocks:
            return self.shared_master_blocks[uuid]

    def register_blocks(self, uuid, blocks):
        if uuid in self.master_broadcast_blocks:
            logger.warning('the block uuid %s exists in dict', uuid)
            return
        self.master_broadcast_blocks[uuid] = blocks
        self.shared_master_blocks[uuid] = blocks

    def _get_sources(self, uuid, source_sock):
        try:
            source_sock.send_pyobj((GUIDE_GET_SOURCES,
                                    uuid))
            sources = source_sock.recv_pyobj()
        except:
            logger.warning('GET sources failed for addr %s with ZMQ ERR',
                           self.server_addr)
            sources = {}
        return sources

    def _update_sources(self, uuid, bitmap, source_sock):
        try:
            source_sock.send_pyobj((GUIDE_SET_SOURCES,
                                    (uuid, self.server_addr, bitmap)))
            source_sock.recv_pyobj()
        except:
            pass

    def _download_blocks(self, sources, uuid, compressed_size):
        block_num = 0
        bitmap = [0]
        write_mmap_handler = None
        download_guide_sock = self.ctx.socket(zmq.REQ)
        download_guide_sock.setsockopt(zmq.LINGER, 0)
        download_guide_sock.connect(self.guide_addr)

        def _report_bad(addr):
            logger.debug('fetch blocks failed from server %s', addr)
            download_guide_sock.send_pyobj((GUIDE_REPORT_BAD, (uuid, addr)))
            download_guide_sock.recv_pyobj()

        def _fetch(addr, indices, bit_map):
            sock = self.ctx.socket(zmq.REQ)
            try:
                sock.setsockopt(zmq.LINGER, 0)
                sock.connect(addr)
                sock.send_pyobj((SERVER_FETCH, (uuid, indices, self.server_addr)))
                avail = sock.poll(1 * 1000, zmq.POLLIN)
                check_sock = None
                if not avail:
                    try:
                        check_sock = socket.socket()
                        addr_list = addr[len('tcp://'):].split(':')
                        addr_list[1] = int(addr_list[1])
                        check_sock.connect(tuple(addr_list))
                    except Exception as e:
                        logger.warning('connect the addr %s failed with exception %s',
                                       addr, e)
                        _report_bad(addr)
                    else:
                        logger.debug("%s recv broadcast %s from %s timeout",
                                     self.server_addr, str(indices), addr)
                    finally:
                        if check_sock:
                            check_sock.close()
                    return
                result, msg = sock.recv_pyobj()
                if result == SERVER_FETCH_FAIL:
                    _report_bad(addr)
                    return
                if result == SERVER_FETCH_OK:
                    indices, blocks = msg
                    for rank, index in enumerate(indices):
                        if blocks[rank] is not None:
                            write_mmap_handler.seek(bit_map[index][0])
                            write_mmap_handler.write(blocks[rank])
                            bitmap[index] = bit_map[index]
                else:
                    raise RuntimeError('Unknown server response: %s %s' % (result, msg))
            finally:
                sock.close()

        final_path = env.workdir.alloc_tmp_file("broadcast")
        self.uuid_state_dict[uuid] = (final_path, False)
        fp = open(final_path, 'wb')
        fp.truncate(compressed_size)
        fp.close()
        fd = os.open(final_path, os.O_RDWR)
        write_mmap_handler = mmap.mmap(fd, 0,
                                       access=ACCESS_WRITE)
        os.close(fd)
        while not all(bitmap):
            remote = []
            for _addr, _bitmap in six.iteritems(sources):
                if block_num == 0:
                    block_num = len(_bitmap)
                    bitmap = [0] * block_num
                    self.uuid_map_dict[uuid] = bitmap
                if not _addr.startswith('tcp://%s:' % self.host):
                    remote.append((_addr, _bitmap))
            self.random_inst.shuffle(remote)
            for _addr, _bitmap in remote:
                _indices = [i for i in range(block_num) if not bitmap[i] and _bitmap[i]]
                if _indices:
                    self.random_inst.shuffle(_indices)
                    _fetch(_addr, _indices[:BATCHED_BLOCKS], _bitmap)
                    self._update_sources(uuid, bitmap, download_guide_sock)
            sources = self._get_sources(uuid, download_guide_sock)
        write_mmap_handler.flush()
        write_mmap_handler.close()
        self.shared_uuid_map_dict[uuid] = bitmap
        self.shared_uuid_fn_dict[uuid] = self.uuid_state_dict[uuid][0]
        self.uuid_state_dict[uuid] = self.uuid_state_dict[uuid][0], True
        download_guide_sock.close()
        with self.download_cond:
            self.download_cond.notify_all()

    def clear(self, uuid):
        if uuid in self.master_broadcast_blocks:
            del self.master_broadcast_blocks[uuid]
            del self.shared_master_blocks[uuid]
        if uuid in self.uuid_state_dict:
            del self.uuid_state_dict[uuid]
        if uuid in self.shared_uuid_fn_dict:
            del self.shared_uuid_fn_dict[uuid]
            del self.shared_uuid_map_dict[uuid]

    def shutdown(self):
        if not self._started:
            return

        self._started = False
        if self.server_thread and \
                self.server_addr.startswith('tcp://%s:' % socket.gethostname()):
            for _, th in six.iteritems(self.download_threads):
                th.join(timeout=0.1)  # only in executor, not needed
            self.server_thread.join(timeout=1)
            if self.server_thread.is_alive():
                logger.warning("Download manager server_thread not stopped.")

        self.manager.shutdown()  # shutdown will try join and terminate server process
Example #46
 def __init__(self, sync_delay=1):
     Bus.__init__(self)
     self.sync_delay = sync_delay
     self.condition = Condition()
Example #47
 def __init__(self, num_threads):
     self.num_threads = num_threads
     self.threads_left = Value('i', num_threads, lock=True)
     self.mutex = Lock()
     self.waitcond = Condition(self.mutex)
Example #48
class DBPipeline(object):
    """
    Like L{Exscript.workqueue.Pipeline}, but keeps all queued objects
    in a database, instead of using in-memory data structures.
    """
    def __init__(self, engine, max_working = 1):
        self.condition     = Condition(RLock())
        self.engine        = engine
        self.max_working   = max_working
        self.running       = False
        self.paused        = False
        self.metadata      = sa.MetaData(self.engine)
        self._table_prefix = 'exscript_pipeline_'
        self._table_map    = {}
        self.__update_table_names()
        self.clear()

    def __add_table(self, table):
        """
        Adds a new table to the internal table list.
        
        @type  table: Table
        @param table: An sqlalchemy table.
        """
        pfx = self._table_prefix
        self._table_map[table.name[len(pfx):]] = table

    def __update_table_names(self):
        """
        Adds all tables to the internal table list.
        """
        pfx = self._table_prefix
        self.__add_table(sa.Table(pfx + 'job', self.metadata,
            sa.Column('id',     sa.Integer, primary_key = True),
            sa.Column('name',   sa.String(150), index = True),
            sa.Column('status', sa.String(50), index = True),
            sa.Column('job',    sa.PickleType()),
            mysql_engine = 'INNODB'
        ))

    @synchronized
    def install(self):
        """
        Installs (or upgrades) database tables.
        """
        self.metadata.create_all()

    @synchronized
    def uninstall(self):
        """
        Drops all tables from the database. Use with care.
        """
        self.metadata.drop_all()

    @synchronized
    def clear_database(self):
        """
        Drops the content of any database table used by this library.
        Use with care.

        Wipes out everything, including types, actions, resources and acls.
        """
        delete = self._table_map['job'].delete()
        delete.execute()

    def debug(self, debug = True):
        """
        Enable/disable debugging.

        @type  debug: bool
        @param debug: True to enable debugging.
        """
        self.engine.echo = debug

    def set_table_prefix(self, prefix):
        """
        Define a string that is prefixed to all table names in the database.

        @type  prefix: string
        @param prefix: The new prefix.
        """
        self._table_prefix = prefix
        self.__update_table_names()

    def get_table_prefix(self):
        """
        Returns the current database table prefix.
        
        @rtype:  string
        @return: The current prefix.
        """
        return self._table_prefix

    def __len__(self):
        return self._table_map['job'].count().execute().fetchone()[0]

    def __contains__(self, item):
        return self.has_id(id(item))

    def get_from_name(self, name):
        """
        Returns the item with the given name, or None if no such item
        is known.
        """
        with self.condition:
            tbl_j = self._table_map['job']
            query = tbl_j.select(tbl_j.c.name == name)
            row   = query.execute().fetchone()
            if row is None:
                return None
            return row.job

    def has_id(self, item_id):
        """
        Returns True if the queue contains an item with the given id.
        """
        tbl_j = self._table_map['job']
        query = tbl_j.select(tbl_j.c.id == item_id).count()
        return query.execute().fetchone()[0] > 0

    def task_done(self, item):
        with self.condition:
            self.working.remove(item)
            self.all.remove(id(item))
            self.condition.notify_all()

    def append(self, item):
        with self.condition:
            self.queue.append(item)
            self.all.add(id(item))
            self.condition.notify_all()

    def appendleft(self, item, force = False):
        with self.condition:
            if force:
                self.force.append(item)
            else:
                self.queue.appendleft(item)
            self.all.add(id(item))
            self.condition.notify_all()

    def prioritize(self, item, force = False):
        """
        Moves the item to the very left of the queue.
        """
        with self.condition:
            # If the job is already running (or about to be forced),
            # there is nothing to be done.
            if item in self.working or item in self.force:
                return
            self.queue.remove(item)
            self.appendleft(item, force)
            self.condition.notify_all()

    def clear(self):
        with self.condition:
            self.queue    = deque()
            self.force    = deque()
            self.sleeping = set()
            self.working  = set()
            self.all      = set()
            self.condition.notify_all()

    def stop(self):
        """
        Force the next() method to return even while it is blocked in
        another thread. The return value of next() will be None.
        """
        with self.condition:
            self.running = False
            self.condition.notify_all()

    def pause(self):
        with self.condition:
            self.paused = True
            self.condition.notify_all()

    def unpause(self):
        with self.condition:
            self.paused = False
            self.condition.notify_all()

    def sleep(self, item):
        assert id(item) in self.all
        with self.condition:
            self.sleeping.add(item)
            self.condition.notify_all()

    def wake(self, item):
        assert id(item) in self.all
        assert item in self.sleeping
        with self.condition:
            self.sleeping.remove(item)
            self.condition.notify_all()

    def wait_for_id(self, item_id):
        with self.condition:
            while self.has_id(item_id):
                self.condition.wait()

    def wait(self):
        """
        Waits for all currently running tasks to complete.
        """
        with self.condition:
            while self.working:
                self.condition.wait()

    def wait_all(self):
        """
        Waits for all queued and running tasks to complete.
        """
        with self.condition:
            while len(self) > 0:
                self.condition.wait()

    def with_lock(self, function, *args, **kwargs):
        with self.condition:
            return function(self, *args, **kwargs)

    def set_max_working(self, max_working):
        with self.condition:
            self.max_working = int(max_working)
            self.condition.notify_all()

    def get_max_working(self):
        return self.max_working

    def get_working(self):
        return list(self.working)

    def _popleft_sleeping(self):
        sleeping = []
        while True:
            try:
                node = self.queue[0]
            except IndexError:
                break
            if node not in self.sleeping:
                break
            sleeping.append(node)
            self.queue.popleft()
        return sleeping

    def _get_next(self):
        # We need to leave sleeping items in the queue because else we
        # would not know their original position after they wake up.
        # So we need to temporarily remove sleeping items from the top of
        # the queue here.
        sleeping = self._popleft_sleeping()

        # Get the first non-sleeping item from the queue.
        try:
            next = self.queue.popleft()
        except IndexError:
            next = None

        # Re-insert sleeping items.
        self.queue.extendleft(sleeping)
        return next

    def next(self):
        with self.condition:
            self.running = True
            while self.running:
                if self.paused:
                    self.condition.wait()
                    continue

                # Wait until enough slots are available.
                if (len(self.working) -
                        len(self.sleeping) -
                        len(self.force)) >= self.max_working:
                    self.condition.wait()
                    continue

                # Forced items are returned regardless of how many tasks
                # are already working.
                try:
                    next = self.force.popleft()
                except IndexError:
                    pass
                else:
                    self.working.add(next)
                    return next

                # Return the first non-sleeping task.
                next = self._get_next()
                if next is None:
                    self.condition.wait()
                    continue
                self.working.add(next)
                return next
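For orientation, here is a minimal usage sketch of the pipeline above. It assumes an old (1.x) SQLAlchemy with implicit execution, as the class itself does, and that the snippet's own imports (sa, synchronized) are in place; the engine URL is illustrative:

import sqlalchemy as sa

engine = sa.create_engine('sqlite:///pipeline.db')   # any SQLAlchemy 1.x engine
pipeline = DBPipeline(engine, max_working=2)
pipeline.install()            # creates the exscript_pipeline_job table
pipeline.append('job-1')      # queue any picklable object
job = pipeline.next()         # blocks until a slot and an item are available
# ... run the job ...
pipeline.task_done(job)
pipeline.wait()               # wait for all running tasks to finish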
Example #49
class WaitableQueue(Queue):
    """Queue that uses a semaphore to reliably count items in it"""
    class Vacuum(ThreadLoop):
        def __init__(self, q, l):
            def callback():
                q.wait_notempty(0.1)

                while True:
                    try:
                        val = q.get(False)
                        l.append(val)

                    except Empty:
                        break

            ThreadLoop.__init__(self, callback)

    def __init__(self, maxsize=0):
        self.cond_empty = Condition()
        self.cond_notempty = Condition()
        self._put_counter = Value('i', 0)

        Queue.__init__(self, maxsize)

    def put(self, obj, block=True, timeout=None):
        Queue.put(self, obj, block, timeout)
        self._put_counter.value += 1

        if self.qsize() != 0:
            with self.cond_notempty:
                self.cond_notempty.notify_all()

    @property
    def put_counter(self):
        return self._put_counter.value

    def get(self, block=True, timeout=None):
        ret = Queue.get(self, block, timeout)
        if self.qsize() == 0:
            with self.cond_empty:
                self.cond_empty.notify_all()

        return ret

    def wait_empty(self, timeout=None):
        """Wait until all items have been taken from the queue"""
        with self.cond_empty:
            if self.qsize():
                self.cond_empty.wait(timeout)

    def wait_notempty(self, timeout=None):
        """Wait until at least one item is in the queue"""
        with self.cond_notempty:
            if self.qsize() == 0:
                self.cond_notempty.wait(timeout)
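A minimal sketch of how the two conditions are meant to be used: a consumer thread drains the queue while the producer blocks in wait_empty. The thread and the item values are illustrative, and the sketch assumes the class above is importable:

from threading import Thread

q = WaitableQueue()

def consumer():
    for _ in range(3):
        print(q.get())

Thread(target=consumer, daemon=True).start()
for i in range(3):
    q.put(i)
q.wait_empty(timeout=1.0)     # returns once the consumer has drained the queue
print('%d items were put in total' % q.put_counter)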
Example #50
class CountBucket(Query):
    """
    Class for registering callbacks on counts of packets sent to
    the controller.
    """
    def __init__(self):
        super(CountBucket, self).__init__()
        self.matches = set([])
        self.runtime_stats_query_fun = None
        self.outstanding_switches = []
        self.packet_count = 0
        self.byte_count = 0
        self.packet_count_persistent = 0
        self.byte_count_persistent = 0
        self.in_update_cv = Condition()
        self.in_update = False
        self._classifier = self.generate_classifier()
        
    def __repr__(self):
        return "CountBucket"

    def eval(self, pkt):
        """
        evaluate this policy on a single packet

        :param pkt: the packet on which to be evaluated
        :type pkt: Packet
        :rtype: set Packet
        """
        return set()

    def generate_classifier(self):
        return Classifier([Rule(identity,{self})])

    def apply(self):
        # self.bucket and self.bucket_lock are presumably provided by the
        # Query base class (they are not defined in this snippet).
        with self.bucket_lock:
            for pkt in self.bucket:
                self.packet_count_persistent += 1
                self.byte_count_persistent += pkt['header_len'] + pkt['payload_len']
            self.bucket.clear()

    def start_update(self):
        """
        Use a condition variable to mediate access to bucket state while it
        is being updated.

        Why a condition variable and not a plain lock? The main reason is
        that the state update does not happen in a single function call: the
        runtime processes the classifier rule by rule, and buckets may be
        touched in arbitrary order depending on the policy. Given that,

        (1) Holding locks *across* function calls is dangerous and
        non-modular, since the whole stretch of code must stay aware of the
        lock, and acquiring locks in different orders at different points
        can result in tricky deadlocks (another lock is already involved in
        protecting bucket updates in the runtime).

        (2) Python's "with" semantics is clean, whereas splitting it into
        explicit lock.acquire() and lock.release() calls leads to replicated
        boilerplate failure-handling code.
        """
        with self.in_update_cv:
            self.in_update = True
            self.matches = set([])
            self.runtime_stats_query_fun = None
            self.outstanding_switches = []

    def finish_update(self):
        with self.in_update_cv:
            self.in_update = False
            self.in_update_cv.notify_all()
        
    def add_match(self, m):
        """
        Add a match m to list of classifier rules to be queried for
        counts.
        """
        if m not in self.matches:
            self.matches.add(m)

    def add_pull_stats(self, fun):
        """
        Point to function that issues stats queries in the
        runtime.
        """
        if not self.runtime_stats_query_fun:
            self.runtime_stats_query_fun = fun

    def pull_stats(self):
        """Issue stats queries from the runtime"""
        queries_issued = False
        with self.in_update_cv:
            while self.in_update: # ensure buckets not updated concurrently
                self.in_update_cv.wait()
            if self.runtime_stats_query_fun is not None:
                self.outstanding_switches = []
                queries_issued = True
                self.runtime_stats_query_fun()
        # If no queries were issued there are no matches, so just invoke the
        # user-registered callback routines (self.callbacks is presumably
        # inherited from the Query base class).
        if not queries_issued:
            self.packet_count = self.packet_count_persistent
            self.byte_count = self.byte_count_persistent
            for f in self.callbacks:
                f([self.packet_count, self.byte_count])

    def add_outstanding_switch_query(self,switch):
        self.outstanding_switches.append(switch)

    def handle_flow_stats_reply(self,switch,flow_stats):
        """
        Given a flow_stats_reply from switch s, collect only those
        counts which are relevant to this bucket.

        Very simple processing for now: just collect all packet and
        byte counts from rules that have a match that is in the set of
        matches this bucket is interested in.
        """
        def stat_in_bucket(flow_stat, s):
            table_match = match(flow_stat['match']).intersect(match(switch=s))
            network_match = match(flow_stat['match'])
            if table_match in self.matches or network_match in self.matches:
                return True
            return False

        with self.in_update_cv:
            while self.in_update:
                self.in_update_cv.wait()
            self.packet_count = self.packet_count_persistent
            self.byte_count = self.byte_count_persistent
            if switch in self.outstanding_switches:
                for f in flow_stats:
                    if 'match' in f:
                        if stat_in_bucket(f, switch):
                            self.packet_count += f['packet_count']
                            self.byte_count   += f['byte_count']
                self.outstanding_switches.remove(switch)
        # Once we have all the necessary data, invoke the user-registered
        # callbacks.
        if not self.outstanding_switches:
            for f in self.callbacks:
                f([self.packet_count, self.byte_count])

    def __eq__(self, other):
        # TODO: if buckets eventually have names, equality should
        # be on names.
        return isinstance(other, CountBucket)
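The handshake described in start_update()/finish_update()/pull_stats() reduces to the following reusable pattern. This is a standalone sketch, not pyretic API:

from threading import Condition

class GuardedState(object):
    """Readers block while a multi-step update is in progress."""
    def __init__(self):
        self.cv = Condition()
        self.in_update = False

    def start_update(self):
        with self.cv:
            self.in_update = True

    def finish_update(self):
        with self.cv:
            self.in_update = False
            self.cv.notify_all()       # wake every blocked reader

    def read(self):
        with self.cv:
            while self.in_update:      # re-check the predicate on every wakeup
                self.cv.wait()
            # state is consistent here; safe to read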
Example #51
class IODeviceManager(Thread):

    def __init__(self, a_device, a_kernel, std_in=StandardInput(), std_out=StandardOutput()):
        Thread.__init__(self)
        self.set_device(a_device)
        self.set_kernel(a_kernel)
        self.set_input(std_in)
        self.set_output(std_out)
        self.set_mutex(RLock())
        self.set_queue(SoQueue())
        self.device_is_in_use = Condition(self.get_mutex())
        self.the_queue_is_empty = Condition(self.get_mutex())

    def get_kernel(self):
        return self.kernel

    def set_kernel(self, a_kernel):
        self.kernel = a_kernel

    def set_input(self, a_input):
        self.std_in = a_input

    def get_input(self):
        return self.std_in

    def set_output(self, a_output):
        self.std_out = a_output

    def get_output(self):
        return self.std_out

    def get_mutex(self):
        return self.mutex
        
    def set_mutex(self, a_mutex):   
        self.mutex = a_mutex
        
    def get_queue(self):
        return self.queue
        
    def set_queue(self, a_queue):
        self.queue = a_queue
    
    def set_device(self, a_device):
        self.device = a_device
        self.get_device().set_device_manager(self)

    def get_device(self):
        return self.device
        
    def the_device_is_busy(self):
        with self.get_mutex():
            return not self.get_device().is_not_busy()
    
    def send_to_device(self):
        with self.device_is_in_use:
            while self.the_device_is_busy():
                self.device_is_in_use.wait()
            with self.get_mutex():
                self.get_device().set_pcb(self.get())
                self.get_device().process_pcb()

    def notify_that_the_device_is_not_in_use(self):
        with self.device_is_in_use:
            self.device_is_in_use.notify()
    
    def put(self, a_pcb):
        with self.the_queue_is_empty:
            with self.get_mutex():
                self.get_queue().add_pcb(a_pcb)
                self.the_queue_is_empty.notify()
        
    def get(self):
        with self.get_mutex():
            return self.get_queue().get_first()

    def queue_is_empty(self):
        return self.get_queue().is_empty()

    def send_io_end_interruption(self, a_pcb):
        self.get_kernel().get_irq_manager().handle(Irq(IO_END_INTERRUPT, a_pcb))
            
    def run(self):
        while True:
            with self.the_queue_is_empty:
                while self.queue_is_empty():
                    self.the_queue_is_empty.wait()
                self.send_to_device()
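Because both conditions above share one RLock, a thread holding either of them can safely inspect both queue and device state. The queue side of the pattern, distilled with plain threading (process_job is a hypothetical handler):

from threading import Condition, RLock

mutex = RLock()
queue_not_empty = Condition(mutex)
jobs = []

def put_job(job):
    with queue_not_empty:
        jobs.append(job)
        queue_not_empty.notify()       # wake the worker waiting below

def worker_loop(process_job):
    while True:
        with queue_not_empty:
            while not jobs:            # guard against spurious wakeups
                queue_not_empty.wait()
            job = jobs.pop(0)
        process_job(job)               # run outside the lock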
Example #52
def solve(max_level, goal, num_workers):
    # prepare message queue shared with workers
    tasks = Queue()
    task_lock = Lock()
    task_cv = Condition(lock=task_lock)

    # create and start workers
    workers = []
    for i in range(0, num_workers):
        parent_conn, child_conn = Pipe()
        worker = Process(target=run_worker,
                         args=(child_conn, goal, max_level, tasks,
                               task_lock, task_cv))
        worker.start()
        workers.append((worker, parent_conn))

    # Find all possible sequences: [n0, n1, n2, ..., nM] (M=max_level)
    # where nX is the number of binary operators so that
    # '1 <n0 ops> 2 <n1 ops> 3 <n2 ops> ... M+1 <nM ops>' can be a valid
    # Reverse Polish Notation.  Key conditions are:
    # 1. n0 + n1 + ... + nM = M
    # 2. for any X, n0 + n1 + ... + nX <= X
    # (Note that from condition #2 n0 is always 0.)
    # We'll build the sequences in 'numops_list' below while exploring cases
    # in a BFS-like (or DP-like) manner.

    # This is a queue to maintain outstanding search results.  Each element
    # is a tuple of 2 items: 'numops_list', 'total_ops'
    # A tuple of (N, T) means:
    # - N = [n0, n1, ..., nX]
    # - T = sum(N)
    # (Note that we don't necessarily have to keep T as it can be derived
    # from N.  But we do this for efficiency).
    # The search is completed when len(N) reaches M (i.e., X=M-1) by appending
    # the last item of nM = M - (n0 + n1 + ... + nX) = M - T (see condition #1).
    tmp = [([0], 0)]

    while tmp:
        numops_list, total_ops = tmp.pop(0)
        level = len(numops_list)
        if level < max_level:
            # Expand the sequence with all possible numbers of operators at
            # the current level so we can explore the next level for each of
            # them.
            for i in range(0, level - total_ops + 1): # see condition #2
                tmp.append((numops_list + [i], total_ops + i))
        else:
            # Found one valid RPN template.  Pass it to workers and have them
            # work on it.
            numops_list.append(max_level - total_ops)
            with task_lock:
                tasks.put(numops_list)
                task_cv.notify()

    # Tell workers all data have been passed.
    solutions = set()
    with task_lock:
        for _ in workers:
            tasks.put(None)
        task_cv.notify_all()

    # Wait until all workers complete the tasks, while receiving any
    # intermediate and last solutions.  The received solutions may not
    # necessarily be fully unique, so we have to unify them here, again.
    # Received data of 'None' means the corresponding worker has completed
    # its task.
    # Note: here we assume all workers are reasonably equally active in
    # sending data, so we simply perform blocking receive.
    conns = set([w[1] for w in workers])
    while conns:
        for c in conns.copy():
            worker_data = c.recv()
            if worker_data is None:
                conns.remove(c)
                continue
            for solution in worker_data:
                if solution not in solutions:
                    solutions.add(solution)

    # All workers have completed.  Clean them up and print the final unified
    # results.  If we are to show all expressions (i.e. goal is None), sort
    # results by the expressions' values (listing integers first, followed by
    # all non-integers, followed by 'divided by 0' cases).
    for w in workers:
        w[0].join()
    if goal is None:
        l = list(solutions)
        l.sort(key=lambda x: (0, x[0]) if type(x[0]) == int else (1, str(x[0])))
        for solution in l:
            print('%s = %s' % (solution[1], str(solution[0])))
    else:
        for solution in solutions:
            print(solution)
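The template enumeration can be read in isolation; the sketch below strips out the worker plumbing from solve() and just yields the [n0, ..., nM] sequences satisfying conditions #1 and #2 above:

def rpn_templates(max_level):
    # BFS over partial sequences, exactly as in solve() above.
    tmp = [([0], 0)]
    while tmp:
        numops_list, total_ops = tmp.pop(0)
        level = len(numops_list)
        if level < max_level:
            for i in range(0, level - total_ops + 1):    # condition #2
                tmp.append((numops_list + [i], total_ops + i))
        else:
            yield numops_list + [max_level - total_ops]  # condition #1

print(list(rpn_templates(3)))
# [[0, 0, 0, 3], [0, 0, 1, 2], [0, 0, 2, 1], [0, 1, 0, 2], [0, 1, 1, 1]]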
Example #53
import os
import time

from multiprocessing import Process, Condition

cond = Condition()

def wait_condition():
    with cond:
        cond.wait()
        print('[%d] woke up!' % os.getpid())

# Note: the children inherit `cond` via fork(); on spawn-based platforms the
# Condition would have to be passed to each Process explicitly.
if __name__ == '__main__':
    for i in range(3):
        Process(target=wait_condition).start()

    time.sleep(1)
    print('notify!')
    with cond:
        cond.notify_all()
Example #54
                            windows.append((
                                '__MOVIE__ ' + i_join,
                                movie
                            ))
        q_out.put(
            '\n'.join(
                '{} {}'.format(
                    i + 1, '\t'.join(windows[i])
                )
                for i in range(len(windows))
            ) + '\n\n'
        )


    # multithreading code
    finished = Condition()
    queued_exs = Value('i', 0)
    proced_exs = Value('i', 0)
    # keep at most 100 examples ready per thread queued (to save memory)
    q = Queue(args['num_threads'] * 100)


    def load(ex):
        global queued_exs
        queued_exs.value += 1
        q.put(ex)


    def run():
        while True:
            ex = q.get()
Example #56
class IOManager(object):
    def __init__(self):
        self.capture_mode = False
        self.child_mode = False
        self.parent_mode = False

    def activate_as_child(self, output_lock, output_queue, status_line_cleared):
        self.parent_mode = False
        self.child_mode = True
        self.status_line_cleared = status_line_cleared
        self.output_lock = output_lock
        self.output_queue = output_queue

    def activate_as_parent(self, debug=False):
        assert not self.child_mode
        self.debug_mode = debug
        self.jobs = []
        self.output_lock = Lock()
        self.parent_mode = True
        self.output_queue = Queue()
        self.status_line_cleared = Condition()
        self.thread = Thread(target=self._print_thread)
        self.thread.daemon = True
        self.thread.start()

    def ask(self, question, default, get_input=input_function):
        answers = _("[Y/n]") if default else _("[y/N]")
        question = question + " " + answers + " "
        with self.lock:
            while True:
                STDOUT_WRITER.write("\a")
                STDOUT_WRITER.write(question)
                STDOUT_WRITER.flush()

                answer = get_input()
                if answer.lower() in (_("y"), _("yes")) or (
                    not answer and default
                ):
                    return True
                elif answer.lower() in (_("n"), _("no")) or (
                    not answer and not default
                ):
                    return False
                STDOUT_WRITER.write(_("Please answer with 'y(es)' or 'n(o)'.\n"))

    @contextmanager
    def capture(self):
        self.capture_mode = True
        self.captured_io = {
            'stderr': "",
            'stdout': "",
        }
        try:
            yield self.captured_io
        finally:
            self.capture_mode = False

    @property
    def child_parameters(self):
        return (self.output_lock, self.output_queue, self.status_line_cleared)

    def debug(self, msg):
        self.output_queue.put({'msg': 'LOG', 'log_type': 'DBG', 'text': msg})

    def job_add(self, msg):
        self.output_queue.put({'msg': 'LOG', 'log_type': 'JOB_ADD', 'text': msg})

    def job_del(self, msg):
        self.output_queue.put({'msg': 'LOG', 'log_type': 'JOB_DEL', 'text': msg})

    def stderr(self, msg):
        self.output_queue.put({'msg': 'LOG', 'log_type': 'ERR', 'text': msg})

    def stdout(self, msg):
        self.output_queue.put({'msg': 'LOG', 'log_type': 'OUT', 'text': msg})

    @contextmanager
    def job(self, job_text):
        self.job_add(job_text)
        try:
            yield
        finally:
            self.job_del(job_text)

    @property
    @contextmanager
    def lock(self):
        with self.output_lock:
            # Condition.wait() must be called with the condition's own
            # lock held, so acquire it first.
            with self.status_line_cleared:
                self.status_line_cleared.wait()
            yield

    def _print_thread(self):
        assert self.parent_mode
        while True:
            if self.output_lock.acquire(False):
                msg = self.output_queue.get()
                if msg['log_type'] == 'QUIT':
                    break
                if self.debug_mode and msg['log_type'] in ('OUT', 'DBG', 'ERR'):
                    msg['text'] = datetime.now().strftime("[%Y-%m-%d %H:%M:%S.%f] ") + msg['text']
                if self.jobs and TTY:
                    self._write("\r\033[K")
                if msg['log_type'] == 'OUT':
                    self._write(msg['text'] + "\n")
                elif msg['log_type'] == 'ERR':
                    self._write(msg['text'] + "\n", err=True)
                elif msg['log_type'] == 'DBG' and self.debug_mode:
                    self._write(msg['text'] + "\n")
                elif msg['log_type'] == 'JOB_ADD' and TTY:
                    self.jobs.append(msg['text'])
                elif msg['log_type'] == 'JOB_DEL' and TTY:
                    self.jobs.remove(msg['text'])
                if self.jobs and TTY:
                    self._write("[status] " + self.jobs[0])
                self.output_lock.release()
            else:  # someone else is holding the output lock
                # the process holding the lock should now be waiting for
                # us to remove any status lines present before it starts
                # printing
                if self.jobs and TTY:
                    self._write("\r\033[K")
                with self.status_line_cleared:
                    self.status_line_cleared.notify()
                # now we wait until the other process has finished and
                # released the output lock
                self.output_lock.acquire()
                self.output_lock.release()

    def shutdown(self):
        assert self.parent_mode
        self.output_queue.put({'msg': 'LOG', 'log_type': 'QUIT'})
        self.thread.join()

    def _write(self, msg, err=False):
        write_to_stream(STDERR_WRITER if err else STDOUT_WRITER, msg)
        if self.capture_mode:
            self.captured_io['stderr' if err else 'stdout'] += msg
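A minimal driver sketch for the parent side (it assumes the module-level STDOUT_WRITER, STDERR_WRITER and TTY globals this class relies on are configured):

io = IOManager()
io.activate_as_parent(debug=True)
io.stdout("hello from the parent process")
with io.job("deploying node1"):        # status line shown while the job runs
    io.debug("only visible in debug mode")
io.shutdown()                          # drains the queue and joins the thread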