Example #1
class ThreeFM(Process):
    def __init__(self, d, name, year):
        super().__init__()
        self.d = d
        self.name = name
        self.year = year
        self.r = []
        self._mutex = RLock()
        self._empty = Condition(self._mutex)
        self._full = Condition(self._mutex)

    def run(self):
        with ThreadPoolExecutor(max_workers=2) as pool:
            q = {
                pool.submit(self.put, self.name, dat(self.year, m, 1)): m
                for m in range(1, 13)
            }

    def __str__(self):
        return str(list(self.d.values())[0])  # dict views aren't indexable in Python 3

    def put(self, name, date):
        with self._full:
            while len(self.r) >= 12:
                self._full.wait()
            self.r.append(stat(date, name))
            self.d[0] = self.r
            self._empty.notify()

    def get(self):
        return list(self.d.values())[0]
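A usage sketch for this class, assuming `d` is a Manager().dict() shared with the parent, with hypothetical stand-ins for the `dat` and `stat` helpers the snippet leaves undefined:

from datetime import date
from multiprocessing import Manager

def dat(year, month, day):
    # Stand-in for the original date helper.
    return date(year, month, day)

def stat(d, name):
    # Placeholder for whatever monthly statistic the original fetches.
    return (name, d.isoformat())

if __name__ == '__main__':
    shared = Manager().dict()
    p = ThreeFM(shared, 'AAPL', 2020)  # example name and year
    p.start()
    p.join()
    print(p.get())  # the 12 monthly results collected by put()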
Example #2
class Producer(Process):
    def __init__(self, prod_end, fname, SHARED_QUEUE_SIZE_LIMIT):
        super(Producer, self).__init__()
        self.prod_end = prod_end
        self.fp = open(fname, 'r')
        self.SHARED_QUEUE_SIZE_LIMIT = SHARED_QUEUE_SIZE_LIMIT
        self.batch_queue = []
        self.condition = Condition()
        self.pipe_out_thread = PipeOutThread(prod_end, self.condition,
                                             self.SHARED_QUEUE_SIZE_LIMIT,
                                             self.batch_queue)

    def _preprocess(self, data):
        # Busy loop that simulates CPU-heavy preprocessing.
        N = 1000 * 1000 * 10 * 3
        while N > 0:
            N -= 1
        return data

    def _is_shared_queue_full(self):
        return self.pipe_out_thread.get_queue_size() >= self.SHARED_QUEUE_SIZE_LIMIT

    def _preprocess_and_put_in_queue(self, data):

        self.condition.acquire()

        #print 'prod acquired'
        # Re-check after every wakeup; a plain `if` could act on stale state.
        while self._is_shared_queue_full():
            #print 'prod: queue is full so waiting'
            self.condition.wait()

        self.batch_queue.append(self._preprocess(data))
        #print 'self.batch_queue', len(self.batch_queue)
        self.condition.notify()
        self.condition.release()

    def _read_data(self, i, dummy=False):
        if dummy:
            return 'soumya'
        else:
            data = None
            offset = random.randint(5, 16) * GB
            #print 'offset is : ' , offset/GB
            self.fp.seek(offset)
            data = self.fp.read(CHUNK_SIZE_TO_READ)
            #print 'len_data ', len(data)
            return data

    def run(self):
        self.pipe_out_thread.start()
        for i in range(BATCHES):
            data = self._read_data(i)
            self._preprocess_and_put_in_queue(data)
            #print 'prod put %s'%(i)
        self.pipe_out_thread.join()
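The PipeOutThread used above is not shown. A minimal sketch of it, assuming it only has to drain batch_queue under the shared condition and forward batches through the pipe (a real implementation would also need a shutdown signal so join() can return):

from threading import Thread

class PipeOutThread(Thread):
    # Hypothetical reconstruction of the consumer side of the producer's queue.
    def __init__(self, pipe_end, condition, size_limit, batch_queue):
        super().__init__()
        self.pipe_end = pipe_end
        self.condition = condition
        self.size_limit = size_limit
        self.batch_queue = batch_queue

    def get_queue_size(self):
        with self.condition:  # the default Condition lock is reentrant
            return len(self.batch_queue)

    def run(self):
        while True:
            with self.condition:
                while not self.batch_queue:
                    self.condition.wait()  # nothing to send yet
                batch = self.batch_queue.pop(0)
                self.condition.notify()  # queue shrank; wake the producer
            self.pipe_end.send(batch)  # send outside the lock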
Example #3
def setter(array: Array, cv: Condition):
    """
    Sets the value of the first (and only) index
    position in array exclusively and notifies the process
    awaiting this action that a valid value is available.
    :param array: a multiprocessing Array of integers of size 1 to which to write a value
    :param cv: a Condition object (a condition variable) to allow the value to be read only when it is ready
    """
    print('In setter.')
    with cv:
        print('Before setting:', array[0])
        array[0] = 43
        print('After setting:', array[0])
        cv.notify()
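The matching consumer is not shown; a sketch of one, plus a driver, assuming both processes share the same Array/Condition pair:

from multiprocessing import Array, Condition, Process

def getter(array: Array, cv: Condition):
    # Waits until setter() signals, then reads the value it wrote.
    print('In getter.')
    with cv:
        cv.wait_for(lambda: array[0] != 0)  # guards against spurious wakeups
        print(f'Got {array[0]}.')

if __name__ == '__main__':
    array = Array('i', 1)
    cv = Condition()
    g = Process(target=getter, args=(array, cv))
    s = Process(target=setter, args=(array, cv))
    g.start()
    s.start()
    g.join()
    s.join()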
Example #4
class QueuedDistributor(BaseDistributor):
    def __init__(self, socket_handler):
        BaseDistributor.__init__(self, socket_handler)
        self.socket_queue = SimpleQueue()
        self.queue_condition = Condition(Lock())

    def new_worker(self):
        return Process(target=QueuedWorker,
                       args=(self.socket_handler, self.socket_queue,
                             self.queue_condition))

    def process_handel(self, socket_handel):
        self.queue_condition.acquire()
        self.socket_queue.put(socket_handel)
        self.queue_condition.notify()
        self.queue_condition.release()
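QueuedWorker is referenced but not shown; a minimal sketch, assuming it loops taking sockets off the shared queue and sleeps on the condition while the queue is empty:

class QueuedWorker:
    # Hypothetical reconstruction of the consumer side of the distributor.
    def __init__(self, socket_handler, socket_queue, queue_condition):
        self.socket_handler = socket_handler
        self.socket_queue = socket_queue
        self.queue_condition = queue_condition
        self.run()

    def run(self):
        while True:
            self.queue_condition.acquire()
            while self.socket_queue.empty():
                self.queue_condition.wait()  # sleep until the distributor notifies
            sock = self.socket_queue.get()
            self.queue_condition.release()
            self.socket_handler(sock)  # handle the socket outside the lock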
Example #5
class ConditionVariableDemo:
    """
    Demonstrates the use of condition variables within a class.
    """
    def __init__(self):
        self.array = Array('i', 1)
        self.cv = Condition()

    def run(self):
        """
        Creates, runs, and joins a setter and a getter process.
        """
        p0 = Process(target=self.setter)
        p1 = Process(target=self.getter)
        p0.start()
        p1.start()
        p0.join()
        p1.join()

    def setter(self):
        """
        Sets the value of the first (and only) index position
        in self.array exclusively and notifies the process
        awaiting this action that a valid value is available.
        """
        with self.cv:
            print('In setter.')
            print('Before setting:', self.array[0])
            self.array[0] = 43
            print('After setting:', self.array[0])
            self.cv.notify()

    def getter(self):
        """
        Awaits notification through self.cv (a condition variable)
        that a value is available in self.array, then reads and
        prints the value.
        """
        print('In getter.')
        with self.cv:
            self.cv.wait_for(lambda: self.array[0] != 0)
            print(f'Got {self.array[0]}.')
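A short driver for the demo:

if __name__ == '__main__':
    demo = ConditionVariableDemo()
    demo.run()
    # Expected interleaving: the getter blocks in wait_for() until the
    # setter writes 43 and notifies, then prints 'Got 43.'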
Example #6
class Monitor:
    def __init__(self, bufferSize):
        # Shared Data
        self.buffer = Array('i', bufferSize)
        self.bufferSize = bufferSize
        self.freePositions = Value('i', bufferSize)

        # Local Data
        self.nextRead = 0
        self.nextWrite = 0

        # Control Data
        self.mutex = Lock()
        self.items = Condition(self.mutex)
        self.positions = Condition(self.mutex)

    def produce(self, value):
        self.mutex.acquire()
        # Re-check on wakeup; proceed only when a slot is free.
        while self.freePositions.value == 0:
            self.positions.wait()

        self.buffer[self.nextWrite] = value
        self.nextWrite = (self.nextWrite + 1) % self.bufferSize
        self.freePositions.value = self.freePositions.value - 1

        self.items.notify()
        self.mutex.release()

    def consume(self):
        newItem = None
        self.mutex.acquire()
        # Re-check on wakeup; proceed only when an item is available.
        while self.freePositions.value == self.bufferSize:
            self.items.wait()

        newItem = self.buffer[self.nextRead]
        self.nextRead = (self.nextRead + 1) % self.bufferSize
        self.freePositions.value = self.freePositions.value + 1

        self.positions.notify()
        self.mutex.release()
        return newItem
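Since nextRead and nextWrite are process-local, this monitor supports exactly one producer process and one consumer process. A minimal sketch exercising it, with hypothetical item counts:

from multiprocessing import Process

def producer(monitor, n):
    for value in range(1, n + 1):
        monitor.produce(value)

def consumer(monitor, n):
    for _ in range(n):
        print('consumed:', monitor.consume())

if __name__ == '__main__':
    m = Monitor(4)  # buffer of four slots
    p = Process(target=producer, args=(m, 10))
    c = Process(target=consumer, args=(m, 10))
    p.start()
    c.start()
    p.join()
    c.join()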
Example #7
class TProcessPoolServer(TServer):
    def __init__(self, *args):
        TServer.__init__(self, *args)
        self.numWorkers = 10
        self.workers = []
        self.isRunning = Value('b', False)
        self.stopCondition = Condition()
        self.postForkCallback = None

    def setPostForkCallback(self, callback):
        if not callable(callback):
            raise TypeError("This is not a callback!")
        self.postForkCallback = callback

    def setNumWorkers(self, num):
        """Set the number of worker threads that should be created"""
        self.numWorkers = num

    def workerProcess(self):
        """Loop getting clients from the shared queue and process them"""
        if self.postForkCallback:
            self.postForkCallback()
        while self.isRunning.value:
            try:
                client = self.serverTransport.accept()
                if not client:
                    continue
                self.serveClient(client)
            except (KeyboardInterrupt, SystemExit):
                return 0
            except Exception as x:
                logger.exception(x)

    def serveClient(self, client):
        itrans = self.inputTransportFactory.getTransport(client)
        otrans = self.outputTransportFactory.getTransport(client)
        iprot = self.inputProtocolFactory.getProtocol(itrans)
        oprot = self.outputProtocolFactory.getProtocol(otrans)
        try:
            while True:
                self.processor.process(iprot, oprot)
        except TTransportException:
            pass
        except Exception as x:
            logger.exception(x)
        itrans.close()
        otrans.close()

    def serve(self):
        self.isRunning.value = True
        self.serverTransport.listen()
        for i in range(self.numWorkers):
            try:
                w = Process(target=self.workerProcess)
                w.daemon = True
                w.start()
                self.workers.append(w)
            except Exception as x:
                logger.exception(x)
        while True:
            self.stopCondition.acquire()
            try:
                self.stopCondition.wait()
                break
            except (SystemExit, KeyboardInterrupt):
                break
            except Exception as x:
                logger.exception(x)
        self.isRunning.value = False

    def stop(self):
        self.isRunning.value = False
        self.stopCondition.acquire()
        self.stopCondition.notify()
        self.stopCondition.release()
Example #8
class RenderProcess:
    """
    Wraps a multiprocessing.Process for rendering. Assumes there
    is one MjSim per process.
    """
    def __init__(self, device_id, setup_sim, update_sim, output_var_shape):
        """
        Args:
        - device_id (int): GPU device to use for rendering (0-indexed)
        - setup_sim (callback): callback that is given a device_id and
            returns a MjSim. It is responsible for making MjSim render
            to given device.
        - update_sim (callback): callback given a sim and device_id, and
            should return a numpy array of shape `output_var_shape`.
        - output_var_shape (tuple): shape of the synchronized output
            array from `update_sim`.
        """
        self.device_id = device_id
        self.setup_sim = setup_sim
        self.update_sim = update_sim

        # Create a synchronized output variable (numpy array)
        self._shared_output_var = Array(ctypes.c_double,
                                        int(np.prod(output_var_shape)))
        self._output_var = np.frombuffer(self._shared_output_var.get_obj())

        # Synchronization variables used to communicate with the process
        self._cv = Condition()
        self._ready = Value('b', 0)
        self._start = Value('b', 0)
        self._terminate = Value('b', 0)

        # Start the actual process
        self._process = Process(target=self._run)
        self._process.start()

    def wait(self):
        """ Wait for process to be ready for another update call. """
        with self._cv:
            if self._start.value:
                self._cv.wait()
            if self._ready.value:
                return
            self._cv.wait()

    def read(self, copy=False):
        """ Reads the output variable. Returns a copy if copy=True. """
        if copy:
            with self._shared_output_var.get_lock():
                return np.copy(self._output_var)
        else:
            return self._output_var

    def update(self):
        """ Calls update_sim asynchronously. """
        with self._cv:
            self._start.value = 1
            self._cv.notify()

    def stop(self):
        """ Tells process to stop and waits for it to terminate. """
        with self._cv:
            self._terminate.value = 1
            self._cv.notify()
        self._process.join()

    def _run(self):
        sim = self.setup_sim(self.device_id)

        while True:
            with self._cv:
                self._ready.value = 1
                self._cv.notify_all()

            with self._cv:
                if not self._start.value and not self._terminate.value:
                    self._cv.wait()
                if self._terminate.value:
                    break
                assert self._start.value
                self._start.value = 0

            # Run the update and assign output variable
            with self._shared_output_var.get_lock():
                self._output_var[:] = self.update_sim(sim,
                                                      self.device_id).ravel()
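A sketch of the calling side, assuming a fork start method and with trivial stand-ins for the MjSim callbacks:

import numpy as np

def setup_sim(device_id):
    return object()  # placeholder: a real callback returns an MjSim on this GPU

def update_sim(sim, device_id):
    return np.zeros((4, 4))  # placeholder render output

if __name__ == '__main__':
    rp = RenderProcess(device_id=0, setup_sim=setup_sim,
                       update_sim=update_sim, output_var_shape=(4, 4))
    rp.update()  # kick off one asynchronous update
    rp.wait()    # block until the worker is idle again
    frame = rp.read(copy=True)
    rp.stop()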
Example #9
class IODeviceManager(Thread):

    def __init__(self, a_device, a_kernel, std_in=StandardInput(), std_out=StandardOutput()):
        Thread.__init__(self)
        self.set_device(a_device)
        self.set_kernel(a_kernel)
        self.set_input(std_in)
        self.set_output(std_out)
        self.set_mutex(RLock())
        self.set_queue(SoQueue())
        self.device_is_in_use = Condition(self.get_mutex())
        self.the_queue_is_empty = Condition(self.get_mutex())

    def get_kernel(self):
        return self.kernel

    def set_kernel(self, a_kernel):
        self.kernel = a_kernel

    def set_input(self, a_input):
        self.std_in = a_input

    def get_input(self):
        return self.std_in

    def set_output(self, a_output):
        self.std_out = a_output

    def get_output(self):
        return self.std_out

    def get_mutex(self):
        return self.mutex
        
    def set_mutex(self, a_mutex):   
        self.mutex = a_mutex
        
    def get_queue(self):
        return self.queue
        
    def set_queue(self, a_queue):
        self.queue = a_queue
    
    def set_device(self, a_device):
        self.device = a_device
        self.get_device().set_device_manager(self)

    def get_device(self):
        return self.device
        
    def the_device_is_busy(self):
        with self.get_mutex():
            return not self.get_device().is_not_busy()
    
    def send_to_device(self):
        with self.device_is_in_use:
            while self.the_device_is_busy():
                self.device_is_in_use.wait()
            with self.get_mutex():
                self.get_device().set_pcb(self.get())
                self.get_device().process_pcb()

    def notify_that_the_device_is_not_in_use(self):
        with self.device_is_in_use:
            self.device_is_in_use.notify()
    
    def put(self, a_pcb):
        with self.the_queue_is_empty:
            with self.get_mutex():
                self.get_queue().add_pcb(a_pcb)
                self.the_queue_is_empty.notify()
        
    def get(self):
        with self.get_mutex():
            return self.get_queue().get_first()

    def queue_is_empty(self):
        return self.get_queue().is_empty()

    def send_io_end_interruption(self, a_pcb):
        self.get_kernel().get_irq_manager().handle(Irq(IO_END_INTERRUPT,  a_pcb))
            
    def run(self):
        while True:
            with self.the_queue_is_empty:
                while self.queue_is_empty():
                    self.the_queue_is_empty.wait()
                self.send_to_device()
Example #10
class TModelPoolServer(TServer):
    ''' A server that runs a pool of multiple models to serve requests.
        Written by CongVm
    '''
    def __init__(self, handler, listModelConfig, *args, logger=None, timeout=0.1, batchSize=1):
        TServer.__init__(self, *args)
        self.timeout = timeout
        self.batchSize = batchSize
        if logger is not None:
            self.logger = logger
        else:
            self.logger = logging.getLogger(__name__)
        self.listModelConfig = listModelConfig
        self.handler = handler
        self.workers = []
        self.isRunning = Value('b', False)
        self.stopCondition = Condition()
        self.postForkCallback = None

    def setPostForkCallback(self, callback):
        if not callable(callback):
            raise TypeError("This is not a callback!")
        self.postForkCallback = callback

    def setListModelConfig(self, listModelConfig):
        """Set the number of worker threads that should be created"""
        self.listModelConfig = listModelConfig

    def workerProcess(self, kwargs):
        """Loop getting clients from the shared queue and process them"""
        # Init Processor here
        self.handlerInstance = self.handler(**kwargs)
        self.procInstance = self.processor(self.handlerInstance)

        if self.postForkCallback:
            self.postForkCallback()

        listClient = []
        t = time()
        while self.isRunning.value:
            try:
                client = self.serverTransport.accept()
                if not client:
                    continue
                listClient.append(client)
                if len(listClient) >= self.batchSize or time() - t >= self.timeout:
                    self.serveClient(self.procInstance, listClient)
                    listClient.clear()
                    t = time()
            except (KeyboardInterrupt, SystemExit):
                return 0
            except Exception as x:
                self.logger.exception(x)

    def parseClients(self, listClient):
        listOtrans = []
        listItrans = []
        listIprot = []
        listOprot = []
        for client in listClient:
            itrans = self.inputTransportFactory.getTransport(client)
            otrans = self.outputTransportFactory.getTransport(client)
            iprot = self.inputProtocolFactory.getProtocol(itrans)
            oprot = self.outputProtocolFactory.getProtocol(otrans)
            listOtrans.append(otrans)
            listItrans.append(itrans)
            listIprot.append(iprot)
            listOprot.append(oprot)
        return listOtrans, listItrans, listIprot, listOprot

    def serveClient(self, procInstance, listClient):
        """Process input/output from a client for as long as possible"""
        listOtrans, listItrans, listIprot, listOprot = self.parseClients(listClient)
        try:
            while True:
                procInstance.process(listIprot, listOprot)
        except TTransportException:
            pass
        except Exception as x:
            self.logger.exception(x)
        # Close the transports however processing ended.
        for itrans, otrans in zip(listItrans, listOtrans):
            itrans.close()
            otrans.close()

    def serve(self):
        """Start workers and put into queue"""
        # this is a shared state that can tell the workers to exit when False
        self.isRunning.value = True

        # first bind and listen to the port
        self.serverTransport.listen()

        # fork the children
        for modelConfig in self.listModelConfig:
            try:
                w = Process(target=self.workerProcess, args=(modelConfig, ))
                w.daemon = True
                w.start()
                self.workers.append(w)
            except Exception as x:
                self.logger.exception(x)

        # wait until the condition is set by stop()
        while True:
            self.stopCondition.acquire()
            try:
                self.stopCondition.wait()
                break
            except (SystemExit, KeyboardInterrupt):
                break
            except Exception as x:
                self.logger.exception(x)

        self.isRunning.value = False

    def stop(self):
        self.isRunning.value = False
        self.stopCondition.acquire()
        self.stopCondition.notify()
        self.stopCondition.release()
Example #11
class TProcessPoolServer(TServer):
    """Server with a fixed size pool of worker subprocesses to service requests

    Note that if you need shared state between the handlers - it's up to you!
    Written by Dvir Volk, doat.com
    """
    def __init__(self, *args):
        TServer.__init__(self, *args)
        self.numWorkers = 10
        self.workers = []
        self.isRunning = Value('b', False)
        self.stopCondition = Condition()
        self.postForkCallback = None

    def setPostForkCallback(self, callback):
        if not callable(callback):
            raise TypeError("This is not a callback!")
        self.postForkCallback = callback

    def setNumWorkers(self, num):
        """Set the number of worker threads that should be created"""
        self.numWorkers = num

    def workerProcess(self):
        """Loop getting clients from the shared queue and process them"""
        if self.postForkCallback:
            self.postForkCallback()

        while self.isRunning.value:
            try:
                client = self.serverTransport.accept()
                if not client:
                    continue
                self.serveClient(client)
            except (KeyboardInterrupt, SystemExit):
                return 0
            except Exception as x:
                logger.exception(x)

    def serveClient(self, client):
        """Process input/output from a client for as long as possible"""
        itrans = self.inputTransportFactory.getTransport(client)
        otrans = self.outputTransportFactory.getTransport(client)
        iprot = self.inputProtocolFactory.getProtocol(itrans)
        oprot = self.outputProtocolFactory.getProtocol(otrans)

        try:
            while True:
                self.processor.process(iprot, oprot)
        except TTransportException:
            pass
        except Exception as x:
            logger.exception(x)

        itrans.close()
        otrans.close()

    def serve(self):
        """Start workers and put into queue"""
        # this is a shared state that can tell the workers to exit when False
        self.isRunning.value = True

        # first bind and listen to the port
        self.serverTransport.listen()

        # fork the children
        for i in range(self.numWorkers):
            try:
                w = Process(target=self.workerProcess)
                w.daemon = True
                w.start()
                self.workers.append(w)
            except Exception as x:
                logger.exception(x)

        # wait until the condition is set by stop()
        while True:
            self.stopCondition.acquire()
            try:
                self.stopCondition.wait()
                break
            except (SystemExit, KeyboardInterrupt):
                break
            except Exception as x:
                logger.exception(x)

        self.isRunning.value = False

    def stop(self):
        self.isRunning.value = False
        self.stopCondition.acquire()
        self.stopCondition.notify()
        self.stopCondition.release()
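A sketch of wiring this server up with Thrift's standard factories; MyService and MyHandler are hypothetical names for your generated service module and its handler implementation:

from thrift.transport import TSocket, TTransport
from thrift.protocol import TBinaryProtocol

processor = MyService.Processor(MyHandler())  # hypothetical generated code
transport = TSocket.TServerSocket(port=9090)
tfactory = TTransport.TBufferedTransportFactory()
pfactory = TBinaryProtocol.TBinaryProtocolFactory()

server = TProcessPoolServer(processor, transport, tfactory, pfactory)
server.setNumWorkers(4)
try:
    server.serve()  # blocks until stop() is called
except KeyboardInterrupt:
    server.stop()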
Example #12
class MultiProcessScheduler:
    def __init__(self):
        self.cond = Condition()  # the default Condition lock is an RLock

        # If duplex is False then the pipe is unidirectional
        # conn1 for receiving messages and conn2 for sending messages.
        conn1, conn2 = Pipe(duplex=False)
        self.connREAD = conn1
        self.connWRITE = conn2

        # a holder to the closest task to execute
        # it is not safe to access this variable directly as
        # there might be data on the pipe, use self.__getClosestTask()
        self._closestTask = None

        # multiprocessing.Queue is used here to exchange task between the add
        # call and the service running __run() method
        self.queue = SimpleQueue()

        # dummy Process, the correct one will be created when the first
        # task is added
        self.service = Process()

    # TODO create destructor to avoid exiting with items left on the queue

    def __getClosestTask(self):
        '''
        return the closest task to execute (i.e., top on pq)
        '''
        if self.connREAD.poll():
            ret = None
            while self.connREAD.poll():
                ret = self.connREAD.recv()
            self._closestTask = ret
            print("[conn] closestTaskUpdate: ", self._closestTask)
        return self._closestTask

    def add(self, task):
        if type(task) is not Task:
            raise TypeError
        self.queue.put(task)
        if not self.service.is_alive():
            # it seems that Process.run() is a blocking call
            # so the only way to re-run the process is to create another one
            self.service = Process(target=MultiProcessScheduler.__run,
                                   args=(self.cond, self.queue,
                                         self.connWRITE),
                                   daemon=False)
            self.service.start()
        else:
            # notify the condition variable if the new task has the
            # closest execution time
            closestTask = self.__getClosestTask()
            if closestTask and task.time < closestTask.time:
                self.cond.acquire()
                self.cond.notify()
                self.cond.release()

    @staticmethod
    def __run(cond, queue, conn):
        tasksQueue = []
        print("[run] starting", queue.empty())
        while True:
            # remove tasks from queue and add to
            # internal priorityQueue (tasksQueue)
            while not queue.empty():
                task = queue.get()
                heappush(tasksQueue, task)
                print("[run] adding task to pq: ", task)

            # if there are tasks on the priority queue,
            # check when the closest one should be run
            if tasksQueue:
                etime, _, _ = task = tasksQueue[0]
                now = time()
                if etime < now:
                    # Only pop right before running: if the task is not run
                    # at its scheduled time, by the next loop iteration it
                    # might no longer be the closest one.
                    _, fn, args = heappop(tasksQueue)
                    print("[run] running:", task)
                    p = Process(target=fn, args=args, daemon=False)
                    p.start()
                else:
                    delay = etime - now

                    print("[run] sleeping for ", delay, task)

                    # send the closest task to the pipe
                    conn.send(task)

                    cond.acquire()
                    cond.wait(timeout=delay)
                    cond.release()  # release after waking so add() can notify again

            if not tasksQueue and queue.empty():
                # only stop the service if there are no tasks anywhere
                break
        print("[run] done")
Example #13
def solve(max_level, goal, num_workers):
    # prepare message queue shared with workers
    tasks = Queue()
    task_lock = Lock()
    task_cv = Condition(lock=task_lock)

    # create and start workers
    workers = []
    for i in range(0, num_workers):
        solutions = set()
        parent_conn, child_conn = Pipe()
        worker = Process(target=run_worker,
                         args=(child_conn, goal, max_level, tasks, task_lock,
                               task_cv))
        worker.start()
        workers.append((worker, parent_conn))

    # Find all possible sequences: [n0, n1, n2, ..., nM] (M=max_level)
    # where nX is the number of binary operators so that
    # '1 <n0 ops> 2 <n1 ops> 3 <n2 ops> ... M+1 <nM ops>' can be a valid
    # Reverse Polish Notation.  Key conditions are:
    # 1. n0 + n1 + ... + nM = M
    # 2. for any X, n0 + n1 + ... + nX <= X
    # (Note that from condition #2 n0 is always 0.)
    # We'll build the sequences in 'numops_list' below while exploring cases
    # in a BFS-like (or DP-like) manner.

    # This is a queue to maintain outstanding search results.  Each of its
    # elements is a tuple of 2 items: 'numops_list', 'total_ops'
    # A tuple of (N, T) means:
    # - N = [n0, n1, ..., nX]
    # - T = sum(N)
    # (Note that we don't necessarily have to keep T as it can be derived
    # from N.  But we do this for efficiency).
    # The search is completed when len(N) reaches M (i.e., X=M-1) by appending
    # the last item of nM = M - (n0 + n1 + ... + nX) = M - T (see condition #1).
    tmp = [([0], 0)]

    while tmp:
        numops_list, total_ops = tmp.pop(0)
        level = len(numops_list)
        if level < max_level:
            # Expand the sequence with all possible numbers of operators at
            # the current level so we can explore the next level for each of
            # them.
            for i in range(0, level - total_ops + 1):  # see condition #2
                tmp.append((numops_list + [i], total_ops + i))
        else:
            # Found one valid RPN template.  Pass it to workers and have them
            # work on it.
            numops_list.append(max_level - total_ops)
            with task_lock:
                tasks.put(numops_list)
                task_cv.notify()

    # Tell workers all data have been passed.
    solutions = set()
    with task_lock:
        for _ in workers:
            tasks.put(None)
        task_cv.notify_all()

    # Wait until all workers complete the tasks, while receiving any
    # intermediate and last solutions.  The received solutions may not
    # necessarily be fully unique, so we have to unify them here, again.
    # Received data of 'None' means the corresponding worker has completed
    # its task.
    # Note: here we assume all workers are reasonably equally active in
    # sending data, so we simply perform blocking receive.
    conns = set([w[1] for w in workers])
    while conns:
        for c in conns.copy():
            worker_data = c.recv()
            if worker_data is None:
                conns.remove(c)
                continue
            for solution in worker_data:
                if solution not in solutions:
                    solutions.add(solution)

    # All workers have completed.  Cleanup them and print the final unified
    # results.  If we are to show all expressions (i.e. goal is None), sort
    # results by the expressions' values (listing integers followed by all
    # non-integers, followed by 'divided by 0' cases).
    for w in workers:
        w[0].join()
    if goal is None:
        l = list(solutions)
        l.sort(
            key=lambda x: (0, x[0]) if type(x[0]) == int else (1, str(x[0])))
        for solution in l:
            print('%s = %s' % (solution[1], str(solution[0])))
    else:
        for solution in solutions:
            print(solution)
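run_worker is not shown; a minimal sketch consistent with the protocol above (a None task means shutdown, solution sets are sent through the pipe, and a final None tells solve() the worker is done), with the actual RPN evaluation left to a hypothetical find_solutions helper:

def run_worker(conn, goal, max_level, tasks, task_lock, task_cv):
    while True:
        with task_lock:
            while tasks.empty():
                task_cv.wait()  # sleep until solve() queues more work
            numops_list = tasks.get()
        if numops_list is None:  # sentinel: no more work
            break
        solutions = find_solutions(numops_list, goal, max_level)  # hypothetical
        conn.send(solutions)  # ship intermediate results
    conn.send(None)  # tell solve() this worker has completed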
Example #14
from config import TASK_QUEUE_SIZE, UPDATE_TIME

if __name__ == "__main__":
    myip = getMyIP()
    DB_PROXY_NUM = Value('i', 0)
    q1 = Queue(maxsize=TASK_QUEUE_SIZE)
    q2 = Queue()
    sleep_condition = Condition(Lock())
    p0 = Process(target=start_api_server, args=(sleep_condition, ))
    p1 = Process(target=startProxyCrawl, args=(q1, DB_PROXY_NUM, myip, sleep_condition))
    p2 = Process(target=validator, args=(q1, q2, myip))
    p3 = Process(target=store_data, args=(q2, DB_PROXY_NUM))
    p0.start()
    p1.start()
    p2.start()
    p3.start()
    while True:
        now = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S.%f")
        print("sleep start:" + now)
        time.sleep(UPDATE_TIME)
        now = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S.%f")
        print("sleep finish:" + now)
        sleep_condition.acquire()
        sleep_condition.notify()
        sleep_condition.release()

    p0.join()
    p1.join()
    p2.join()
    p3.join()
Example #15
def sayhello(condition, num):
    """
    Waits for its turn, appends a greeting for this process to
    foo.txt, then notifies the next waiting process.
    :param condition: the shared Condition used to chain the processes
    :param num: the process number
    :return: None
    """
    with condition:
        # Wait for the signal
        condition.wait()
        with open("foo.txt", "ab") as fo:
            for i in range(100):
                fo.write("Process ".encode())
                fo.flush()
                fo.write(str(num).encode())
                fo.flush()
                fo.write(": Hello!\n".encode())
                fo.flush()
        condition.notify()


if __name__ == "__main__":
    # Delete the file if it already exists
    if os.path.isfile("foo.txt"):
        os.remove("foo.txt")
    condition = Condition()
    # Create and start ten processes
    for num in range(10):
        Process(target=sayhello, args=(condition, num)).start()
    time.sleep(1)
    # Kick off the chain, since all processes
    # wait at the start
    with condition:
        condition.notify()
Example #16
class DataLoader:
    """ Class for loading data
    Attributes:
        num_processor: an integer indicating the number of processors 
            for loading the data, normally 4 is enough
        capacity: an integer indicating the capacity of the data load
            queue, default set to 10
        batch_size: an integer indicating the batch size for each 
            extraction from the data load queue
        phase: a string indicating the phase of the data loading process,
            can only be 'train' or 'test'
    """
    def __init__(self,
                 num_processor,
                 batch_size,
                 phase,
                 batch_idx_init=0,
                 data_ids_init=train_ids,
                 capacity=10):
        self.num_processor = num_processor
        self.batch_size = batch_size
        self.data_load_capacity = capacity
        self.manager = Manager()
        self.batch_lock = Lock()
        self.mutex = Lock()
        self.cv_full = Condition(self.mutex)
        self.cv_empty = Condition(self.mutex)
        self.data_load_queue = self.manager.list()
        self.cur_batch = self.manager.list([batch_idx_init])
        self.processors = []
        if phase == 'train':
            self.data_ids = self.manager.list(data_ids_init)
        elif phase == 'test':
            self.data_ids = self.manager.list(test_ids)
        else:
            raise ValueError('Could not set phase to %s' % phase)

    def __load__(self):
        while True:
            image_dicts = []
            self.batch_lock.acquire()
            image_ids = self.data_ids[self.cur_batch[0] *
                                      self.batch_size:(self.cur_batch[0] + 1) *
                                      self.batch_size]
            self.cur_batch[0] += 1
            if (self.cur_batch[0] + 1) * self.batch_size >= len(self.data_ids):
                self.cur_batch[0] = 0
                random.shuffle(self.data_ids)
            self.batch_lock.release()

            self.cv_full.acquire()
            # Re-check after wakeup so the queue cannot overfill.
            while len(self.data_load_queue) > self.data_load_capacity:
                self.cv_full.wait()
            self.data_load_queue.append(get_data(image_ids))
            self.cv_empty.notify()
            self.cv_full.release()

    def start(self):
        for _ in range(self.num_processor):
            p = Process(target=self.__load__)
            p.start()
            self.processors.append(p)

    def get_batch(self):
        self.cv_empty.acquire()
        # Re-check after wakeup in case another consumer drained the queue.
        while len(self.data_load_queue) == 0:
            self.cv_empty.wait()
        batch_data = self.data_load_queue.pop()
        self.cv_full.notify()
        self.cv_empty.release()
        return batch_data

    def get_status(self):
        self.batch_lock.acquire()
        current_cur_batch = self.cur_batch[0]
        current_data_ids = self.data_ids
        self.batch_lock.release()
        return {
            'batch_idx': int(current_cur_batch),
            'data_ids': list(current_data_ids)
        }

    def stop(self):
        for p in self.processors:
            p.terminate()
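A usage sketch; train_ids, test_ids, and get_data are module-level names the class assumes to exist, stubbed here:

train_ids = list(range(100))  # hypothetical stand-ins for the real ID lists
test_ids = list(range(20))

def get_data(image_ids):
    return image_ids  # placeholder for real image loading

if __name__ == '__main__':
    loader = DataLoader(num_processor=4, batch_size=8, phase='train')
    loader.start()
    batch = loader.get_batch()  # blocks until a batch is available
    print(loader.get_status()['batch_idx'])
    loader.stop()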
Example #18
class Orchestrator:
    def __init__(self,
                 submission_queue: multiprocessing.Queue,
                 status_provider: BatchStatusProvider,
                 config_file: str,
                 strict_config: bool,
                 log_folder: str,
                 cache_search_dirs: List[str],
                 log_event_que: LogEventQueue,
                 singleton_run_summary_path: Optional[str] = None):

        self._submission_que: multiprocessing.Queue = submission_queue
        self._status_provider: BatchStatusProvider = status_provider
        self._config_file: str = config_file
        self._strict_config: bool = strict_config
        self._log_folder: str = log_folder
        self._cache_search_dirs = cache_search_dirs
        self._log_event_que = log_event_que
        self._singleton_run_summary_path = singleton_run_summary_path
        self._on_batch_id = -1
        self._on_batch_type: type = type(None)

        self._master_thread = Thread(target=self._master_thread_loop,
                                     name="OrchestratorMasterThread",
                                     args=(),
                                     daemon=True)

        self._run_summary_thread_gate = Event()
        self._run_summary_thread = Thread(target=self._run_summary_loop,
                                          name="OrchestratorRunSummaryThread",
                                          args=(),
                                          daemon=True)

        self.__debug_loop_thread = Thread(target=self.__debug_loop,
                                          name="OrchestratorDebugLoop",
                                          args=(),
                                          daemon=True)

        #TODO(andwald): The following hypothetical thread dynamically sets RTF and Concurrency of EndpointManagers
        #               according to its own decoupled logic. This will be nice and pluggable since EndpointManagers
        #               already adhere to whatever the dynamic settings are for the Atomic Shared Variables of
        #               RTF and Concurrency, which is what this thread will manipulate.
        # self._perf_thread = Thread(target=self.perf_thread_loop, name="OrchestratorPerfThread", args=(self,), daemon=True)

        self._file_queue = Queue()
        self._file_queue_size = 0
        self._in_progress: Dict[str, WorkItemRequest] = {}  # WorkItemRequest.filepath -> WorkItemRequest
        self._in_progress_owner: Dict[str, EndpointManager] = {}  # WorkItemRequest.filepath -> EndpointManager
        self._work_results: Dict[str, WorkItemResult] = {}  # WorkItemRequest.filepath -> WorkItemResult
        self._batch_completion_evt = Event()
        self._accounting_lock = RLock()
        self._file_queue_cond = Condition(self._accounting_lock)
        self._run_summary_lock = Lock()
        self._misc_lock = Lock()
        self._summarizer: BatchRunSummarizer = None
        self._stop_requested = False

        self._endpoint_managers: List[EndpointManager] = []
        self._endpoint_generation = 0
        self._old_managers = set()  # Set[str], contains names of now-inactive endpoint managers
        self._config_notifier: ThreadedNotifier = \
            notify_file_modified(self._config_file, self.hotswap_endpoint_managers, self._log_event_que)

        self._start_time = time.time()
        self._creator_pid = current_process().pid
        logger.info("Orchestrator created by process: {0}".format(
            self._creator_pid))
        self.__cnt_work_success_cb = 0
        self.__cnt_work_failure_cb = 0

        self._master_thread.start()
        self._run_summary_thread.start()
        # self.__debug_loop_thread.start()  # Enable to debug concurrency changes.

    def is_alive(self):
        return self._master_thread.is_alive()

    def join(self):
        self._master_thread.join()

    def _run_summary_loop(self):
        while not self._stop_requested:
            # Prevent redundant updates when nothing can change.
            self._run_summary_thread_gate.wait()
            if self._stop_requested:
                return

            if self._on_batch_id > -1 and self._summarizer:
                try:
                    self.write_summary_information(write_run_summary=True,
                                                   log_conclusion_msg=False)

                # Don't ever let this thread die as it's too important.
                # Log and re-try. Repetitive failure loop will at least get logged.
                except Exception as e:
                    exception_details = traceback.format_exc()
                    logger.error(
                        "Orchestrator: run_summary_thread in run_summary_loop(): "
                        "Caught {0}, \nDetails: {1}".format(
                            type(e).__name__, exception_details))

            time.sleep(RUN_SUMMARY_LOOP_INTERVAL)

    def __debug_loop(self):
        """
        This is only intended to be used during development and debugging.
        """
        def _check_lock_acq(lock):
            acquired = lock.acquire(block=False)
            if acquired:
                lock.release()
                return False
            # We weren't able to acquire, so it's taken
            return True

        # Loop forever. This is a daemonic thread and it will intentionally
        # only die when the process owning Orchestrator dies.
        last_cnt_work_success = 0
        while True:
            logger.debug("Stop requested: {0}".format(self._stop_requested))
            logger.debug("Batch que size: {0}".format(
                self._submission_que.qsize()))
            logger.debug("On batch id: {0}".format(self._on_batch_id))
            logger.debug("File queue size: {0}".format(self._file_queue_size))
            logger.debug("Num in progress: {0}".format(len(self._in_progress)))
            logger.debug("Orchestrator accounting lock taken: {0}".format(
                _check_lock_acq(self._accounting_lock)))
            logger.debug("Status provider accounting lock taken: {0}".format(
                _check_lock_acq(BatchStatusProvider.lock)))
            logger.debug(
                "Notify work success callback entry count: {0}".format(
                    self.__cnt_work_success_cb))
            logger.debug(
                "Work items completed since last debug print: {0}".format(
                    self.__cnt_work_success_cb - last_cnt_work_success))
            last_cnt_work_success = self.__cnt_work_success_cb
            logger.debug(
                "Notify work failure callback entry count: {0}".format(
                    self.__cnt_work_failure_cb))
            logger.debug("Run summary thread alive: {0}".format(
                self._run_summary_thread.is_alive()))
            logger.debug("Number of old endpoint managers: {0}".format(
                len(self._old_managers)))
            for epm in self._endpoint_managers:
                logger.debug("Endpoint manager: {0}".format(epm.name))
                logger.debug("   Current requests: {0}".format(
                    epm._current_requests))
                logger.debug("   Current requests lock taken: {0}".format(
                    _check_lock_acq(epm._current_requests_lock)))
                logger.debug("   Pool apply_async count: {0}".format(
                    epm._cnt_apply_async))
                logger.debug("   Pool callback count: {0}".format(
                    epm._cnt_pool_cb))
                logger.debug("   Pool callback returns count: {0}".format(
                    epm._cnt_pool_cb_rets))
                logger.debug("   Stop requested: {0}".format(
                    epm._stop_requested))
                logger.debug("   Now trying to steal work: {0}".format(
                    epm._in_steal_work_fn))
            logger.debug("Stack frames of all threads:")
            logger.debug("\n*** STACKTRACE - START ***\n")
            current_threads_stacktrace(use_logger=True)
            logger.debug("\n*** STACKTRACE - END ***\n")
            time.sleep(DEBUG_LOOP_INTERVAL)

    def write_summary_information(self,
                                  write_run_summary: bool = True,
                                  log_conclusion_msg: bool = False,
                                  allow_fail: bool = False):
        """
        Summarize individual file results, along with overall results,
        and write them to log and/or file. Also log a conclusion message
        if requested.
        :param write_run_summary: whether run summary (individual files + overall)
                                  should be written to file.
        :param log_conclusion_msg: whether a conclusion message should be logged
                                   which includes final stats and lists failures.
        :param allow_fail: log failure to write run summary but do not raise exception.
        """
        # To ensure history serialization, we wrap this method
        # in its own lock that nobody else contends with except for
        # the threads that invoke this.
        with self._run_summary_lock:

            # Take a consistent snapshot and then report on the snapshot
            # without holding back forward progress.
            with self._accounting_lock:
                snap_work_results: Dict[str, WorkItemResult] = copy.deepcopy(
                    self._work_results)
                snap_file_queue_size: int = self._file_queue_size
                snap_num_running: int = len(self._in_progress)
                snap_run_summarizer: BatchRunSummarizer = self._summarizer
                snap_batch_id: int = self._on_batch_id

            summary_json = {}
            # It's uncommon that a run summarizer wouldn't be available yet but this could
            # happen for example by signaling early termination to the Orchestrator.
            if snap_run_summarizer:
                summary_json = snap_run_summarizer.run_summary(
                    snap_work_results, snap_file_queue_size,
                    snap_num_running, self._start_time,
                    len(self._endpoint_managers), log_conclusion_msg)

            # Write the summary json file
            if write_run_summary:
                try:
                    if self._singleton_run_summary_path:
                        logger.debug(
                            "Updating singleton run_summary: {0}".format(
                                self._singleton_run_summary_path))
                        write_json_file_atomic(
                            summary_json, self._singleton_run_summary_path)
                    else:
                        try:
                            self._status_provider.set_run_summary(
                                snap_batch_id, summary_json)
                        except BatchNotFoundException:
                            # This is benign and means we caught a rare race condition
                            # in which the batch directory is very recently deleted.
                            pass
                    # Minimal throttle on file writes. We are under _run_summary_lock.
                    time.sleep(3)
                except Exception as e:
                    logger.warning("Failed to write run_summary: {0}".format(
                        str(e)))
                    if not allow_fail:
                        raise

    def request_stop(self):
        """
        Arrange for conditions that will lead to a fast conclusion
        of any ongoing batch without finishing whatever is remaining or
        in progress in this batch if any. This will also cause
        EndpointManagers to shut down. Orchestrator's join() is
        guaranteed to eventually return.
        """
        # Assume this might be called from a signal handler.
        # Instead of preventing child proc inheritance of signals,
        # we eliminate any leaky abstractions by permitting children
        # and those who spawn them to be completely blameless
        # for creating unexpected conditions.
        if current_process().pid != self._creator_pid:
            return

        with self._misc_lock:
            try:
                if self._config_notifier:
                    self._config_notifier.stop()
                    self._config_notifier = None
            except OSError as e:
                # ThreadedNotifier.stop() is not idempotent and gives
                # errno EBADF if it is already stopped.
                if e.errno != errno.EBADF:
                    raise

        # A couple facts about Python3 in case there is any concern
        # about being invoked by a signal handler.
        # 1 - Only the main thread of a process can handle
        # signals, so now we know we are the main thread of the
        # creator process in the signal case.
        # 2 - When running a signal handler, the main thread
        # is still subject to preemption at tick and the GIL
        # can still be released for other threads. This means
        # that picking up the lock here cannot create deadlock,
        # unless the main thread itself was holding the lock before
        # the signal. That's why we use ReentrantLock.
        with self._accounting_lock:
            self._stop_requested = True
            while self._file_queue_size > 0:
                self._file_queue.get()
                self._file_queue_size -= 1
            self._submission_que.put(None)
            self._file_queue_cond.notify_all()
            self._batch_completion_evt.set()
            for m in self._endpoint_managers:
                m.request_stop()
            self._run_summary_thread_gate.set()

    def steal_work(self, manager: EndpointManager) -> WorkItemRequest:
        """
        :param manager: the EndpointManager who is trying to steal work.
        :returns str: audio file of work to do
        """
        sentinel = SentinelWorkItemRequest()

        # Classic consumer waiter pattern using condition variable.
        self._accounting_lock.acquire()
        while True:
            if manager.name in self._old_managers or self._stop_requested:
                work = sentinel
                break
            if self._file_queue_size > 0:
                work: WorkItemRequest = self._file_queue.get()
                self._file_queue_size -= 1

                # Eliminate this manager early if we detect a language mismatch.
                # It will be recreated on a new batch.
                if work.language and \
                        manager.endpoint_config["language"].lower() != work.language.lower():
                    # Put it back on the queue for someone qualified.
                    self._file_queue.put(work)
                    self._file_queue_size += 1
                    self._file_queue_cond.notify()
                    work = sentinel
                    break

                # Got some work to do!
                self._in_progress[work.filepath] = work
                self._in_progress_owner[work.filepath] = manager
                break
            else:
                # Back to sleep because we got nothing; wait() implicitly
                # releases self._accounting_lock while sleeping.
                self._file_queue_cond.wait()
        self._accounting_lock.release()
        return work

    def _merge_results(self, filepath: str, result: WorkItemResult):
        if filepath not in self._work_results:
            self._work_results[filepath] = result
        else:
            prev_attempts = self._work_results[filepath].attempts
            result.attempts += prev_attempts
            self._work_results[filepath] = result

    def notify_work_success(self, filepath: str, manager: EndpointManager,
                            result: WorkItemResult):
        with self._accounting_lock:
            self.__cnt_work_success_cb += 1
            if manager.name in self._old_managers:
                # The AudioFileWork item would already be back in pending
                # or running by someone else or finished. Covers an uncommon race.
                return
            if self._stop_requested:
                # It's too late for updating batch status and we're about to die.
                return
            del self._in_progress[filepath]
            del self._in_progress_owner[filepath]

            self._merge_results(filepath, result)

            # Did we just finish the batch?
            if self._file_queue_size == 0 and len(self._in_progress) == 0:
                self._batch_completion_evt.set()

    def notify_work_failure(self, filepath: str, manager: EndpointManager,
                            result: WorkItemResult):
        with self._accounting_lock:
            self.__cnt_work_failure_cb += 1
            if manager.name in self._old_managers:
                # The WorkItemResult would already be back in pending
                # or running by someone else or finished. Covers an uncommon race.
                return
            if self._stop_requested:
                # It's too late for updating batch status and we're about to die.
                return

            self._merge_results(filepath, result)

            # Do we give it another chance?
            # Check retry-ability and num retries burned already.
            if result.can_retry and \
                    self._work_results[filepath].attempts - 1 < ORCHESTRATOR_SCOPE_MAX_RETRIES:
                self._log_event_que.debug(
                    "Placed work item {0} back into queue since retriable.".
                    format(filepath))
                self._file_queue.put(self._in_progress[filepath])
                self._file_queue_size += 1
                self._file_queue_cond.notify()
            # Else no more retries.
            # Either way the item is no longer in progress.
            del self._in_progress[filepath]
            del self._in_progress_owner[filepath]

            # Did we just finish the batch? E.g. we finally gave up on this work
            # item and it happens to be the last one in the batch.
            if self._file_queue_size == 0 and len(self._in_progress) == 0:
                self._batch_completion_evt.set()

    def hotswap_endpoint_managers(self):
        config_data = load_configuration(self._config_file,
                                         self._strict_config)

        with self._accounting_lock:
            if self._stop_requested:
                return

            # Get the unique generation of these endpoint managers, which
            # is useful for both debugging and logging.
            gen = self._endpoint_generation
            self._endpoint_generation += 1

            # Get an EndpointStatusChecker for the type of the
            # BatchRequest that is currently being processed.
            ep_status_checker: EndpointStatusChecker
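            # isinstance(None, X) is True only when X is NoneType, i.e. no
            # batch type has been recorded yet.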
            if isinstance(None, self._on_batch_type):
                ep_status_checker = UnknownEndpointStatusChecker(
                    self._log_event_que)
            else:
                ep_status_checker = self._on_batch_type.get_endpoint_status_checker(
                    self._log_event_que)

            try:
                # Determine EndpointManagers that need to be deleted (modified, new,
                # or no longer exist). Do not touch EndpointManagers that have not changed.
                new_em_objs: List[EndpointManager] = []
                # Start by assuming every EndpointManager needs to be deleted.
                deleted_managers: Dict[str, EndpointManager] = \
                    {em.endpoint_name: em for em in self._endpoint_managers}

                for endpoint_name, endpoint_config in config_data.items():
                    # If an existing endpoint is totally preserved in the new config, don't delete it.
                    # Also require that the endpoint's manager is not terminally stopped, otherwise we need
                    # a new instance of it anyway.
                    if endpoint_name in deleted_managers and \
                      endpoint_config == deleted_managers[endpoint_name].endpoint_config and \
                      not deleted_managers[endpoint_name]._stop_requested:  # noqa
                        # Don't delete this EndpointManager and don't make a new one.
                        del deleted_managers[endpoint_name]
                        continue

                    new_em_objs.append(
                        EndpointManager(
                            "HotswapGen{0}_{1}".format(str(gen),
                                                       endpoint_name),
                            endpoint_name,
                            endpoint_config,
                            self._log_folder,
                            self._log_event_que,
                            self._cache_search_dirs,
                            # called when an EndpointManager has capacity to steal work
                            self.steal_work,
                            # called when an EndpointManager reports success
                            self.notify_work_success,
                            # called when an EndpointManager reports failure
                            self.notify_work_failure,
                            ep_status_checker,
                        ))
            # Validation of the config could fail, or invalid yaml may have been given, etc.
            # We catch everything so that we may permit another attempt later with a proper
            # config file. We log the error here; elsewhere we die if there is no forward
            # progress for too long.
            except Exception as e:
                exception_details = traceback.format_exc()
                logger.error(
                    "Caught Exception '{0}' reading config. Details: {1}\n{2}".format(
                        type(e).__name__, str(e), exception_details))
                # Don't proceed to stop the old EndpointManagers because they're all we've got to go on.
                return
            if self._stop_requested:
                return

            # Also swap the EndpointManagers under lock in case of race.
            # First stop the old EndpointManagers to be deleted.
            for m in self._endpoint_managers:
                if m.endpoint_name in deleted_managers:
                    self._old_managers.add(m.name)
                    m.request_stop()

            # Un-assign work in progress owned by deleted EndpointManagers.
            # Any callback the old managers might still make will now be rejected,
            # so we can safely move their in-progress work back to the queue.
            work_in_progress = dict(self._in_progress)  # shallow copy
            work_in_progress_owner = dict(self._in_progress_owner)  # shallow copy
            for filepath, work_item in self._in_progress.items():
                owner_endpoint_name = self._in_progress_owner[filepath].endpoint_name
                # If the EndpointManager that owns this work item is being deleted,
                # free up the work item.
                if owner_endpoint_name in deleted_managers:
                    del work_in_progress[filepath]
                    del work_in_progress_owner[filepath]
                    self._file_queue.put(work_item)
                    self._file_queue_size += 1
            self._in_progress = work_in_progress
            self._in_progress_owner = work_in_progress_owner

            # We've potentially repopulated the file_queue.
            self._file_queue_cond.notify_all()

            # Start the new EndpointManagers.
            for m in new_em_objs:
                m.start()

            # Record the latest set of all EndpointManagers.
            self._endpoint_managers = \
                [em for em in self._endpoint_managers
                 if em.endpoint_name not in deleted_managers] + \
                new_em_objs

            # Ensure that they are all using the correct type of EndpointStatusChecker
            # which depends on the subtype of BatchRequest we are currently processing.
            for m in self._endpoint_managers:
                m.set_endpoint_status_checker(ep_status_checker)

        logger.info(
            "Set new EndpointManagers after hot-swap: {0}".format(config_data))

    def _master_finalize(self):
        """
        Work to be done before Orchestrator's master thread exits.
        """
        # Log conclusion of run_summary information if at singleton level.
        if self._singleton_run_summary_path:
            self.write_summary_information(write_run_summary=False,
                                           log_conclusion_msg=True)

    def _master_thread_loop(self):

        # Keep doing batches until given a stop request.
        while True:
            # Starting a new batch.
            request: BatchRequest = self._submission_que.get()
            with self._accounting_lock:
                self._on_batch_type = type(request)

            # Recreate the endpoints on start of a new batch in case
            # the last batch disabled endpoints, e.g. for mismatched
            # language or other reasons.
            self.hotswap_endpoint_managers()

            with self._accounting_lock:
                if self._stop_requested:
                    self._master_finalize()
                    return

                # Starting a new batch.
                # Reset record keeping if it's not singleton run summary.
                if self._singleton_run_summary_path is None:
                    self._work_results = {}
                self._summarizer = request.get_batch_run_summarizer()

                logger.info("Orchestrator: Starting batch {0}".format(
                    request.batch_id))
                self._status_provider.change_status_enum(
                    request.batch_id, BatchStatusEnum.running)
                self._on_batch_id = request.batch_id
                self._batch_completion_evt.clear()
                self._run_summary_thread_gate.set()
                assert len(self._in_progress) == 0
                assert self._file_queue_size == 0

                for work in request.make_work_items(
                        self._status_provider.batch_base_path(
                            request.batch_id), self._cache_search_dirs,
                        self._log_folder):
                    self._file_queue.put(work)
                    self._file_queue_size += 1
                self._file_queue_cond.notify_all()

            # Wait for batch completion or early stop request. In both cases,
            # nothing is in progress and nothing is in queue when we're woken.
            self._batch_completion_evt.wait()
            logger.info("Orchestrator: Completed batch {0}".format(
                request.batch_id))

            # Report per-batch final run_summary.
            if self._singleton_run_summary_path is None:
                self.write_summary_information(write_run_summary=True,
                                               log_conclusion_msg=True,
                                               allow_fail=True)
            # Even with a singleton run_summary, we should update the run_summary
            # file now, but not log the conclusion.
            else:
                self.write_summary_information(write_run_summary=True,
                                               log_conclusion_msg=False,
                                               allow_fail=True)

            # Concatenate batch-level results to single file.
            if request.combine_results:
                write_single_output_json(
                    request.files,
                    self._status_provider.batch_base_path(request.batch_id))

            # Intentionally change the status enum last so that the above results
            # are committed first for any event-driven observers.
            self._status_provider.change_status_enum(request.batch_id,
                                                     BatchStatusEnum.done)
            logger.info(
                "Orchestrator: Updated batch status to Done: {0}".format(
                    request.batch_id))

            # As another batch may not show up for a while (or never), stop the periodic
            # run summary thread since no new information to report.
            self._run_summary_thread_gate.clear()
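The orchestrator's queue hand-off above is the standard condition-variable protocol: producers enqueue under the accounting lock and notify, consumers wait in a loop until work (or a sentinel) appears. A minimal self-contained sketch of the same pattern, with illustrative names rather than the orchestrator's API:

import threading
from collections import deque

work_queue = deque()
queue_cond = threading.Condition()
SENTINEL = object()

def put_work(item):
    # Enqueue under the lock, then wake one waiting worker.
    with queue_cond:
        work_queue.append(item)
        queue_cond.notify()

def get_work():
    # Wait in a loop: wakeups can be spurious or the work already taken.
    with queue_cond:
        while not work_queue:
            queue_cond.wait()
        return work_queue.popleft()

def worker():
    while True:
        item = get_work()
        if item is SENTINEL:
            break
        print("processing", item)

t = threading.Thread(target=worker)
t.start()
for i in range(3):
    put_work(i)
put_work(SENTINEL)
t.join()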
Example #19
0
File: ui.py Project: sq3/bundlewrap
class IOManager(object):
    def __init__(self):
        self.capture_mode = False
        self.child_mode = False
        self.parent_mode = False

    def activate_as_child(self, output_lock, output_queue, status_line_cleared):
        self.parent_mode = False
        self.child_mode = True
        self.status_line_cleared = status_line_cleared
        self.output_lock = output_lock
        self.output_queue = output_queue

    def activate_as_parent(self, debug=False):
        assert not self.child_mode
        self.debug_mode = debug
        self.jobs = []
        self.output_lock = Lock()
        self.parent_mode = True
        self.output_queue = Queue()
        self.status_line_cleared = Condition()
        self.thread = Thread(target=self._print_thread)
        self.thread.daemon = True
        self.thread.start()

    def ask(self, question, default, get_input=input_function):
        answers = _("[Y/n]") if default else _("[y/N]")
        question = question + " " + answers + " "
        with self.lock:
            while True:
                STDOUT_WRITER.write("\a")
                STDOUT_WRITER.write(question)
                STDOUT_WRITER.flush()

                answer = get_input()
                if answer.lower() in (_("y"), _("yes")) or (
                    not answer and default
                ):
                    return True
                elif answer.lower() in (_("n"), _("no")) or (
                    not answer and not default
                ):
                    return False
                STDOUT_WRITER.write(_("Please answer with 'y(es)' or 'n(o)'.\n"))

    @contextmanager
    def capture(self):
        self.capture_mode = True
        self.captured_io = {
            'stderr': "",
            'stdout': "",
        }
        try:
            yield self.captured_io
        finally:
            # Ensure capture mode is reset even if the block raises.
            self.capture_mode = False

    @property
    def child_parameters(self):
        return (self.output_lock, self.output_queue, self.status_line_cleared)

    def debug(self, msg):
        self.output_queue.put({'msg': 'LOG', 'log_type': 'DBG', 'text': msg})

    def job_add(self, msg):
        self.output_queue.put({'msg': 'LOG', 'log_type': 'JOB_ADD', 'text': msg})

    def job_del(self, msg):
        self.output_queue.put({'msg': 'LOG', 'log_type': 'JOB_DEL', 'text': msg})

    def stderr(self, msg):
        self.output_queue.put({'msg': 'LOG', 'log_type': 'ERR', 'text': msg})

    def stdout(self, msg):
        self.output_queue.put({'msg': 'LOG', 'log_type': 'OUT', 'text': msg})

    @contextmanager
    def job(self, job_text):
        self.job_add(job_text)
        try:
            yield
        finally:
            # Always remove the job from the status line, even on error.
            self.job_del(job_text)

    @property
    @contextmanager
    def lock(self):
        with self.output_lock:
            self.status_line_cleared.wait()
            yield

    def _print_thread(self):
        assert self.parent_mode
        while True:
            if self.output_lock.acquire(False):
                msg = self.output_queue.get()
                if msg['log_type'] == 'QUIT':
                    break
                if self.debug_mode and msg['log_type'] in ('OUT', 'DBG', 'ERR'):
                    msg['text'] = datetime.now().strftime("[%Y-%m-%d %H:%M:%S.%f] ") + msg['text']
                if self.jobs and TTY:
                    self._write("\r\033[K")
                if msg['log_type'] == 'OUT':
                    self._write(msg['text'] + "\n")
                elif msg['log_type'] == 'ERR':
                    self._write(msg['text'] + "\n", err=True)
                elif msg['log_type'] == 'DBG' and self.debug_mode:
                    self._write(msg['text'] + "\n")
                elif msg['log_type'] == 'JOB_ADD' and TTY:
                    self.jobs.append(msg['text'])
                elif msg['log_type'] == 'JOB_DEL' and TTY:
                    self.jobs.remove(msg['text'])
                if self.jobs and TTY:
                    self._write("[status] " + self.jobs[0])
                self.output_lock.release()
            else:  # someone else is holding the output lock
                # the process holding the lock should now be waiting for
                # us to remove any status lines present before it starts
                # printing
                if self.jobs and TTY:
                    self._write("\r\033[K")
                self.status_line_cleared.notify()
                # now we wait until the other process has finished and
                # released the output lock
                self.output_lock.acquire()
                self.output_lock.release()

    def shutdown(self):
        assert self.parent_mode
        self.output_queue.put({'msg': 'LOG', 'log_type': 'QUIT'})
        self.thread.join()

    def _write(self, msg, err=False):
        write_to_stream(STDERR_WRITER if err else STDOUT_WRITER, msg)
        if self.capture_mode:
            self.captured_io['stderr' if err else 'stdout'] += msg
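The lock property and _print_thread above cooperate through a small handshake: a writer holds the output lock and waits for the print thread to clear the status line before writing. A simplified single-process sketch of that handshake, assuming a Condition built over the shared lock (bundlewrap's real version spans processes):

import threading

output_lock = threading.Lock()
status_line_cleared = threading.Condition(output_lock)
status_line_present = True

def print_thread():
    # The print thread notices a writer wants the terminal, wipes its
    # status line, and signals that it is safe to write.
    global status_line_present
    with status_line_cleared:
        print("\r\033[K", end="")  # clear the status line
        status_line_present = False
        status_line_cleared.notify()

def writer():
    # A writer takes the lock and waits until the status line is gone.
    with status_line_cleared:
        status_line_cleared.wait_for(lambda: not status_line_present)
        print("safe to write now")

w = threading.Thread(target=writer)
p = threading.Thread(target=print_thread)
w.start(); p.start()
w.join(); p.join()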
class TProcessPoolServer(TServer):

    """
    Server with a fixed size pool of worker subprocesses which service requests.
    Note that if you need shared state between the handlers - it's up to you!
    Written by Dvir Volk, doat.com
    """

    def __init__(self, *args):
        TServer.__init__(self, *args)
        self.numWorkers = 10
        self.workers = []
        self.isRunning = Value('b', False)
        self.stopCondition = Condition()
        self.postForkCallback = None

    def setPostForkCallback(self, callback):
        if not callable(callback):
            raise TypeError("This is not a callback!")
        self.postForkCallback = callback

    def setNumWorkers(self, num):
        """Set the number of worker processes that should be created."""
        self.numWorkers = num

    def workerProcess(self):
        """Loop forever, accepting clients from the shared transport and serving them."""

        if self.postForkCallback:
            self.postForkCallback()

        while self.isRunning.value:
            try:
                client = self.serverTransport.accept()
                self.serveClient(client)
            except (KeyboardInterrupt, SystemExit):
                return 0
            except Exception as x:
                logging.exception(x)

    def serveClient(self, client):
        """Process input/output from a client for as long as possible"""
        itrans = self.inputTransportFactory.getTransport(client)
        otrans = self.outputTransportFactory.getTransport(client)
        iprot = self.inputProtocolFactory.getProtocol(itrans)
        oprot = self.outputProtocolFactory.getProtocol(otrans)

        try:
            while True:
                self.processor.process(iprot, oprot)
        except TTransportException:
            pass
        except Exception as x:
            logging.exception(x)

        itrans.close()
        otrans.close()


    def serve(self):
        """Start a fixed number of worker processes and block until stop() is called."""

        # Shared state that tells the workers to exit when set to false.
        self.isRunning.value = True

        # First bind and listen on the port.
        self.serverTransport.listen()

        # Fork the children.
        for i in range(self.numWorkers):
            try:
                w = Process(target=self.workerProcess)
                w.daemon = True
                w.start()
                self.workers.append(w)
            except Exception as x:
                logging.exception(x)

        # Block until the condition is notified by stop().
        while True:
            self.stopCondition.acquire()
            try:
                self.stopCondition.wait()
                break
            except (SystemExit, KeyboardInterrupt):
                break
            except Exception as x:
                logging.exception(x)
            finally:
                # Release the condition's lock on every path out of the loop.
                self.stopCondition.release()

        self.isRunning.value = False

    def stop(self):
        self.isRunning.value = False
        self.stopCondition.acquire()
        self.stopCondition.notify()
        self.stopCondition.release()
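Typical wiring for this server looks like the sketch below. MyService and MyHandler are hypothetical stand-ins for a generated Thrift module and its handler; only the transport and protocol factory calls are the standard Thrift API:

from thrift.transport import TSocket, TTransport
from thrift.protocol import TBinaryProtocol

handler = MyHandler()                     # hypothetical service handler
processor = MyService.Processor(handler)  # hypothetical generated processor
transport = TSocket.TServerSocket(port=9090)
tfactory = TTransport.TBufferedTransportFactory()
pfactory = TBinaryProtocol.TBinaryProtocolFactory()

server = TProcessPoolServer(processor, transport, tfactory, pfactory)
server.setNumWorkers(4)
server.serve()  # blocks in stopCondition.wait() until stop() is called

Calling server.stop() (for example from a signal handler in the parent process) notifies stopCondition so that serve() can return.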
Example #21
0
File: Cpu.py Project: danwyryunq/TPSOUNQ
class Cpu(object):

    def __init__(self):

        self.pcb = None

        self.__mutex = RLock()
        self.__pcb_not_set = Condition(self.__mutex)
        self.__mem_not_allocated = Condition(self.__mutex)
        self.__round_robin_policy_on = False


    def enable_round_robin(self, round_robin_quantum):
        self.__round_robin_policy_on = True
        self.__round_robin = RoundRobin(round_robin_quantum)

    def pcb_not_set(self):
        return self.__pcb_not_set


    def set_kernel(self, kernel):
        self.__kernel = kernel

    def is_pcb_set(self):
        return self.pcb is not None

    def set_current_pcb(self, pcb):
        with self.__pcb_not_set:
            self.pcb = pcb
            self.__pcb_not_set.notify()

    def reset_pcb(self):
        self.pcb = None

    def get_current_pcb(self):
        return self.pcb

    def __get_mem_manager(self):
        return self.__kernel.get_mem_manager()

    def __get_irq_manager(self):
        return self.__kernel.get_irq_manager()

    def fetch_decode_and_execute(self):
        with self.__pcb_not_set:
            while(not self.is_pcb_set()):
                self.__pcb_not_set.wait()

            with self.__mutex:
                self.__fetch()
                self.__decode()
                self.__execute()

    def __fetch(self):
        pcb = self.get_current_pcb()
        address = self.__get_mem_manager().current_instruction_address(pcb)
        with self.__mem_not_allocated:
            while self.__get_mem_manager().get(pcb, address) is None:
                self.__mem_not_allocated.wait()
            self.__current_instruction = self.__get_mem_manager().get(pcb, address)

    def __decode(self):
        self.__send_interruption_if_is_io()
        self.__send_interruption_if_is_kill()

    def __send_interruption_if_is_kill(self):
        if self.__current_instruction.is_kill_instruction():
            self.send_end()

    def __send_interruption_if_is_io(self):
        if self.__current_instruction.is_io_instruction():
            self.send_io()

    def __execute(self):
        self.__execute_if_is_cpu_instruction()

    def __execute_if_is_cpu_instruction(self):
        if self.__current_instruction.is_cpu_instruction():
            self.__current_instruction.run()
            self.get_current_pcb().increment_pc()

    def send_interruption(self, a_interruption):
        self.__get_irq_manager().handle(Irq(a_interruption, self.get_current_pcb()))

    def send_timeout(self):
        self.send_interruption(TIMEOUT_INTERRUPT)

    def send_end(self):
        self.send_interruption(KILL_INTERRUPT)

    def send_io(self):
        self.send_interruption(IO_INTERRUPT)

    def on_signal(self):
        if self.__round_robin_policy_on:
            self.__round_robin.handle_action(self)
        else:
            self.fetch_decode_and_execute()
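The two conditions above share one RLock, so a CPU thread can block in fetch_decode_and_execute until a scheduler supplies a PCB via set_current_pcb. A stripped-down sketch of just that wake-up, with illustrative names and no kernel or memory manager:

import threading

mutex = threading.RLock()
pcb_not_set = threading.Condition(mutex)
pcb = None

def cpu_loop():
    with pcb_not_set:
        while pcb is None:       # guard against spurious wakeups
            pcb_not_set.wait()
        print("executing", pcb)

def scheduler(new_pcb):
    global pcb
    with pcb_not_set:
        pcb = new_pcb
        pcb_not_set.notify()     # wake the blocked CPU thread

t = threading.Thread(target=cpu_loop)
t.start()
scheduler("pcb-1")
t.join()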