Example #1
class AMQPInput(BaseInput):

	log = logging.getLogger("%s.AMQPInput" %(__name__))

	def __init__(self, aggr_config, namespace="local", config={}):
		super(AMQPInput, self).__init__(aggr_config, namespace, config)
		self.exit = Event()

	def run(self):
		self.log.info("Connecting to (%s): %s" %(self.outputType, self.outputUri))
		aggrConn = self.connectAggregator()

		rabbitmqConsumer = RabbitMQConsumer(**self.config)

		def callback(event):
			if 'namespace' not in event:
				event['namespace'] = self.namespace

			aggrConn.send(event)

			if self.exit.is_set():
				aggrConn.close()
				rabbitmqConsumer.stop()

		rabbitmqConsumer.userCallbacks = [ callback ]	
		rabbitmqConsumer.start()

	def shutdown(self):
		self.exit.set()
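
The pattern above (a consumer callback that forwards events and tears everything down once an Event is set) can be reduced to a minimal, self-contained sketch; the names below are illustrative and not part of the original AMQPInput code:

from multiprocessing import Event, Process
import time

class StoppableWorker(Process):
    """Minimal sketch of the Event-based shutdown pattern used by AMQPInput."""

    def __init__(self):
        super(StoppableWorker, self).__init__()
        self.exit = Event()

    def run(self):
        while not self.exit.is_set():
            # Stand-in for consuming one message and forwarding it to the aggregator
            time.sleep(0.1)

    def shutdown(self):
        self.exit.set()

if __name__ == '__main__':
    worker = StoppableWorker()
    worker.start()
    time.sleep(0.5)
    worker.shutdown()
    worker.join()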
Example #2
class Updater(Process):

    def __init__(self, maxsize=15):
        Process.__init__(self)
        #self.queue = Queue(maxsize)
        self.queue = Queue()
        self.queue_lock = Lock()
        self._exit = Event()

    def run(self):
        while not self._exit.is_set():
            #with self.queue_lock:
            self.queue.put(self.receive())
            #self.queue.put_nowait(self.receive())
            #if self.queue.full():
            #    try:
            #        self.queue.get_nowait()
            #    except:
            #        pass

    def stop(self):
        self._exit.set()
        # This leaves the process hanging on Windows
        #self.join(STOP_TIMEOUT)
        if self.is_alive():
            #TODO make a nicer warning
            print('Terminating updater:', self)
            self.terminate()

    def receive(self):
        raise NotImplementedError
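
Updater is abstract (receive() raises NotImplementedError); a hedged usage sketch, assuming the Updater class above is in scope, could subclass it like this (TimeUpdater is purely illustrative):

import time

class TimeUpdater(Updater):
    def receive(self):
        # Stand-in for waiting on an external data source
        time.sleep(0.5)
        return time.time()

if __name__ == '__main__':
    updater = TimeUpdater()
    updater.start()
    time.sleep(2)
    print(updater.queue.get())  # consume one queued update
    updater.stop()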
Example #3
def pipelineDaemon(pipeline, returnEvent, options=None, programName=None):
    """Launches Pyro server and (if specified by options) pipeline executors"""
    
    #check for valid pipeline 
    if pipeline.runnable.empty():
        print("Pipeline has no runnable stages. Exiting...")
        sys.exit()

    if options.urifile is None:
        options.urifile = os.path.abspath(os.path.join(os.curdir, "uri"))
    
    e = Event()
    process = Process(target=launchServer, args=(pipeline,options,e,))
    process.start()
    e.wait()
    if options.num_exec != 0:
        processes = [Process(target=launchPipelineExecutor, args=(options,programName,)) for i in range(options.num_exec)]
        for p in processes:
            p.start()
        for p in processes:
            p.join()
    
    #Return to calling code if pipeline has no more runnable stages:
    #Event will be cleared once clients are unregistered. 
    while e.is_set():
        time.sleep(5)
    returnEvent.set()
Example #4
class fmanager:
    def __init__(self,data,fn):
        self.sf = Event()
        self.sf.clear()

        self.nproc=cpu_count()
        self.pipes = [Pipe() for i in range(self.nproc)]
        self.e = [evaluator(self.pipes[i][1],self.sf,data,fn) for i in range(self.nproc)]
        null = [i.start() for i in self.e]
        return

    def __del__(self):
        self.sf.set()
        null = [i.join() for i in self.e]
        null = [i.terminate() for i in self.e]

        return

    def eval(self,x):
        nd = len(x)
        for i in range(nd):
            self.pipes[i % self.nproc][0].send([i, x[i]])
        solns = []
        while len(solns) < nd:
            for i in range(self.nproc):
                if self.pipes[i][0].poll(0.005):
                    solns.append(self.pipes[i][0].recv())
        solns.sort(key=lambda i: i[0])
        return [i[1] for i in solns]
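
The evaluator class used above is not shown. Given how fmanager drives it (a Pipe endpoint, a shared stop Event, and start()/join()/terminate()), a plausible reconstruction, purely illustrative, might look like:

from multiprocessing import Process

class evaluator(Process):
    # Illustrative reconstruction; the real evaluator is not part of this example.
    def __init__(self, pipe, stop_flag, data, fn):
        Process.__init__(self)
        self.pipe = pipe
        self.stop_flag = stop_flag
        self.data = data
        self.fn = fn

    def run(self):
        # Answer evaluation requests until fmanager.__del__ sets the shared Event
        while not self.stop_flag.is_set():
            if self.pipe.poll(0.005):
                i, x = self.pipe.recv()
                # How fn consumes data is an assumption; fn(x) is the simplest guess
                self.pipe.send([i, self.fn(x)])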
Example #5
class New_Process_Actor(Actor):
    '''Create an Actor in a new process. Connected as usual with scipysim 
    channels. When this Actor is started, it launches a new process, creates
    an instance of the Actor class passed to it in a second thread, and starts
    that actor.
    '''
    def __init__(self, cls, *args, **kwargs):
        super(New_Process_Actor, self).__init__()
        self.cls = cls
        self.args = list(args)
        self.kwargs = kwargs
        self.mqueue = MQueue()
        self.mevent = MEvent()
        
        if 'input_channel' not in kwargs:
            kwargs['input_channel'] = self.args[0]
        
        chan = kwargs['input_channel']
        kwargs['input_channel'] = self.mqueue
        
        
        print('chan: ', chan)
        self.c2p = Channel2Process(chan, self.mevent, self.mqueue)
        
        self.c2p.start()


    def run(self):
        self.t = Process(target=target, args=(self.cls, self.args, self.kwargs))
        self.t.start()
        self.mevent.set() # signal that process is ready to receive
        self.c2p.join()
        self.t.join()
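
The module-level target function used in run() is not shown; a plausible shape, given the arguments passed to it, might be the following (the Actor API calls are assumptions):

def target(cls, args, kwargs):
    # Build the wrapped actor inside the new process and run it
    actor = cls(*args, **kwargs)
    actor.start()
    actor.join()  # assumes the scipysim Actor exposes start()/join()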
Example #6
class MistProcess(Process):

    def __init__(self, gpio, sleep=1, name='MistProcess'):
        Process.__init__(self, name=name)
        self.logger = multiprocessing.get_logger()
        self.event = Event()
        self.name = name
        self.gpio = gpio
        self.sleep = sleep
        self.mist = mraa.Gpio(self.gpio)
        self.mist.dir(mraa.DIR_OUT)

    def _mist_on(self):
        self.logger.debug('Mist on')
        self.mist.write(1)

    def _mist_off(self):
        self.logger.debug('Mist off')
        if self.mist:
            self.mist.write(0)

    def run(self):
        self.event.set()
        self.logger.debug('PID: %d' % multiprocessing.current_process().pid)

        while self.event.is_set():
            self._mist_on()
            time.sleep(self.sleep)

    def stop(self):
        self.logger.debug('Process {} will halt.'.format(self.name))
        self.event.clear()
        self._mist_off()
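
A hedged usage sketch, assuming an mraa-capable board; the GPIO pin and timings are illustrative:

import time

if __name__ == '__main__':
    mist = MistProcess(gpio=13, sleep=2)
    mist.start()
    time.sleep(10)   # let it run for a while
    mist.stop()      # clears the event, so the run() loop exits
    mist.join()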
Example #7
    def execute_action(self, action):
        event = Event()
        queue = Queue()
        proc = Process(
            target=execute_action_proc,
            args=(self.execute, action, event, queue))
        proc.start()

        # Send heartbeat.
        heartbeat_retry = 0
        while not event.is_set():
            event.wait(config.ACTIVITY_HEARTBEAT_INTERVAL)
            try:
                res = self.heartbeat(self.task_token)
                if res['cancelRequested']:
                    proc.terminate()
                    proc.join()
                    return Result('cancelled', -1, '', '', '', -1)
            except Exception as err:
                if heartbeat_retry <= config.ACTIVITY_HEARTBEAT_MAX_RETRY:
                    heartbeat_retry += 1
                    continue
                else:
                    proc.terminate()
                    proc.join()
                    raise

        # Evaluate the result.
        result = queue.get_nowait()
        proc.join()
        return result
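
execute_action_proc is not defined in this snippet. Given how the parent loop waits on the event and then drains the queue, a plausible, purely hypothetical reconstruction is:

def execute_action_proc(execute, action, event, queue):
    # Run the activity, report its Result, then wake the waiting parent loop
    try:
        result = execute(action)
    except Exception as err:
        # Result signature mirrored from the parent code above; exact fields are an assumption
        result = Result('failed', -1, '', str(err), '', -1)
    queue.put(result)
    event.set()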
Example #8
class Worker(Process):
    def __init__(self, buffer, reorder_buffer, job):
        Process.__init__(self)
        self.buffer = buffer
        self.reorder_buffer = reorder_buffer
        self.job = job
        self.event = Event()

    def run(self):
        self.event.set()
        while self.event.is_set():
            try:
                block_number, data = self.buffer.get()
            except IOError as e:
                if e.errno == errno.EINTR:
                    data = EOF

            if data == EOF:
                self.stop()
                break
            worked_data = self.job(data)

            while self.event.is_set():
                try:
                    self.reorder_buffer.put(block_number, worked_data)
                    break
                except IndexError:
                    # Block number bigger than expected,
                    # wait until the ReorderBuffer starts
                    # processing blocks in this range
                    time.sleep(0.1)
                except IOError as e:
                    if e.errno == errno.EINTR:
                        self.stop()
    def initProcessCommunications(self):
        # Queues and events for the acquisition processes
        self.newFreqEvent = Event()
        self.controlEvent = Event()
        self.newScanEvent = Event()
        self.captureRunningEvent = Event()
        self.recordingEvent = Event()

        self.currentVolt = Value('d',0.0)
        self.currentSamples = Value('i',0)
        self.currentFreq = Value('d',0.0)
        self.currentThick = Value('d',0.0)
        self.currentThin = Value('d',0.0)
        self.currentPower = Value('d',0.0)
        self.currentLW = Value('d',0.0)
        self.currentCycle = Value('i',0)
        self.protonsPerCycle = Value('i',0)
        self.protonsForHRS = Value('i',0)
        self.protonPulse = Value('b',False)
        self.iscool = Value('d',0.0)

        self.dataQueue = Queue()
        self.freqQueue = Queue()
        self.errorQueue = Queue()
        self.messageQueue = Queue()
        self.dataStreamQueue = Queue()
Example #10
def test_cluster(r):
    list_key = 'cluster_test:q'
    r.delete(list_key)
    task = async('django_q.tests.tasks.count_letters', DEFAULT_WORDLIST, list_key=list_key)
    assert queue_size(list_key=list_key, r=r) == 1
    task_queue = Queue()
    assert task_queue.qsize() == 0
    result_queue = Queue()
    assert result_queue.qsize() == 0
    event = Event()
    event.set()
    # Test push
    pusher(task_queue, event, list_key=list_key, r=r)
    assert task_queue.qsize() == 1
    assert queue_size(list_key=list_key, r=r) == 0
    # Test work
    task_queue.put('STOP')
    worker(task_queue, result_queue, Value('f', -1))
    assert task_queue.qsize() == 0
    assert result_queue.qsize() == 1
    # Test monitor
    result_queue.put('STOP')
    monitor(result_queue)
    assert result_queue.qsize() == 0
    # check result
    assert result(task) == 1506
    r.delete(list_key)
Example #11
    def test_play_and_record(self):
        """
        Verifies that a Device can play back prerecorded events.
        """
        device = evemu.Device(self.get_device_file())
        devnode = device.devnode
        events_file = self.get_events_file()
        # device.record() calls evemu_record() and is thus missing the
        # description that the input file has
        with open(events_file) as e:
            indata = extract_events(strip_comments(e.readlines()))

        recording_started = Event()
        q = Queue()
        record_process = Process(target=record,
                                 args=(recording_started, devnode, q))
        record_process.start()
        recording_started.wait(100)
        device.play(open(events_file))

        outdata = strip_comments(q.get())
        record_process.join()

        self.assertEqual(len(indata), len(outdata))
        fuzz = re.compile(r"E: \d+\.\d+ (.*)")
        for i in range(len(indata)):
            lhs = fuzz.match(indata[i])
            self.assertTrue(lhs)
            rhs = fuzz.match(outdata[i])
            self.assertTrue(rhs)
            self.assertEqual(lhs.group(1), rhs.group(1))
Example #12
def test_recycle(r):
    # set up the Sentinel
    list_key = 'test_recycle_test:q'
    async('django_q.tests.tasks.multiply', 2, 2, list_key=list_key, redis=r)
    async('django_q.tests.tasks.multiply', 2, 2, list_key=list_key, redis=r)
    async('django_q.tests.tasks.multiply', 2, 2, list_key=list_key, redis=r)
    start_event = Event()
    stop_event = Event()
    # override settings
    Conf.RECYCLE = 2
    Conf.WORKERS = 1
    # set a timer to stop the Sentinel
    threading.Timer(3, stop_event.set).start()
    s = Sentinel(stop_event, start_event, list_key=list_key)
    assert start_event.is_set()
    assert s.status() == Conf.STOPPED
    assert s.reincarnations == 1
    async('django_q.tests.tasks.multiply', 2, 2, list_key=list_key, redis=r)
    async('django_q.tests.tasks.multiply', 2, 2, list_key=list_key, redis=r)
    task_queue = Queue()
    result_queue = Queue()
    # push two tasks
    pusher(task_queue, stop_event, list_key=list_key, r=r)
    pusher(task_queue, stop_event, list_key=list_key, r=r)
    # worker should exit on recycle
    worker(task_queue, result_queue, Value('f', -1))
    # check if the work has been done
    assert result_queue.qsize() == 2
    # save_limit test
    Conf.SAVE_LIMIT = 1
    result_queue.put('STOP')
    # run monitor
    monitor(result_queue)
    assert Success.objects.count() == Conf.SAVE_LIMIT
    r.delete(list_key)
Example #13
def test_sentinel():
    start_event = Event()
    stop_event = Event()
    stop_event.set()
    s = Sentinel(stop_event, start_event, list_key='sentinel_test:q')
    assert start_event.is_set()
    assert s.status() == Conf.STOPPED
Example #14
class DataLoaderOnTheGround():
    def __init__(self, config):
        default_config = Config(proc_count = 4)
        self.config = default_config(**config)
        self.exit = Event()
        self.task_list = config.task_list
        self.task_queue = Queue(maxsize = 10)
        self.batch_queue = Queue(maxsize =  10)
        self.workers = []
        self.distributor = Process(target = task_distributor, args = (self,))
        for _ in range(self.config.proc_count):
            self.workers.append(Process(target = config.worker, args = (self,)))

        self.distributor.daemon = True
        self.distributor.start()
        for w in self.workers:
            w.daemon = True
            w.start()
    def next_batch(self):
        return Config(self.batch_queue.get())
    def __del__(self):
        self.exit.set()
        self.distributor.join()
        for w in self.workers:
            w.join()
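
Neither task_distributor nor config.worker is shown. Based on how DataLoaderOnTheGround wires them to its queues and exit Event, plausible illustrative shapes are:

def task_distributor(loader):
    # Keep feeding tasks until the exit Event is set
    while not loader.exit.is_set():
        for task in loader.task_list:
            if loader.exit.is_set():
                break
            loader.task_queue.put(task)

def worker(loader):
    # Turn queued tasks into batches for next_batch() to consume
    while not loader.exit.is_set():
        task = loader.task_queue.get()
        loader.batch_queue.put(build_batch(task))  # build_batch is hypothetical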
Example #15
def test_sentinel():
    start_event = Event()
    stop_event = Event()
    stop_event.set()
    s = Sentinel(stop_event, start_event, broker=get_broker('sentinel_test:q'))
    assert start_event.is_set()
    assert s.status() == Conf.STOPPED
Example #16
    def __init__(self, client, group, topic, auto_commit=True,
                 auto_commit_every_n=AUTO_COMMIT_MSG_COUNT,
                 auto_commit_every_t=AUTO_COMMIT_INTERVAL,
                 num_procs=1, partitions_per_proc=0,
                 offset_dict=None,
                 fetch_size_bytes=FETCH_MIN_BYTES,
                 buffer_size=FETCH_BUFFER_SIZE_BYTES,
                 max_buffer_size=MAX_FETCH_BUFFER_SIZE_BYTES):

        # Initiate the base consumer class
        super(MultiProcessConsumer, self).__init__(
            client, group, topic,
            partitions=None,
            auto_commit=auto_commit,
            auto_commit_every_n=auto_commit_every_n,
            auto_commit_every_t=auto_commit_every_t)

        # Variables for managing and controlling the data flow from
        # consumer child process to master
        self.queue = MPQueue(1024)  # Child consumers dump messages into this
        self.start = Event()        # Indicates the consumers to start fetch
        self.exit = Event()         # Requests the consumers to shutdown
        self.pause = Event()        # Requests the consumers to pause fetch
        self.size = Value('i', 0)   # Indicator of number of messages to fetch

        partitions = self.offsets.keys()

        # If unspecified, start one consumer per partition
        # The logic below ensures that
        # * we do not cross the num_procs limit
        # * we have an even distribution of partitions among processes
        if not partitions_per_proc:
            partitions_per_proc = round(len(partitions) * 1.0 / num_procs)
            if partitions_per_proc < num_procs * 0.5:
                partitions_per_proc += 1

        # The final set of chunks
        chunker = lambda *x: [] + list(x)
        chunks = map(chunker, *[iter(partitions)] * int(partitions_per_proc))

        self.procs = []
        for chunk in chunks:
            chunk = filter(lambda x: x is not None, chunk)
            args = (client.copy(),
                    group, topic, chunk,
                    self.queue, self.start, self.exit,
                    self.pause, self.size)

            kwargs = {}
            if offset_dict is not None:
                kwargs['offset_dict'] = offset_dict

            kwargs['fetch_size_bytes'] = fetch_size_bytes
            kwargs['buffer_size'] = buffer_size
            kwargs['max_buffer_size'] = max_buffer_size

            proc = Process(target=_mp_consume, args=args, kwargs=kwargs)
            proc.daemon = True
            proc.start()
            self.procs.append(proc)
Example #17
class DataLoaderOnTheFly():
    def __init__(self, config):
        default_config = Config(proc_count = 4, limit_batch_count = None)
        self.config = default_config(**config)
        self.exit = Event()
        self.batch_queue = Queue(maxsize = 10)
        if self.config.limit_batch_count is None:
            self.limited = False
        else:
            self.limited = True
            self.batch_list = []
            self.index = -1
        self.workers = []
        for _ in range(self.config.proc_count):
            self.workers.append(Process(target = config.worker, args = (self,)))
        for w in self.workers:
            w.daemon = True
            w.start()
    def next_batch(self):
        if self.limited:
            if len(self.batch_list) < self.config.limit_batch_count:
                self.batch_list.append(Config(self.batch_queue.get()))
            self.index = (self.index + 1) % self.config.limit_batch_count
            return Config(self.batch_list[self.index])
        else:
            return Config(self.batch_queue.get())
    def __del__(self):
        self.exit.set()
        for w in self.workers:
            w.join()
    def face_proc(self, child_face_recog: Pipe, e_new_person: Event):
        """
        Parallel process of saving people for face recognition

        Arguments:
            child_face_recog {Pipe} -- pipe for communication with parent process,
                receives the ROI ndarray of the recognized object
        """
        if not os.path.exists('humans'):
            print('created', os.getcwd())
            os.mkdir('humans')
        else:
            print('exist')
        os.chdir(os.path.join(os.getcwd(), 'humans'))
        is_first = True
        counter = 0
        while True:
            if e_new_person.is_set():
                counter = 0
                if not is_first:
                    os.chdir('..')
                new_dir = datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S")
                os.mkdir(new_dir)
                print('Created', os.getcwd() + new_dir)
                os.chdir(os.path.join(os.getcwd(), new_dir))
                e_new_person.clear()
                is_first = False
            image = child_face_recog.recv()
            cv.imwrite(filename=str(counter) + '.jpg', img=image)
            print('image saved:', os.getcwd() + str(counter) + '.jpg')
            counter += 1
Example #19
    def _process(self, worker_count, hard, permissions_only=False):
        idxs = ["allowedRolesAndUsers"] if permissions_only else []
        terminator = Event()
        toggle_debug = Event()

        def handle_SIGUSR1(signum, frame):
            if toggle_debug.is_set():
                toggle_debug.clear()
            else:
                toggle_debug.set()

        signal.signal(signal.SIGUSR1, handle_SIGUSR1)

        p = Process(
            target=_run_model_catalog_init,
            args=(worker_count, hard, idxs, terminator, toggle_debug)
        )
        try:
            p.start()
            p.join()
        except KeyboardInterrupt:
            log.info("Received signal to terminate, stopping subprocess")
            terminator.set()
            p.join(90)
            if p.is_alive():
                log.info("Timeout waiting for subprocess to exit gracefully")
                p.terminate()
    def testThreadChecker(self):
        stop_event = Event()
        link = "munichre.com"
        checker = SiteThreadChecker(full_link=link, thread_pool_size=3, max_page=3000, max_level=10)

        def crawl():
            checker.crawling()

        queue_server_t = Process(target=run_queue_server)
        queue_server_t.start()
        output_t = Process(target=single_output, args=(stop_event,))
        output_t.start()
        # link = "http://sweetlifebake.com/#axzz3t4Nx7b7N"
        crawl_t = Thread(target=crawl)
        crawl_t.start()
        timeout = 1000
        counter = 0
        while counter < timeout:
            time.sleep(1)
            counter += 1
        print("is going to sudden death.")
        stop_event.set()
        checker.sudden_death()
        if crawl_t.is_alive():
            crawl_t.join()
        output_t.terminate()
        queue_server_t.terminate()

        print("finished")
    def recog_proc(self, child_recog: Pipe, e_recog: Event, yolo_type: str):
        """
        Parallel process for object recognition

        Arguments:
            child_recog {Pipe} -- pipe for communication with parent process,
                sends bbox yolo type of recognized object
            e_recog {Event} -- event for indicating complete recognize in frame
        """

        # initialize YOLO
        yolo = Yolo(yolo_type)
        e_recog.set()
        print("yolo defined")

        while True:
            frame = child_recog.recv()
            print("recog process frame recieved")
            if frame is None:
                print("FRAME NONE? R U SURE ABOUT THAT?!")
                return
            res = yolo.detect(frame, cvmat=True)
            print("recog send")
            e_recog.set()
            child_recog.send(res)
    def run(self):
        logger = self.ipc_logger()
        input_queue = Queue(20 * self.n_processes)
        done_event = Event()
        processes = [
            ProteinDigestingProcess(
                self.connection, self.hypothesis_id, input_queue,
                self.digestor, done_event=done_event,
                message_handler=logger.sender()) for i in range(
                self.n_processes)
        ]
        protein_ids = self.protein_ids
        i = 0
        n = len(protein_ids)
        chunk_size = 2
        interval = 30
        for process in processes:
            input_queue.put(protein_ids[i:(i + chunk_size)])
            i += chunk_size
            process.start()

        last = i
        while i < n:
            input_queue.put(protein_ids[i:(i + chunk_size)])
            i += chunk_size
            if i - last > interval:
                self.log("... Dealt Proteins %d-%d %0.2f%%" % (
                    i - chunk_size, min(i, n), (min(i, n) / float(n)) * 100))
                last = i

        done_event.set()
        for process in processes:
            process.join()
        logger.stop()
Example #23
class DeviceServer(ThreadedTCPServer, Process):
	
	#causes handle_request to return
	timeout = 1
	
	def __init__(self, mux, muxdevice, server_address, RequestHandlerClass):
		Process.__init__(self)
		ThreadedTCPServer.__init__(self, server_address, RequestHandlerClass)
		self.mux = mux
		self.muxdev = muxdevice
		self._stop = Event()

	def stop(self):
		self._stop.set()
		
	def stopped(self):
		return self._stop.is_set()

	def run(self):
		if self.stopped():
			_LOGGER.warning("Thread already stopped")
		
		while not self.stopped():
			self.handle_request()
		self.socket.close()
		_LOGGER.debug("%s will exit now" % (str(self)))
Example #24
def main():
    # Use alphazero self-play for data generation
    agents_meta = parse_schedule() 

    # worker variable of main process
    board = Board()
    sigexit = Event()
    sigexit.set()  # pre-set signal so main proc generator will iterate only once

    # subprocess data generator
    helper = DataHelper(data_files=[])
    helper.set_agents_meta(agents_meta=agents_meta)     
    generator = helper.generate_batch(TRAINING_CONFIG["batch_size"])

    # start generating
    with h5py.File(f"{DATA_CONFIG['data_path']}/latest.train.hdf5", 'a') as hf:    
        for state_batch, value_batch, probs_batch in generator:
            for batch_name in ("state_batch", "value_batch", "probs_batch"):
                if batch_name not in hf:
                    shape = locals()[batch_name].shape
                    hf.create_dataset(batch_name, (0, *shape), maxshape=(None, *shape))
                hf[batch_name].resize(hf[batch_name].shape[0] + 1, axis=0)
                hf[batch_name][-1] = locals()[batch_name]

            # prevent main proc from generating data too quick
            # since sigexit has been set, proc will iterate only once
            run_proc(helper.buffer, helper.buffer_size, helper.lock,
                     sigexit, agents_meta, board) 
            board.reset()
Example #25
	def __init__(self, id, config, sequence, hist_obj, results_path, log_id):
		Process.__init__(self)
		self.id = id
		self.config = config
		self.sequence = sequence
		self.hist_obj = hist_obj
		self.agent = Agent(self.id, config, sequence)
		self.results_path = results_path
		self.log_id = log_id
		self.leader_send = None
		self.leader_recv = None
		self.support_send = [None for i in range(0, self.config.num_sup)] if id * self.config.num_sup + 1 < self.config.num_agents else None
		self.support_recv = [None for i in range(0, self.config.num_sup)] if id * self.config.num_sup + 1 < self.config.num_agents else None
		self.root_div_send = None
		self.leader_div_send = None
		self.agent_div_recv = [None for i in range(1, self.config.num_agents)] if self.agent.id_leader == None else None
		self.support_div_recv = [None for i in range(1, self.config.num_sup+1)] if self.agent.id_supporters else None
		self.leader_reset_send = None
		self.leader_reset_recv = None
		self.support_reset_send = [None for i in range(0, self.config.num_sup)] if id * self.config.num_sup + 1 < self.config.num_agents else None
		self.support_reset_recv = [None for i in range(0, self.config.num_sup)] if id * self.config.num_sup + 1 < self.config.num_agents else None
		self.event_restart = Event()
		self.stop_event = Event()
		self.support_stop_event = [None for i in range(0, self.config.num_sup)] if id * self.config.num_sup + 1 < self.config.num_agents else None
		self.energy_number = Queue(1)
		self.support_energy_number = [None for i in range(0, self.config.num_sup)] if id * self.config.num_sup + 1 < self.config.num_agents else None
class QueueTask:
    def __init__(self):
        self.queue = JoinableQueue()
        self.event = Event()
        atexit.register( self.queue.join )

        process = Process(target=self.work)
        process.daemon = True
        process.start()


    def work(self):
        while True:
            func, args, wait_for = self.queue.get()

            for evt in wait_for: 
                evt.wait()
            func(*args)
            self.event.set()

            self.queue.task_done()


    def enqueue(self, func, args=[], wait_for=[]):
        self.event.clear()
        self.queue.put( (func, args, wait_for) )

        return self.event 
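
A hedged usage sketch, assuming the QueueTask class above is importable; download and parse are illustrative callables:

def download(url):
    print('downloading', url)

def parse(url):
    print('parsing', url)

if __name__ == '__main__':
    tasks = QueueTask()
    downloaded = tasks.enqueue(download, args=['http://example.com'])
    # The second task waits on the Event returned by the first enqueue call
    tasks.enqueue(parse, args=['http://example.com'], wait_for=[downloaded])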
Example #27
class SubProcessWrapper:
	cname = __name__ + '.SubProcessWrapper'
	def __init__(self, target, name=None):
		self.target = target
		self.running = False
		self.name = name if name else target.task_name()
		self.kill_event = Event()
		self.logger = logging.getLogger(self.cname)

	def run(self):
		self.logger.info("starting SubProcessTask: {}".format(self.target.task_name()))
		th = Thread(target=self.target, name=self.target.task_name())
		th.start()		
		signal.signal(signal.SIGINT, signal.SIG_IGN)
		self.kill_event.wait()
		self.logger.info("stopping SubProcessTask: {}".format(self.target.task_name()))
		self.target.stop()
		th.join()
		self.logger.info("Stopped SubProcessTask: {}".format(self.target.task_name()))

	def __call__(self):
		self.run()

	def get_kill_event(self):
		return self.kill_event
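
A hedged usage sketch, assuming SubProcessWrapper above is importable; HeartbeatTask is an illustrative target that provides the task_name()/__call__()/stop() interface the wrapper expects:

import time
from multiprocessing import Event, Process

class HeartbeatTask:
    def __init__(self):
        self._stop = Event()

    def task_name(self):
        return 'heartbeat'

    def __call__(self):
        while not self._stop.is_set():
            time.sleep(1)

    def stop(self):
        self._stop.set()

if __name__ == '__main__':
    wrapper = SubProcessWrapper(HeartbeatTask())
    proc = Process(target=wrapper)
    proc.start()
    time.sleep(3)
    wrapper.get_kill_event().set()  # ask the wrapped task to shut down
    proc.join()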
class Logger(object):
    def __init__(self, filename):
        self.qtag = Queue()
        self.done = Event()
        self.tag = None
        self.filename = filename
        self.file = None
    def start(self):
        self.file = open(self.filename, 'w')
        print('Opened', self.filename, 'for writing.')
    def set_tag(self, tag):
        self.qtag.put(tag)
    def set_done(self):
        self.done.set()
    def log(self, nodeid, msgid, data):
        if not self.qtag.empty():
            self.tag = self.qtag.get()
        if self.done.is_set():
            self.done.clear()
            return True
        L = ['%f' % time.time(), '%d' % nodeid, '%d' % msgid] + list(map(str, data))
        if self.tag:
            L.append(self.tag)
        print(','.join(L), file=self.file)
        self.file.flush()
    def close(self):
        if self.file:
            self.file.close()
            print('File closed.')
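
A hedged usage sketch of the Logger above (file name and field values are illustrative):

logger = Logger('measurements.csv')
logger.start()                       # opens the output file
logger.log(nodeid=1, msgid=10, data=[3.14, 'ok'])
logger.set_tag('calibration')        # tag is picked up on the next log() call
logger.log(nodeid=1, msgid=11, data=[2.71])
logger.set_done()
logger.log(0, 0, [])                 # returns True once the done event is seen
logger.close()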
Example #29
class ClassifierWorkerPool(object):
    def __init__(self):
        self.queue = Queue(100)
        self.workers = []
        self.stop = Event()
        self.stop.clear()
        self.queue_feeder = QueueFeeder(self.queue, self.stop)

        row = TrainedClassifiers.objects(name=config.classifier).first()

        if not row:
            raise Exception("Classifier %s does not exists" % config.classifier)

        self.trained_classifier = row.get_classifier()

    def start(self):
        self.queue_feeder.start()

        for i in range(0, config.classifier_pool_size):
            worker = ClassifierWorker(self.trained_classifier, self.queue, self.stop)
            worker.start()
            self.workers.append(worker)

    def terminate(self):
        self.stop.set()
        self.queue_feeder.join()
        for w in self.workers:
            w.join()
def single_output(stop_event: Event):
    print("single output get queue:")
    sum_limit = 1000
    counter = 0
    manager, output_q = get_queue_client(QueueManager.MachineSettingCrawler, QueueManager.Method_Whois_Input)
    while not stop_event.is_set():
        try:
            while not output_q.empty() or not stop_event.is_set():
                result = output_q.get(False, 1)
                counter += 1
                if isinstance(result, list):
                    for item in result:
                        print("server queue output:", str(item), "count:", counter)
                else:
                    # print(result)
                    pass
                if counter/sum_limit > 0 and counter % sum_limit==0:
                    print("current output count is:", counter)
                time.sleep(0.000001)
        except Exception as ex:
            pass
            # manager, output_q = get_queue_client(QueueManager.MachineSettingCrawler, QueueManager.Method_Whois_Output)
        finally:
            print("going to sleep.")
            time.sleep(1)
Example #31
    def _run_tests(self, tests, **kwargs):
        # tests = dict where the key is a test group name and the value are
        # the tests to run
        tests_queue = Queue()
        results_queue = Queue()
        stop_event = Event()

        pending_tests = {}
        pending_not_thread_safe_tests = {}
        completed_tests = {}
        failures = 0
        errors = 0

        start_time = time.time()
        # First run tests which are not thread safe in the main process
        for group in self._not_thread_safe:
            if group not in tests:
                continue

            group_tests = tests[group]
            del tests[group]

            self.log('Running tests in a main process: %s' % (group_tests))
            pending_not_thread_safe_tests[group] = group_tests
            result = self._tests_func(tests=group_tests, worker_index=None)
            results_queue.put((group, result), block=False)

        for group, tests in tests.items():
            tests_queue.put((group, tests), block=False)
            pending_tests[group] = tests

        worker_count = self._worker_count
        if worker_count == 'auto':
            worker_count = len(pending_tests)
        elif worker_count == 'cpu':
            worker_count = multiprocessing.cpu_count()

        if worker_count > len(pending_tests):
            # No need to spawn more workers than there are tests.
            worker_count = len(pending_tests)

        worker_args = (tests_queue, results_queue, stop_event)
        workers = self._create_worker_pool(pool_size=worker_count,
                                           target_func=self._run_tests_worker,
                                           worker_args=worker_args)

        for index, worker in enumerate(workers):
            self.log('Starting worker %s' % (index))
            worker.start()

        while pending_tests:
            try:
                try:
                    group, result = results_queue.get(timeout=self._parent_timeout,
                                                    block=True)
                except Exception:
                    continue

                try:
                    if group not in pending_not_thread_safe_tests:
                        pending_tests.pop(group)
                    else:
                        pending_not_thread_safe_tests.pop(group)
                except KeyError:
                    self.log('Got a result for unknown group: %s' % (group))
                else:
                    completed_tests[group] = result
                    self._print_result(result)

                    if result.failures or result.errors:
                        failures += len(result.failures)
                        errors += len(result.errors)

                        if self.failfast:
                            # failfast is enabled, kill all the active workers and stop
                            for worker in workers:
                                if worker.is_alive():
                                    worker.terminate()
                            break
            except Empty:
                worker_left = False

                for worker in workers:
                    if worker.is_alive():
                        worker_left = True
                        break

                if not worker_left:
                    break

        # We are done, signal all the workers to stop
        stop_event.set()

        end_time = time.time()
        self._exit(start_time, end_time, failures, errors)
Example #32
        },
        "done": {}
    }
    global_rb = manager.PrioritizedReplayBuffer(memory_size,
                                                env_dict=env_dict,
                                                alpha=PER_a,
                                                eps=PER_e)

    n_explorer = multiprocessing.cpu_count() - 1

    n_queue = n_explorer
    n_queue += 1  # for evaluation
    queues = [manager.Queue() for _ in range(n_queue)]

    # Event object to share training status. If the event is set, all explorers stop sampling transitions
    is_training_done = Event()

    # Lock
    lock = manager.Lock()

    # Shared memory objects to count number of samples and applied gradients
    trained_steps = Value('i', 0)

    tasks = []
    local_buffer_size = 100
    episode_max_steps = 200

    for i in range(n_explorer):
        tasks.append(
            Process(target=explorer,
                    args=[
Example #33
#conProc = RemoteControlReceiver([],[nucS])
#allProcesses.append(conProc)
#conProc1 = AutonomousController([laneR2, objR2],[])
#allProcesses.append(conProc1)

nucProc = NucleoInterface([nucR], [])
allProcesses.append(nucProc)

# ========================================================================================

print("Starting the processes!", allProcesses)
for proc in allProcesses:
    proc.daemon = True
    proc.start()

blocker = Event()

try:
    blocker.wait()
except KeyboardInterrupt:
    print(
        "\nCatching a KeyboardInterruption exception! Shutdown all processes.\n"
    )
    for proc in allProcesses:
        if hasattr(proc, 'stop') and callable(getattr(proc, 'stop')):
            print("Process with stop", proc)
            proc.stop()
            proc.join()
        else:
            print("Process witouth stop", proc)
            proc.terminate()
Example #34
    def _al_create_and_start_processes(self, tasks_data):
        #0. Create output queues
        for data in tasks_data:
            task_id = data[1]
            queue = Queue(1)
            self._task_output_queues[task_id] = queue

        self._termination_event = Event()

        #1. Initialize tasks
        tasks = []
        for data in tasks_data:
            node = data[0]
            node_id = data[1]
            parent_node_id = data[2]
            is_last = data[3]

            #1.1 Creating messenger for task
            task_queue = self._task_output_queues.get(node_id)
            if parent_node_id is not None:
                parent_task_queue = self._task_output_queues.get(
                    parent_node_id)
            else:
                parent_task_queue = None

            messenger = BatchprocessingQueueMessenger(node, task_queue,
                                                      parent_task_queue,
                                                      self._termination_event)

            if isinstance(node, ProducerNode):
                task = ProducerTask(node, messenger, node_id, is_last)
                tasks.append(task)

            elif isinstance(node, ProcessorNode):
                if node.nb_tasks > 1:
                    receiveQueue = Queue(10)
                    accountingQueue = Queue()
                    output_queues = [Queue() for _ in range(node.nb_tasks)]

                    # Create receive task
                    receive_task = MultiprocessingReceiveTask(
                        node, parent_task_queue, receiveQueue, BATCH)
                    tasks.append(receive_task)

                    # Create processor tasks
                    mp_tasks_lock = Lock()
                    for idx in range(node.nb_tasks):
                        mp_task = MultiprocessingProcessorTask(
                            idx, node, mp_tasks_lock, receiveQueue,
                            accountingQueue, output_queues[idx])
                        tasks.append(mp_task)

                    # Create output task
                    output_task = MultiprocessingOutputTask(
                        node, task_queue, accountingQueue, output_queues,
                        BATCH, is_last)
                    tasks.append(output_task)
                else:
                    task = ProcessorTask(node, messenger, node_id, is_last,
                                         parent_node_id)
                    tasks.append(task)

            elif isinstance(node, ConsumerNode):
                task = ConsumerTask(node, messenger, node_id, is_last,
                                    parent_node_id)
                tasks.append(task)

        #2. Create processes
        for task in tasks:
            if isinstance(task, ProcessorTask) or isinstance(
                    task, MultiprocessingProcessorTask):
                if task.device_type == GPU:
                    self._next_gpu_index += 1
                    if self._next_gpu_index < self._nb_available_gpus:
                        proc = create_process_task_gpu(
                            task, self._gpu_ids[self._next_gpu_index])
                    else:
                        try:
                            task.change_device(CPU)
                            proc = create_process_task(task)
                        except:
                            raise RuntimeError(
                                'No GPU available to allocate {}'.format(
                                    str(task._computation_node)))
                else:
                    proc = create_process_task(task)
            else:
                proc = create_process_task(task)
            self._procs.append(proc)

        #3. Start processes.
        for proc in self._procs:
            proc.start()
Example #35
class MetricsCollector:
    def __init__(self, interval=1, collect_load=False, use_thread=False):
        self._event_start_collect_metrics = Event()
        self._event_stop = Event()
        self._tflops = Value('d', 0.0)
        self._cpu_tflops = Value('d', 0.0)
        self._gpu_tflops = Value('d', 0.0)
        self._pid = Value('i', 0)
        self._tflops_lock = RLock()
        self.interval = interval
        self._process = None
        self._start_timestamp = None
        self._end_timestamp = None
        self._collect_load = collect_load
        self._use_thread = use_thread
        self._cpu_loads = []
        self._gpu_loads = []

    def get_tflops(self):
        with self._tflops_lock:
            return self._tflops.value

    def get_gpu_tflops(self):
        with self._tflops_lock:
            return self._gpu_tflops.value

    def get_cpu_tflops(self):
        with self._tflops_lock:
            return self._cpu_tflops.value

    @property
    def average_cpu_load(self):
        if len(self._cpu_loads):
            return sum(self._cpu_loads) / float(len(self._cpu_loads))
        return 0

    @property
    def average_gpu_load(self):
        if len(self._gpu_loads):
            return sum(self._gpu_loads) / float(len(self._gpu_loads))
        return 0

    def add_tflops(self, cpu_tflops, gpu_tflops):
        with self._tflops_lock:
            self._tflops.value += cpu_tflops + gpu_tflops
            self._gpu_tflops.value += gpu_tflops
            self._cpu_tflops.value += cpu_tflops

    def set_pid(self, pid):
        self._pid.value = pid

    def get_pid(self):
        return self._pid.value

    def _start_collect_metrics(self):
        self._start_timestamp = time.time()
        self._event_start_collect_metrics.set()

    def wait_for_start_collect_metrics(self):
        self._event_start_collect_metrics.wait()

    def should_stop_collect_metrics(self, wait):
        return self._event_stop.wait(wait)

    def _stop_collect_metrics(self):
        logger.info('Signal for stop collect metrics')
        self._event_start_collect_metrics.set()
        self._event_stop.set()
        self._end_timestamp = time.time()

    def __enter__(self):
        self._start_collect_metrics()

    def __exit__(self, exc_type, exc_val, exc_tb):
        self._stop_collect_metrics()

    def start_and_wait_signal(self):
        # be sure this instance will not start collect metrics more than once
        assert self._process is None
        if self._use_thread:
            self._process = Thread(target=self._collect_metrics)
        else:
            self._process = Process(target=self._collect_metrics)
        self._process.start()

    def clean(self):
        self._stop_collect_metrics()

        if self._process:
            logger.info(
                'Wait for end of process collect metrics PID: {}'.format(
                    self._process.pid))
            self._process.join(10)
            if self._process.is_alive():
                logger.info('Terminating process {}'.format(self._process.pid))
                self._process.terminate()
            logger.info('Process of collect metrics is finished')

    def _collect_metrics(self):
        self.wait_for_start_collect_metrics()
        logger.info('Start collect metrics for PID: {}'.format(self.get_pid()))
        try:
            snapshot = ProcessSnapshot(self.get_pid())
            while not self.should_stop_collect_metrics(self.interval):
                snapshot.update()
                self.add_tflops(
                    cpu_tflops=snapshot.get_cpu_tflops() * self.interval,
                    gpu_tflops=snapshot.get_gpu_tflops() * self.interval)

                if self._collect_load:
                    self._cpu_loads.append(snapshot.get_cpu_load())
                    self._gpu_loads.append(snapshot.get_gpu_load())

        except NoSuchProcess as ex:
            logger.exception(ex)

        logger.info('Stop collect metrics for PID: {}'.format(self.get_pid()))

    @property
    def total_seconds(self):
        return self._end_timestamp - self._start_timestamp
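
A hedged usage sketch, assuming the module-level ProcessSnapshot and logger used above are importable; use_thread=True keeps the collected load samples visible in the calling process:

import os
import time

collector = MetricsCollector(interval=1, collect_load=True, use_thread=True)
collector.set_pid(os.getpid())
collector.start_and_wait_signal()  # starts the collector, which waits for the start event

with collector:                    # __enter__ sets the start event, __exit__ signals stop
    time.sleep(5)                  # stand-in for the monitored workload

collector.clean()
print(collector.get_tflops(), collector.average_cpu_load)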
Example #36
    def __init__(self):
        self.proc = None
        self.daemon = None
        self.stop = Event()

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.consume = Event()
        self.waiting = Event()



def clock(clock_ready, number_of_days):
    c = 0

    while c < number_of_days:
        clock_ready.set()  # All threads waiting for the flag to become true are awakened.
        c += 1
        time_i = time.time()

        # 'delay' (seconds per simulated day) is assumed to be defined elsewhere in this script
        while time.time() - time_i < delay:
            time.sleep(2)


if __name__ == "__main__":
    start = time.time()
    nb_of_days = int(input("Enter the number of days for the simulation"))  # could also run indefinitely and stop with Ctrl-C
    nb_of_homes = int(input("Enter the number of houses (minimum 3)"))

    clock_ready = Event()

    c = Process(target=clock, args=(clock_ready, nb_of_days,))
    c.start()

    c.join()
Example #39
class TestDLTMessageHandler(unittest.TestCase):
    def setUp(self):
        self.filter_queue = Queue()
        self.message_queue = Queue()
        self.client_cfg = {
            "ip_address": "127.0.0.1",
            "filename": "/dev/null",
            "verbose": 0,
        }
        self.stop_event = Event()
        self.handler = DLTMessageHandler(self.filter_queue, self.message_queue,
                                         self.stop_event, self.client_cfg)

    def test_init(self):
        self.assertFalse(self.handler.mp_stop_flag.is_set())
        self.assertFalse(self.handler.is_alive())
        self.assertTrue(self.handler.filter_queue.empty())
        self.assertTrue(self.handler.message_queue.empty())

    def test_run_basic(self):
        self.assertFalse(self.handler.is_alive())
        self.handler.start()
        self.assertTrue(self.handler.is_alive())
        self.assertNotEqual(self.handler.pid, os.getpid())
        self.stop_event.set()
        self.handler.join()
        self.assertFalse(self.handler.is_alive())

    def test_handle_add_new_filter(self):
        self.handler.filter_queue.put(("queue_id", [("SYS", "JOUR")], True))
        time.sleep(0.01)
        self.handler.handle(None)
        self.assertIn(("SYS", "JOUR"), self.handler.context_map)
        self.assertEqual(self.handler.context_map[("SYS", "JOUR")],
                         ["queue_id"])

    def test_handle_remove_filter_single_entry(self):
        self.handler.filter_queue.put(("queue_id", [("SYS", "JOUR")], True))
        time.sleep(0.01)
        self.handler.handle(None)
        self.assertIn(("SYS", "JOUR"), self.handler.context_map)
        self.assertEqual(self.handler.context_map[("SYS", "JOUR")],
                         ["queue_id"])

        self.handler.filter_queue.put(("queue_id", [("SYS", "JOUR")], False))
        time.sleep(0.01)
        self.handler.handle(None)
        self.assertNotIn(("SYS", "JOUR"), self.handler.context_map)

    def test_handle_remove_filter_multiple_entries(self):
        self.handler.filter_queue.put(("queue_id1", [("SYS", "JOUR")], True))
        self.handler.filter_queue.put(("queue_id2", [("SYS", "JOUR")], True))
        time.sleep(0.01)
        self.handler.handle(None)
        self.assertIn(("SYS", "JOUR"), self.handler.context_map)
        self.assertEqual(self.handler.context_map[("SYS", "JOUR")],
                         ["queue_id1", "queue_id2"])

        self.handler.filter_queue.put(("queue_id1", [("SYS", "JOUR")], False))
        time.sleep(0.01)
        self.handler.handle(None)
        self.assertIn(("SYS", "JOUR"), self.handler.context_map)
        self.assertEqual(self.handler.context_map[("SYS", "JOUR")],
                         ["queue_id2"])

    def test_handle_multiple_similar_filters(self):
        self.handler.filter_queue.put(("queue_id0", [("SYS", "JOUR")], True))
        self.handler.filter_queue.put(("queue_id1", [("SYS", "JOUR")], True))
        time.sleep(0.01)
        self.handler.handle(None)
        self.assertIn(("SYS", "JOUR"), self.handler.context_map)
        self.assertEqual(self.handler.context_map[("SYS", "JOUR")],
                         ["queue_id0", "queue_id1"])

    def test_handle_multiple_different_filters(self):
        self.filter_queue.put(("queue_id0", [("SYS", "JOUR")], True))
        self.filter_queue.put(("queue_id1", [("DA1", "DC1")], True))
        time.sleep(0.01)
        self.handler.handle(None)
        self.assertIn(("SYS", "JOUR"), self.handler.context_map)
        self.assertIn(("DA1", "DC1"), self.handler.context_map)
        self.assertEqual(self.handler.context_map[("SYS", "JOUR")],
                         ["queue_id0"])
        self.assertEqual(self.handler.context_map[("DA1", "DC1")],
                         ["queue_id1"])

    def test_handle_message_tag_and_distribute(self):
        self.filter_queue.put(("queue_id0", [("SYS", "JOUR")], True))
        self.filter_queue.put(("queue_id1", [("DA1", "DC1")], True))
        self.filter_queue.put(("queue_id2", [("SYS", None)], True))
        self.filter_queue.put(("queue_id3", [(None, "DC1")], True))
        self.filter_queue.put(("queue_id4", [(None, None)], True))
        time.sleep(0.01)

        # - simulate receiving of messages
        for _ in range(10):
            for message in create_messages(stream_multiple, from_file=True):
                self.handler.handle(message)

        self.assertIn(("SYS", "JOUR"), self.handler.context_map)
        self.assertIn(("DA1", "DC1"), self.handler.context_map)
        self.assertIn((None, None), self.handler.context_map)
        self.assertIn(("SYS", None), self.handler.context_map)
        self.assertIn((None, "DC1"), self.handler.context_map)
        try:
            # 60 == 10 messages for each of the four specific filters +
            #       20 for (None, None)
            messages = [
                self.message_queue.get(timeout=0.01) for _ in range(60)
            ]

            # these queues should not get any messages from other queues
            self.assertEqual(
                len([msg for qid, msg in messages if qid == 'queue_id0']), 10)
            self.assertEqual(
                len([msg for qid, msg in messages if qid == 'queue_id1']), 10)
            self.assertEqual(
                len([msg for qid, msg in messages if qid == 'queue_id2']), 10)
            self.assertEqual(
                len([msg for qid, msg in messages if qid == 'queue_id3']), 10)
            # this queue should get all messages
            self.assertEqual(
                len([msg for qid, msg in messages if qid == 'queue_id4']), 20)
        except Empty:
            # - we should not get an Empty for at least 40 messages
            self.fail()
Example #40
class Publisher:
    """ Publisher class in which the queue for publishing messages is defined and also a separated process is started.
    It is important to have a new process, since the serialization/deserialization of messages from the QUEUE may be
    a bottleneck for performance.
    """
    def __init__(self, port=None):
        self.logger = get_logger(name=__name__)
        if not port:
            self._port = config.zmq_port
        else:
            self._port = port
        self._queue = Queue(
        )  # The publisher will grab and broadcast the messages from this queue
        self._event = Event()  # This event is used to stop the process
        self._process = None
        self.logger.info('Initialized publisher on port {}'.format(self._port))

    def start(self):
        """ Start a new process that will be responsible for broadcasting the messages.

            .. TODO:: Find a way to start the publisher on a different port if the one specified is in use.
        """
        self._event.clear()
        try:
            self._process = Process(
                target=publisher, args=[self._queue, self._event, self._port])
            self._process.start()
            return True
        except zmq.ZMQError:
            self._port += 1
            config.zmq_port = self._port
            return self.start()

        # sleep(1)  # This forces the start to block until the publisher is ready

    def stop(self):
        self._event.set()
        self.empty_queue()

    def empty_queue(self):
        """ If the publisher stops before broadcasting all the messages, the Queue may still be using some memory. This
        method is simply getting all the elements in order to free memory. Can be useful for garbage collection or
        better control of the downstream program.
        """
        self.logger.info('Emptying the queue of the publisher')
        self.logger.debug('Queue length: {}'.format(self._queue.qsize()))
        self._queue.close()

    def publish(self, topic, data):
        """ Adapts the data to make it faster to broadcast

        :param str topic: Topic in which to publish the data
        :param data: Data to be published
        :return: None
        """
        self.logger.debug('Adding data of type {} to topic {}'.format(
            type(data), topic))
        try:
            self._queue.put({'topic': topic, 'data': data})
        except AssertionError:
            # This means the queue has been closed already
            pass

    @property
    def port(self):
        return self._port

    @port.setter
    def port(self, new_port):
        if new_port != self._port:
            self._port = new_port
            self.logger.warning(
                'Changing the port requires restarting the publisher process')
            self.logger.debug(
                'Setting the new publisher port to {}'.format(new_port))
            self.stop()
            self._process.join()
            self._process = Process(
                target=publisher, args=[self._queue, self._event, self._port])
            self.start()
        else:
            self.logger.warning(
                'New port {} is the same as the old port'.format(new_port))

    def join(self, timeout=0):
        if self._process.is_alive():
            self.logger.debug('Waiting for Publisher process to finish')
            self._process.join(timeout)
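
A hedged usage sketch, assuming the module-level publisher worker function and config used above are importable; the port and payload are illustrative:

if __name__ == '__main__':
    pub = Publisher(port=5555)
    pub.start()
    pub.publish('temperature', {'value': 21.5, 'units': 'C'})
    pub.stop()
    pub.join(5)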
Example #41
class FramesStats(WigProcess):
    """
    TODO: Documentation
    """

    __module_name__ = "Frame Stats"

    def __init__(self, frames_queue, output_queue, injection_queue=None):
        WigProcess.__init__(self)
        self.__stop__ = Event()

        self.__queue__ = frames_queue
        self.__output__ = output_queue
        self.__total_frames_count__ = 0
        self.__type_management_count__ = 0
        self.__type_control_count__ = 0
        self.__type_data_count__ = 0
        self.__type_unknown_count__ = 0

    def run(self):
        """
        TODO: Documentation
        """
        self.set_process_title()

        try:
            while not self.__stop__.is_set():
                try:
                    frame = self.__queue__.get(timeout=5)
                    self.__total_frames_count__ += 1

                    frame_type = ieee80211.get_frame_type(frame)

                    if frame_type == 0:
                        self.__type_management_count__ += 1
                    elif frame_type == 1:
                        self.__type_control_count__ += 1
                    elif frame_type == 2:
                        self.__type_data_count__ += 1
                    else:
                        self.__type_unknown_count__ += 1
                except Empty:
                    pass
        # Ignore SIGINT signal, this is handled by parent.
        except KeyboardInterrupt:
            pass

        aux =  OrderedDict()
        aux['Module'] = self.__module_name__
        aux['Frames'] = self.__total_frames_count__
        aux['Management Frames'] = self.__type_management_count__
        aux['Control Frames'] = self.__type_control_count__
        aux['Data Frames'] = self.__type_data_count__
        aux['Unknown Frames'] = self.__type_unknown_count__
        self.__output__.put(aux)

    def get_frame_type_filter(self):
        """
        Returns a list of IEEE 802.11 frame types supported by the module.
        """
        # Empty filter means all frames
        return []

    def get_frame_subtype_filter(self):
        """
        Returns a list of IEEE 802.11 frame subtypes supported by the module.
        """
        # Empty filter means all frames
        return []

    def shutdown(self):
        """
        This method sets the __stop__ event to stop the process.
        """
        self.__stop__.set()
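
A hedged usage sketch, assuming the WigProcess and ieee80211 dependencies of the original module are available; the frames queue would normally be filled by a separate capture process:

from multiprocessing import Queue

if __name__ == '__main__':
    frames_queue = Queue()   # filled elsewhere by a capture process
    output_queue = Queue()

    stats = FramesStats(frames_queue, output_queue)
    stats.start()
    # ... capture runs for a while ...
    stats.shutdown()         # sets the internal __stop__ event
    stats.join()
    print(output_queue.get())  # OrderedDict with per-type frame counts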
Example #42
class BufferedCapture(Process):
    """ Capture from device to buffer in memory.
    """
    
    running = False
    
    def __init__(self, array1, startTime1, array2, startTime2, config, video_file=None):
        """ Populate arrays with (startTime, frames) after startCapture is called.
        
        Arguments:
            array1: numpy array in shared memory that is going to be filled with frames
            startTime1: float in shared memory that holds time of first frame in array1
            array2: second numpy array in shared memory
            startTime2: float in shared memory that holds time of first frame in array2

        Keyword arguments:
            video_file: [str] Path to the video file, if it was given as the video source. None by default.

        """
        
        super(BufferedCapture, self).__init__()
        self.array1 = array1
        self.startTime1 = startTime1
        self.array2 = array2
        self.startTime2 = startTime2
        
        self.startTime1.value = 0
        self.startTime2.value = 0
        
        self.config = config

        self.video_file = video_file

        # A frame is considered dropped if it arrives more than half a frame
        # interval late, i.e. the gap since the previous frame exceeds
        # 1.5 frame intervals
        self.time_for_drop = 1.5*(1.0/config.fps)

        self.dropped_frames = 0
    


    def startCapture(self, cameraID=0):
        """ Start capture using specified camera.
        
        Arguments:
            cameraID: ID of the video capturing device (i.e. the ID for /dev/video3 is 3). Default is 0.
            
        """
        
        self.cameraID = cameraID
        self.exit = Event()
        self.start()
    


    def stopCapture(self):
        """ Stop capture.
        """
        
        self.exit.set()

        time.sleep(1)

        log.info("Joining capture...")

        # Wait for the capture to join for 60 seconds, then terminate
        for i in range(60):
            if self.is_alive():
                time.sleep(1)
            else:
                break

        if self.is_alive():
            log.info('Terminating capture...')
            self.terminate()


    def initVideoDevice(self):
        """ Initialize the video device. """

        device = None

        # use a file as the video source
        if self.video_file is not None:
            device = cv2.VideoCapture(self.video_file)

        # Use a device as the video source
        else:

            # If an analog camera is used, skip the ping
            ip_cam = False
            if "rtsp" in str(self.config.deviceID):
                ip_cam = True


            if ip_cam:

                ### If the IP camera is used, check first if it can be pinged

                # Extract the IP address
                ip = re.findall(r"[0-9]+(?:\.[0-9]+){3}", self.config.deviceID)

                # Check if the IP address was found
                if ip:
                    ip = ip[0]

                    # Try pinging up to 500 times, waiting 5 seconds between attempts
                    ping_success = False

                    for i in range(500):

                        print('Trying to ping the IP camera...')
                        ping_success = ping(ip)

                        if ping_success:
                            log.info("Camera IP ping successful!")
                            break

                        time.sleep(5)

                    if not ping_success:
                        log.error("Can't ping the camera IP!")
                        return None

                else:
                    return None



            # Init the video device
            log.info("Initializing the video device...")
            log.info("Device: " + str(self.config.deviceID))
            if self.config.force_v4l2:
                device = cv2.VideoCapture(self.config.deviceID, cv2.CAP_V4L2)
                device.set(cv2.CAP_PROP_CONVERT_RGB, 0)
            else:
                device = cv2.VideoCapture(self.config.deviceID)

            # Try setting the resolution if using a video device, not gstreamer
            try:

                # This will fail if the video device is a gstreamer pipe
                int(self.config.deviceID)
                
                # Set the resolution (crashes if using an IP camera and gstreamer!)
                device.set(3, self.config.width_device)
                device.set(4, self.config.height_device)

            except:
                pass


        return device


    def run(self):
        """ Capture frames.
        """
        
        # Init the video device
        device = self.initVideoDevice()


        if device is None:

            log.info('The video source could not be opened!')
            self.exit.set()
            return False


        # Wait until the device is opened
        device_opened = False
        for i in range(20):
            time.sleep(1)
            if device.isOpened():
                device_opened = True
                break


        # If the device could not be opened, stop capturing
        if not device_opened:
            log.info('The video source could not be opened!')
            self.exit.set()
            return False

        else:
            log.info('Video device opened!')



        # Throw away the first 10 frames
        for i in range(10):
            device.read()


        first = True

        wait_for_reconnect = False
        
        # Run until stopped from the outside
        while not self.exit.is_set():


            lastTime = 0
            
            if first:
                self.startTime1.value = 0
            else:
                self.startTime2.value = 0
            

            # If the video device was disconnected, wait 5s for reconnection
            if wait_for_reconnect:

                print('Reconnecting...')

                while not self.exit.is_set():

                    log.info('Waiting for the video device to be reconnected...')

                    time.sleep(5)

                    # Reinit the video device
                    device = self.initVideoDevice()


                    if device is None:
                        print("The video device couldn't be connected! Retrying...")
                        continue


                    if self.exit.is_set():
                        break

                    # Read the frame
                    log.info("Reading frame...")
                    ret, frame = device.read()
                    log.info("Frame read!")

                    # If the connection was made and the frame was retrieved, continue with the capture
                    if ret:
                        log.info('Video device reconnected successfully!')
                        wait_for_reconnect = False
                        break


                wait_for_reconnect = False


            t_frame = 0
            t_assignment = 0
            t_convert = 0
            t_block = time.time()
            block_frames = 256

            log.info('Grabbing a new block of {} frames...'.format(block_frames))
            for i in range(block_frames):

                # Read the frame
                t1_frame = time.time()
                ret, frame = device.read()
                t_frame = time.time() - t1_frame


                # If the video device was disconnected, wait for reconnection
                if not ret:

                    log.info('Frame grabbing failed, video device is probably disconnected!')

                    wait_for_reconnect = True
                    break



                # If the end of the file was reached, stop the capture
                if (self.video_file is not None) and (frame is None):

                    log.info('End of video file! Press Ctrl+C to finish.')

                    self.exit.set()
                    
                    time.sleep(0.1)

                    break

                
                t = time.time()

                if i == 0: 
                    startTime = t

                # Consider frames dropped if more than 1.5 frame intervals have passed since the last frame
                elif (t - lastTime) >= self.time_for_drop:
                    
                    # Calculate the number of dropped frames
                    n_dropped = int((t - lastTime)*self.config.fps)
                    
                    if self.config.report_dropped_frames:
                        log.info(str(n_dropped) + " frames dropped! Time for frame: {:.3f}, convert: {:.3f}, assignment: {:.3f}".format(t_frame, t_convert, t_assignment))

                    self.dropped_frames += n_dropped

                    

                lastTime = t
                
                t1_convert = time.time()

                # Convert the frame to grayscale
                #gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

                if len(frame.shape) == 3:

                    # If a color image is given, take the green channel
                    if frame.shape[2] == 3:

                        gray = frame[:, :, 1]

                    else:
                        gray = frame[:, :, 0]

                else:
                    gray = frame


                # Cut the frame to the region of interest (ROI)
                gray = gray[self.config.roi_up:self.config.roi_down, \
                    self.config.roi_left:self.config.roi_right]


                t_convert = time.time() - t1_convert


                # Assign the frame to shared memory
                t1_assign = time.time()
                if first:
                    self.array1[i, :gray.shape[0], :gray.shape[1]] = gray
                else:
                    self.array2[i, :gray.shape[0], :gray.shape[1]] = gray

                t_assignment = time.time() - t1_assign

                # If video is loaded from a file, simulate real FPS
                if self.video_file is not None:

                    time.sleep(1.0/self.config.fps)

                    # If the video finished, stop the capture
                    if not device.isOpened():
                        self.exit.set()



            if self.exit.is_set():
                wait_for_reconnect = False
                log.info('Capture exited!')
                break


            if not wait_for_reconnect:

                # Set the starting value of the frame block, which indicates to the compression that the
                # block is ready for processing
                if first:
                    self.startTime1.value = startTime

                else:
                    self.startTime2.value = startTime

                log.info('New block of raw frames available for compression with starting time: {:s}'.format(str(startTime)))

            
            # Switch the frame block buffer flags
            first = not first
            if self.config.report_dropped_frames:
                log.info('Estimated FPS: {:.3f}'.format(block_frames/(time.time() - t_block)))
        

        log.info('Releasing video device...')
        device.release()
        log.info('Video device released!')
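The loop above fills two shared-memory frame blocks alternately, so the compression process can read a finished block while the next one is being captured. A toy sketch of that ping-pong buffering, with device.read() replaced by synthetic frames and the block/frame sizes shrunk; the RawArray/Value layout here is an illustration, not the original shared-memory setup:

import time
import numpy as np
from multiprocessing import Process, Event, Value
from multiprocessing.sharedctypes import RawArray

BLOCK, H, W = 4, 8, 8    # tiny sizes, just for the sketch

def shared_block():
    return RawArray('B', BLOCK * H * W)

def capture(buf1, start1, buf2, start2, stop):
    arr1 = np.frombuffer(buf1, dtype=np.uint8).reshape(BLOCK, H, W)
    arr2 = np.frombuffer(buf2, dtype=np.uint8).reshape(BLOCK, H, W)
    first = True
    while not stop.is_set():
        block, start = (arr1, start1) if first else (arr2, start2)
        start.value = 0                   # 0 means "block being filled"
        t0 = time.time()
        for i in range(BLOCK):
            block[i, :, :] = i            # stand-in for device.read()
        start.value = t0                  # non-zero: block ready for reading
        first = not first                 # switch to the other buffer
        time.sleep(0.05)

if __name__ == '__main__':
    buf1, buf2 = shared_block(), shared_block()
    start1, start2 = Value('d', 0.0), Value('d', 0.0)
    stop = Event()
    p = Process(target=capture, args=(buf1, start1, buf2, start2, stop))
    p.start()
    time.sleep(0.5)
    stop.set()
    p.join()
    print('last block start times:', start1.value, start2.value)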
Exemple #43
0
 def __init__(self, output_queue):
     WigProcess.__init__(self)
     self.__stop__ = Event()
     self.__queue__ = output_queue
Exemple #44
0
class Mediator(WigProcess):
    """
    This intermediary class pushes IEEE 802.11 frames of specific
    types/subtypes to the different frame-processing consumer classes.
    """

    def __init__(self, frames_queue, output_queue, producer_type, injection_queue=None):
        WigProcess.__init__(self)
        self.__queue__ = frames_queue
        self.__output_queue__ = output_queue
        self.__injection_queue__ = injection_queue

        self.__stop__ = Event()

        self.__producer_type__ = producer_type
        self.__timeout_event__ = Event()

        self.consumers_list = [FramesStats,
                               InformationElementsStats,
                               CiscoClientExtensions,
                               WiFiProtectedSetup,
                               WiFiDirect,
                               HewlettPackardVendorSpecificTypeZero,
                               AppleWirelessDirectLink]

    def run(self):
        """
        Process data from the frames queue and write them to specific queues.
        """
        self.set_process_title()

        if self.__producer_type__ == INFINITE_TYPE:
            self.run_from_infinite_producers()
        else:
            self.run_from_finite_producers()

    def run_from_finite_producers(self):
        """
        This method handles the state when processing data from finite
        producers.
        """
        # Consumers initialization
        consumer_list = list()
        for consumer in self.consumers_list:
            consumer_queue = Queue()
            consumer_instance = consumer(consumer_queue, self.__output_queue__)
            consumer_instance.start()
            consumer_list.append((consumer_instance, consumer_queue))

        try:
            while not self.__stop__.is_set():
                try:
                    if self.__timeout_event__.is_set():
                        # We are working with a finite producer, such as a
                        # pcap file. If the queue stays empty for 30 seconds
                        # we assume the producers have finished and we stop
                        # processing.
                        frame = self.__queue__.get(timeout=30)
                    else:
                        # The timeout event is not set, so producers may still
                        # be pending to start; wait longer before giving up.
                        frame = self.__queue__.get(timeout=300)
                except Empty:
                    break
                for item in consumer_list:
                    consumer = item[0]
                    consumer_queue = item[1]
                    # Check consumer filters
                    frame_type = ieee80211.get_frame_type(frame)
                    if frame_type in consumer.get_frame_type_filter():
                        frame_subtype = ieee80211.get_frame_subtype(frame)
                        if frame_subtype in consumer.get_frame_subtype_filter():
                            consumer_queue.put(frame)
                    # If filters are empty we put the frame into the queue.
                    if not consumer.get_frame_type_filter() and \
                            not consumer.get_frame_subtype_filter():
                        consumer_queue.put(frame)
        # Ignore SIGINT signal, this is handled by parent.
        except KeyboardInterrupt:
            pass
        except Exception as e:
            self.__output_queue__.put({'Exception': str(e)})
        finally:
            # We need to wait for consumers to finish.
            self.__output_queue__.put({' ': 'Waiting for modules to finish. Please wait...'})
            while True:
                try:
                    if consumer_list:
                        # Iterate over a copy, since items are removed below.
                        for item in list(consumer_list):
                            consumer = item[0]
                            consumer_queue = item[1]
                            if consumer_queue.empty():
                                consumer.shutdown()
                                consumer_list.remove(item)
                    else:
                        break
                    # Wait between checks to avoid high cpu consumption.
                    time.sleep(5)
                # except KeyboardInterrupt:
                    # traceback.print_stack()
                except Exception as e:
                    self.__output_queue__.put({'Exception': str(e)})

    def run_from_infinite_producers(self):
        """
        This method handles the state when processing data from infinite
        producers.
        """
        # Consumers initialization
        consumer_list = list()
        for consumer in self.consumers_list:
            consumer_queue = Queue()
            if self.__injection_queue__:
                consumer_instance = consumer(consumer_queue, self.__output_queue__, self.__injection_queue__)
            else:
                consumer_instance = consumer(consumer_queue, self.__output_queue__)
            consumer_instance.start()
            consumer_list.append((consumer_instance, consumer_queue))

        try:
            while not self.__stop__.is_set():
                # We are working with an infinite producer, such as a network
                # interface card, so we block until the producers put a frame
                # on the queue.
                frame = self.__queue__.get()
                for item in consumer_list:
                    consumer = item[0]
                    consumer_queue = item[1]
                    # Check consumer filters
                    frame_type = ieee80211.get_frame_type(frame)
                    if frame_type in consumer.get_frame_type_filter():
                        frame_subtype = ieee80211.get_frame_subtype(frame)
                        if frame_subtype in consumer.get_frame_subtype_filter():
                            consumer_queue.put(frame)
                    # If filters are empty we put the frame into the queue.
                    if not consumer.get_frame_type_filter() and \
                            not consumer.get_frame_subtype_filter():
                        consumer_queue.put(frame)
        # Ignore SIGINT signal, this is handled by parent.
        except KeyboardInterrupt:
            pass
        except Exception as e:
            self.__output_queue__.put({'Exception': str(e)})
        finally:
            for item in consumer_list:
                consumer = item[0]
                if consumer.is_alive():
                    consumer.shutdown()
                    # Wait up to 30 seconds for the consumer to shut down;
                    # Process.join() does not raise on timeout, so check
                    # is_alive() afterwards.
                    consumer.join(30)
                    if consumer.is_alive():
                        # Force the consumer to terminate.
                        self.__output_queue__.put({'Timeout Error':
                              'Forcing %s to terminate.' % consumer.__class__.__name__})
                        consumer.terminate()

    def shutdown(self):
        """
        This method sets the __stop__ event to stop the process.
        """
        self.__stop__.set()

    def timeout_event(self):
        """
        This method sets the __timeout_event__ to change the queue timeout.
        This was added to fix the race condition between a new producer
        starting and the queue-timeout monitoring in the mediator.
        """
        self.__timeout_event__.set()
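Stripped of the IEEE 802.11 specifics, the dispatch step above is a fan-out: every consumer advertises type/subtype filters and the mediator copies each frame into every matching queue, with empty filters matching everything. A hedged sketch of that routing with dummy consumers:

from multiprocessing import Queue

def dispatch(frame, frame_type, frame_subtype, consumer_list):
    """Copy one frame into the queue of every consumer whose filters match."""
    for consumer, consumer_queue in consumer_list:
        type_filter = consumer.get_frame_type_filter()
        subtype_filter = consumer.get_frame_subtype_filter()
        if not type_filter and not subtype_filter:
            consumer_queue.put(frame)        # empty filters accept all frames
        elif frame_type in type_filter and frame_subtype in subtype_filter:
            consumer_queue.put(frame)

class AcceptAll(object):
    def get_frame_type_filter(self):
        return []
    def get_frame_subtype_filter(self):
        return []

class BeaconsOnly(object):
    def get_frame_type_filter(self):
        return [0]               # management frames
    def get_frame_subtype_filter(self):
        return [8]               # beacon subtype

if __name__ == '__main__':
    consumers = [(AcceptAll(), Queue()), (BeaconsOnly(), Queue())]
    dispatch(b'fake-beacon-frame', frame_type=0, frame_subtype=8,
             consumer_list=consumers)
    print([q.get(timeout=1) for _, q in consumers])   # frame delivered twice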
Exemple #45
0
class Sentinel(object):
    def __init__(self,
                 stop_event,
                 start_event,
                 broker=None,
                 timeout=Conf.TIMEOUT,
                 start=True):
        # Make sure we catch signals for the pool
        signal.signal(signal.SIGINT, signal.SIG_IGN)
        signal.signal(signal.SIGTERM, signal.SIG_DFL)
        self.pid = current_process().pid
        self.parent_pid = get_ppid()
        self.name = current_process().name
        self.broker = broker or get_broker()
        self.reincarnations = 0
        self.tob = timezone.now()
        self.stop_event = stop_event
        self.start_event = start_event
        self.pool_size = Conf.WORKERS
        self.pool = []
        self.timeout = timeout
        self.task_queue = Queue(
            maxsize=Conf.QUEUE_LIMIT) if Conf.QUEUE_LIMIT else Queue()
        self.result_queue = Queue()
        self.event_out = Event()
        self.monitor = None
        self.pusher = None
        if start:
            self.start()

    def start(self):
        self.broker.ping()
        self.spawn_cluster()
        self.guard()

    def status(self):
        if not self.start_event.is_set() and not self.stop_event.is_set():
            return Conf.STARTING
        elif self.start_event.is_set() and not self.stop_event.is_set():
            if self.result_queue.empty() and self.task_queue.empty():
                return Conf.IDLE
            return Conf.WORKING
        elif self.stop_event.is_set() and self.start_event.is_set():
            if self.monitor.is_alive() or self.pusher.is_alive() or len(
                    self.pool) > 0:
                return Conf.STOPPING
            return Conf.STOPPED

    def spawn_process(self, target, *args):
        """
        :type target: function or class
        """
        p = Process(target=target, args=args)
        p.daemon = True
        if target == worker:
            p.timer = args[2]
            self.pool.append(p)
        p.start()
        return p

    def spawn_pusher(self):
        return self.spawn_process(pusher, self.task_queue, self.event_out,
                                  self.broker)

    def spawn_worker(self):
        self.spawn_process(worker, self.task_queue, self.result_queue,
                           Value('f', -1), self.timeout)

    def spawn_monitor(self):
        return self.spawn_process(monitor, self.result_queue, self.broker)

    def reincarnate(self, process):
        """
        :param process: the process to reincarnate
        :type process: Process or None
        """
        db.connections.close_all()  # Close any old connections
        if process == self.monitor:
            self.monitor = self.spawn_monitor()
            logger.error(
                _("reincarnated monitor {} after sudden death").format(
                    process.name))
        elif process == self.pusher:
            self.pusher = self.spawn_pusher()
            logger.error(
                _("reincarnated pusher {} after sudden death").format(
                    process.name))
        else:
            self.pool.remove(process)
            self.spawn_worker()
            if self.timeout and int(process.timer.value) == 0:
                # only need to terminate on timeout, otherwise we risk destabilizing the queues
                process.terminate()
                logger.warn(
                    _("reincarnated worker {} after timeout").format(
                        process.name))
            elif int(process.timer.value) == -2:
                logger.info(_("recycled worker {}").format(process.name))
            else:
                logger.error(
                    _("reincarnated worker {} after death").format(
                        process.name))

        self.reincarnations += 1

    def spawn_cluster(self):
        self.pool = []
        Stat(self).save()
        db.connection.close()
        # spawn worker pool
        for __ in range(self.pool_size):
            self.spawn_worker()
        # spawn auxiliary
        self.monitor = self.spawn_monitor()
        self.pusher = self.spawn_pusher()
        # set worker cpu affinity if needed
        if psutil and Conf.CPU_AFFINITY:
            set_cpu_affinity(Conf.CPU_AFFINITY, [w.pid for w in self.pool])

    def guard(self):
        logger.info(
            _('{} guarding cluster at {}').format(current_process().name,
                                                  self.pid))
        self.start_event.set()
        Stat(self).save()
        logger.info(_('Q Cluster-{} running.').format(self.parent_pid))
        scheduler(broker=self.broker)
        counter = 0
        cycle = 0.5  # guard loop sleep in seconds
        # Guard loop. Runs at least once
        while not self.stop_event.is_set() or not counter:
            # Check Workers
            for p in self.pool:
                # Are you alive?
                if not p.is_alive() or (self.timeout and p.timer.value == 0):
                    self.reincarnate(p)
                    continue
                # Decrement timer if work is being done
                if self.timeout and p.timer.value > 0:
                    p.timer.value -= cycle
            # Check Monitor
            if not self.monitor.is_alive():
                self.reincarnate(self.monitor)
            # Check Pusher
            if not self.pusher.is_alive():
                self.reincarnate(self.pusher)
            # Call the scheduler roughly every 30 seconds
            counter += cycle
            if counter == 30 and Conf.SCHEDULER:
                counter = 0
                scheduler(broker=self.broker)
            # Save current status
            Stat(self).save()
            sleep(cycle)
        self.stop()

    def stop(self):
        Stat(self).save()
        name = current_process().name
        logger.info(_('{} stopping cluster processes').format(name))
        # Stopping pusher
        self.event_out.set()
        # Wait for it to stop
        while self.pusher.is_alive():
            sleep(0.1)
            Stat(self).save()
        # Put poison pills in the queue
        for __ in range(len(self.pool)):
            self.task_queue.put('STOP')
        self.task_queue.close()
        # wait for the task queue to empty
        self.task_queue.join_thread()
        # Wait for all the workers to exit
        while len(self.pool):
            for p in self.pool:
                if not p.is_alive():
                    self.pool.remove(p)
            sleep(0.1)
            Stat(self).save()
        # Finally stop the monitor
        self.result_queue.put('STOP')
        self.result_queue.close()
        # Wait for the result queue to empty
        self.result_queue.join_thread()
        logger.info(_('{} waiting for the monitor.').format(name))
        # Wait for everything to close or time out
        count = 0
        if not self.timeout:
            self.timeout = 30
        while self.status() == Conf.STOPPING and count < self.timeout * 10:
            sleep(0.1)
            Stat(self).save()
            count += 1
        # Final status
        Stat(self).save()
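The stop() sequence above is the classic poison-pill shutdown: put one 'STOP' token per worker, close the queue, then wait for the pool to drain. A minimal sketch of just that sequence, with trivial placeholder workers instead of django-q's:

import time
from multiprocessing import Process, Queue

def worker(task_queue):
    # Consume tasks until the 'STOP' poison pill is received.
    for task in iter(task_queue.get, 'STOP'):
        print('processed', task)

if __name__ == '__main__':
    task_queue = Queue()
    pool = [Process(target=worker, args=(task_queue,)) for _ in range(3)]
    for p in pool:
        p.start()
    for task in range(10):
        task_queue.put(task)
    # Shutdown: one poison pill per worker, close the queue, wait for the pool.
    for _ in pool:
        task_queue.put('STOP')
    task_queue.close()
    task_queue.join_thread()
    while pool:
        pool = [p for p in pool if p.is_alive()]
        time.sleep(0.1)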
Exemple #46
0
 def __init__(self, input_queue, ifaces):
     WigProcess.__init__(self)
     self.__stop__ = Event()
     self.__ifaces__ = ifaces
     self.__queue__ = input_queue
Exemple #47
0
 def __init__(self, scheme=None):
     self.proc = None
     self.daemon = None
     self.stop = Event()
     self.scheme = scheme
Exemple #48
0
class Cluster(object):
    def __init__(self, broker=None):
        self.broker = broker or get_broker()
        self.sentinel = None
        self.stop_event = None
        self.start_event = None
        self.pid = current_process().pid
        self.host = socket.gethostname()
        self.timeout = Conf.TIMEOUT
        signal.signal(signal.SIGTERM, self.sig_handler)
        signal.signal(signal.SIGINT, self.sig_handler)

    def start(self):
        # Start Sentinel
        self.stop_event = Event()
        self.start_event = Event()
        self.sentinel = Process(target=Sentinel,
                                args=(self.stop_event, self.start_event,
                                      self.broker, self.timeout))
        self.sentinel.start()
        logger.info(_('Q Cluster-{} starting.').format(self.pid))
        while not self.start_event.is_set():
            sleep(0.1)
        return self.pid

    def stop(self):
        if not self.sentinel.is_alive():
            return False
        logger.info(_('Q Cluster-{} stopping.').format(self.pid))
        self.stop_event.set()
        self.sentinel.join()
        logger.info(_('Q Cluster-{} has stopped.').format(self.pid))
        self.start_event = None
        self.stop_event = None
        return True

    def sig_handler(self, signum, frame):
        logger.debug(
            _('{} got signal {}').format(
                current_process().name,
                Conf.SIGNAL_NAMES.get(signum, 'UNKNOWN')))
        self.stop()

    @property
    def stat(self):
        if self.sentinel:
            return Stat.get(self.pid)
        return Status(self.pid)

    @property
    def is_starting(self):
        return self.stop_event and self.start_event and not self.start_event.is_set()

    @property
    def is_running(self):
        return self.stop_event and self.start_event and self.start_event.is_set()

    @property
    def is_stopping(self):
        return (self.stop_event and self.start_event
                and self.start_event.is_set() and self.stop_event.is_set())

    @property
    def has_stopped(self):
        return self.start_event is None and self.stop_event is None and self.sentinel
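start() above blocks on start_event until the Sentinel child signals that it is up, and stop() reverses the handshake with stop_event. A bare-bones sketch of that two-event handshake, with the child body standing in for the real Sentinel:

from multiprocessing import Process, Event
from time import sleep

def sentinel(stop_event, start_event):
    # Pool/monitor/pusher setup would happen here in the real Sentinel.
    start_event.set()               # tell the parent we are ready
    while not stop_event.is_set():  # stand-in for the guard loop
        sleep(0.1)

if __name__ == '__main__':
    stop_event, start_event = Event(), Event()
    child = Process(target=sentinel, args=(stop_event, start_event))
    child.start()
    while not start_event.is_set():   # block until the child reports ready
        sleep(0.1)
    print('cluster running')
    stop_event.set()                  # request shutdown
    child.join()
    print('cluster stopped')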
Exemple #49
0
def test_scheduler(broker, monkeypatch):
    broker.list_key = 'scheduler_test:q'
    broker.delete_queue()
    schedule = create_schedule('math.copysign',
                               1,
                               -1,
                               name='test math',
                               hook='django_q.tests.tasks.result',
                               schedule_type=Schedule.HOURLY,
                               repeats=1)
    assert schedule.last_run() is None
    # check duplicate constraint
    with pytest.raises(IntegrityError):
        schedule = create_schedule('math.copysign',
                                   1,
                                   -1,
                                   name='test math',
                                   hook='django_q.tests.tasks.result',
                                   schedule_type=Schedule.HOURLY,
                                   repeats=1)
    # run scheduler
    scheduler(broker=broker)
    # set up the workflow
    task_queue = Queue()
    stop_event = Event()
    stop_event.set()
    # push it
    pusher(task_queue, stop_event, broker=broker)
    assert task_queue.qsize() == 1
    assert broker.queue_size() == 0
    task_queue.put('STOP')
    # let a worker handle them
    result_queue = Queue()
    worker(task_queue, result_queue, Value('b', -1))
    assert result_queue.qsize() == 1
    result_queue.put('STOP')
    # store the results
    monitor(result_queue)
    assert result_queue.qsize() == 0
    schedule = Schedule.objects.get(pk=schedule.pk)
    assert schedule.repeats == 0
    assert schedule.last_run() is not None
    assert schedule.success() is True
    assert schedule.next_run < arrow.get(timezone.now()).shift(hours=+1)
    task = fetch(schedule.task)
    assert task is not None
    assert task.success is True
    assert task.result < 0
    # Once schedule with delete
    once_schedule = create_schedule('django_q.tests.tasks.word_multiply',
                                    2,
                                    word='django',
                                    schedule_type=Schedule.ONCE,
                                    repeats=-1,
                                    hook='django_q.tests.tasks.result')
    assert hasattr(once_schedule, 'pk') is True
    # negative repeats
    always_schedule = create_schedule('django_q.tests.tasks.word_multiply',
                                      2,
                                      word='django',
                                      schedule_type=Schedule.DAILY,
                                      repeats=-1,
                                      hook='django_q.tests.tasks.result')
    assert hasattr(always_schedule, 'pk') is True
    # Minute schedule
    minute_schedule = create_schedule('django_q.tests.tasks.word_multiply',
                                      2,
                                      word='django',
                                      schedule_type=Schedule.MINUTES,
                                      minutes=10)
    assert hasattr(minute_schedule, 'pk') is True
    # Cron schedule
    cron_schedule = create_schedule('django_q.tests.tasks.word_multiply',
                                    2,
                                    word='django',
                                    schedule_type=Schedule.CRON,
                                    cron="0 22 * * 1-5")
    assert hasattr(cron_schedule, 'pk') is True
    assert cron_schedule.full_clean() is None
    assert cron_schedule.__unicode__() == 'django_q.tests.tasks.word_multiply'
    with pytest.raises(ValidationError):
        create_schedule('django_q.tests.tasks.word_multiply',
                        2,
                        word='django',
                        schedule_type=Schedule.CRON,
                        cron="0 22 * * 1-12")
    # All other types
    for t in Schedule.TYPE:
        if t[0] == Schedule.CRON:
            continue
        schedule = create_schedule('django_q.tests.tasks.word_multiply',
                                   2,
                                   word='django',
                                   schedule_type=t[0],
                                   repeats=1,
                                   hook='django_q.tests.tasks.result')
        assert schedule is not None
        assert schedule.last_run() is None
        scheduler(broker=broker)
    # via model
    Schedule.objects.create(func='django_q.tests.tasks.word_multiply',
                            args='2',
                            kwargs='word="django"',
                            schedule_type=Schedule.DAILY)
    # scheduler
    scheduler(broker=broker)
    # ONCE schedule should be deleted
    assert Schedule.objects.filter(pk=once_schedule.pk).exists() is False
    # Catch up On
    monkeypatch.setattr(Conf, 'CATCH_UP', True)
    now = timezone.now()
    schedule = create_schedule('django_q.tests.tasks.word_multiply',
                               2,
                               word='catch_up',
                               schedule_type=Schedule.HOURLY,
                               next_run=timezone.now() - timedelta(hours=12),
                               repeats=-1)
    scheduler(broker=broker)
    schedule = Schedule.objects.get(pk=schedule.pk)
    assert schedule.next_run < now
    # Catch up off
    monkeypatch.setattr(Conf, 'CATCH_UP', False)
    scheduler(broker=broker)
    schedule = Schedule.objects.get(pk=schedule.pk)
    assert schedule.next_run > now
    # Done
    broker.delete_queue()
Exemple #50
0
class LBMachineMaster(object):
    """Controls execution of a LB simulation on a single physical machine
    (possibly with multiple GPUs and multiple LB subdomains being simulated)."""
    def __init__(self,
                 config,
                 subdomains,
                 lb_class,
                 subdomain_addr_map=None,
                 channel=None,
                 iface=None):
        """
        :param config: LBConfig object
        :param subdomains: list of SubdomainSpec objects assigned to this machine
        :param lb_class: simulation class descendant from LBSim
        :param subdomain_addr_map: dict mapping subdomain IDs to IP/DNS addresses
        :param channel: execnet Channel object for communication with the
                controller
        :param iface: network interface on which to listen for connections from
                other subdomains
        """

        self.subdomains = subdomains
        self.config = config
        self.lb_class = lb_class
        self._subdomain_addr_map = subdomain_addr_map
        self.runners = []
        self._subdomain_id_to_runner = {}
        self._pipes = []
        self._vis_process = None
        self._vis_quit_event = None
        self._quit_event = Event()
        self._channel = channel

        atexit.register(lambda event: event.set(), event=self._quit_event)

        if iface is not None:
            self._iface = iface
        else:
            self._iface = '*'

        self.config.logger = util.setup_logger(self.config)

    def _assign_subdomains_to_gpus(self):
        subdomain2gpu = {}

        try:
            gpus = len(self.config.gpus)
            for i, subdomain in enumerate(self.subdomains):
                subdomain2gpu[subdomain.id] = self.config.gpus[i % gpus]
        except TypeError:
            for subdomain in self.subdomains:
                subdomain2gpu[subdomain.id] = 0

        return subdomain2gpu

    def _get_ctypes_float(self):
        if self.config.precision == 'double':
            return ctypes.c_double
        else:
            return ctypes.c_float

    def _init_connectors(self):
        """Creates subdomain connectors for all subdomains connections."""
        # A set to keep track which connections are already created.
        _subdomain_conns = set()

        # IDs of the subdomains that are local to this master.
        local_subdomain_ids = set([b.id for b in self.subdomains])
        local_subdomain_map = dict([(b.id, b) for b in self.subdomains])
        ipc_files = []

        for subdomain in self.subdomains:
            connecting_subdomains = subdomain.connecting_subdomains()
            for face, nbid in connecting_subdomains:
                if (subdomain.id, nbid) in _subdomain_conns:
                    continue

                _subdomain_conns.add((subdomain.id, nbid))
                _subdomain_conns.add((nbid, subdomain.id))

                cpair = subdomain.get_connection(face, nbid)
                size1 = cpair.src.elements
                size2 = cpair.dst.elements
                ctype = self._get_ctypes_float()

                opp_face = subdomain.opposite_face(face)
                if (opp_face, nbid) in connecting_subdomains:
                    size1 *= 2
                    size2 *= 2
                    face_str = '{0} and {1}'.format(face, opp_face)
                else:
                    face_str = str(face)

                self.config.logger.debug(
                    "Subdomain connection: {0} <-> {1}: {2}/{3}"
                    "-element buffer (face {4}).".format(
                        subdomain.id, nbid, size1, size2, face_str))

                if nbid in local_subdomain_ids:
                    c1, c2 = ZMQSubdomainConnector.make_ipc_pair(
                        ctype, (size1, size2), (subdomain.id, nbid))
                    subdomain.add_connector(nbid, c1)
                    ipc_files.append(c1.ipc_file)
                    local_subdomain_map[nbid].add_connector(subdomain.id, c2)
                else:
                    receiver = subdomain.id > nbid
                    if receiver:
                        addr = "tcp://{0}".format(
                            self._subdomain_addr_map[nbid])
                    else:
                        addr = "tcp://{0}".format(self._iface)
                    if self.config.compress_intersubdomain_data:
                        c1 = CompressedZMQRemoteSubdomainConnector(
                            addr, receiver=subdomain.id > nbid)
                    else:
                        c1 = ZMQRemoteSubdomainConnector(
                            addr, receiver=subdomain.id > nbid)
                    subdomain.add_connector(nbid, c1)

        return ipc_files

    def _init_visualization_and_io(self, sim):
        if self.config.output:
            output_cls = io.format_name_to_cls[self.config.output_format]
        else:
            output_cls = io.LBOutput

        if self.config.mode != 'visualization':
            return lambda subdomain: output_cls(self.config, subdomain.id)

        # XXX compute total storage requirements
        self._vis_geo_queues = []
        for subdomain in self.subdomains:
            self._vis_geo_queues.append(subdomain.init_visualization())

        vis_lock = mp.Lock()
        vis_config = Value(io.VisConfig, lock=vis_lock)
        vis_config.iteration = -1
        vis_config.field_name = ''
        vis_config.all_subdomains = False

        # Start the visualization engine.
        vis_class = None
        for engine in util.get_visualization_engines():
            if engine.name == self.config.vis_engine:
                vis_class = engine
                break

        if vis_class is None:
            self.config.logger.warning('Requested visualization engine not '
                                       'available.')
            try:
                vis_class = util.get_visualization_engines().next()
            except StopIteration:
                self.config.logger.warning(
                    'No visualization backends available. Falling back to '
                    'batch mode.')
                self.config.mode = 'batch'
                return lambda subdomain: output_cls(self.config, subdomain.id)

        # Event to signal that the visualization process should be terminated.
        self._vis_quit_event = Event()
        self._vis_process = Process(
            target=lambda: vis_class(self.config, self.subdomains,
                                     self._vis_quit_event, self._quit_event,
                                     vis_config, self._vis_geo_queues).run(),
            name='VisEngine')
        self._vis_process.start()

        return lambda subdomain: io.VisualizationWrapper(
            self.config, subdomain, vis_config, output_cls)

    def _finish_visualization(self):
        if self.config.mode != 'visualization':
            return

        self._vis_quit_event.set()
        self._vis_process.join()

    def _run_subprocesses(self, output_initializer, backend_cls,
                          subdomain2gpu):
        ctx = zmq.Context()
        sockets = []
        ipc_files = []

        # Create subdomain runners for all subdomains.
        for subdomain in self.subdomains:
            output = output_initializer(subdomain)
            master_addr = 'ipc://{0}/sailfish-master-{1}_{2}'.format(
                tempfile.gettempdir(), os.getpid(), subdomain.id)
            ipc_files.append(master_addr.replace('ipc://', ''))
            sock = ctx.socket(zmq.PAIR)
            sock.bind(master_addr)
            sockets.append(sock)
            p = Process(target=_start_subdomain_runner,
                        name='Subdomain/{0}'.format(subdomain.id),
                        args=(subdomain, self.config, self.sim,
                              len(self.subdomains), backend_cls,
                              subdomain2gpu[subdomain.id], output,
                              self._quit_event, master_addr, self._channel
                              is not None))
            self.runners.append(p)
            self._subdomain_id_to_runner[subdomain.id] = p

        # Start all subdomain runners.
        for runner in self.runners:
            runner.start()

        ports = {}
        for socket in sockets:
            runner_ports = socket.recv_pyobj()
            ports.update(runner_ports)

        # Only process remote port information if we have a channel open
        # back to the controller.
        if self._channel is not None:
            self._channel.send(ports)
            ports = self._channel.receive()
        else:
            # If there is no channel, we're the single master running in this
            # simulation and no port information should be necessary.
            assert not ports

        self.config.logger.debug('Port map is: {0}'.format(ports))

        for socket in sockets:
            socket.send_pyobj(ports)

        if self._channel is not None and self.config.mode == 'benchmark':
            for socket in sockets:
                ti, min_ti, max_ti = socket.recv_pyobj()
                self._channel.send((tuple(ti), tuple(min_ti), tuple(max_ti)))
                socket.send('ack')

        # Wait for all subdomain runners to finish.
        done_runners = set()
        while len(done_runners) != len(self.runners):
            for runner in self.runners:
                if runner not in done_runners and not runner.is_alive():
                    done_runners.add(runner)

            self._quit_event.wait(1)
            if self._quit_event.is_set():
                self.config.logger.info('Received termination request.')
                time.sleep(0.5)
                for runner in self.runners:
                    if runner not in done_runners:
                        runner.terminate()
                break

        for ipcfile in ipc_files:
            os.unlink(ipcfile)

    def run(self):
        self.config.logger.info('Machine master starting with PID {0}'.format(
            os.getpid()))
        self.config.logger.info('Handling subdomains: {0}'.format(
            [b.id for b in self.subdomains]))

        self.sim = self.lb_class(self.config)
        subdomain2gpu = self._assign_subdomains_to_gpus()

        self.config.logger.info(
            'Subdomain -> GPU map: {0}'.format(subdomain2gpu))

        ipc_files = self._init_connectors()
        output_initializer = self._init_visualization_and_io(self.sim)
        try:
            backend_cls = util.get_backends(
                self.config.backends.split(',')).next()
        except StopIteration:
            self.config.logger.error(
                'Failed to initialize compute backend.'
                ' Make sure pycuda/pyopencl is installed.')
            return

        if self.config.debug_single_process:
            assert len(self.subdomains) == 1, (
                'Only a single subdomain can be '
                'simulated in the single process mode.')

            subdomain = self.subdomains[0]
            output = output_initializer(subdomain)
            self.runner = _start_subdomain_runner(
                subdomain, self.config, self.sim, len(self.subdomains),
                backend_cls, subdomain2gpu[subdomain.id], output,
                self._quit_event, None, self._channel is not None)
            self.config.logger.debug(
                'Finished single process subdomain runner.')
        else:
            self._run_subprocesses(output_initializer, backend_cls,
                                   subdomain2gpu)

        self._finish_visualization()

        for ipcfile in ipc_files:
            os.unlink(ipcfile)

        return self._quit_event.is_set()
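The master above trades port maps with each subdomain runner over a dedicated ZMQ PAIR socket. A rough sketch of that gather-and-broadcast handshake using pyzmq over ipc sockets (POSIX only; the addresses and payloads here are invented for illustration):

import os
import tempfile
from multiprocessing import Process

import zmq

def runner(master_addr, runner_id):
    ctx = zmq.Context()
    sock = ctx.socket(zmq.PAIR)
    sock.connect(master_addr)
    sock.send_pyobj({runner_id: 5000 + runner_id})   # advertise "our" port
    ports = sock.recv_pyobj()                        # receive the merged map
    print('runner', runner_id, 'sees port map', ports)

if __name__ == '__main__':
    ctx = zmq.Context()
    sockets, procs, ipc_files = [], [], []
    for runner_id in range(2):
        addr = 'ipc://{0}/sketch-master-{1}_{2}'.format(
            tempfile.gettempdir(), os.getpid(), runner_id)
        ipc_files.append(addr.replace('ipc://', ''))
        sock = ctx.socket(zmq.PAIR)
        sock.bind(addr)
        sockets.append(sock)
        p = Process(target=runner, args=(addr, runner_id))
        p.start()
        procs.append(p)

    ports = {}
    for sock in sockets:
        ports.update(sock.recv_pyobj())   # gather port info from every runner
    for sock in sockets:
        sock.send_pyobj(ports)            # broadcast the merged map back

    for p in procs:
        p.join()
    for path in ipc_files:
        os.unlink(path)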
Exemple #51
0
class WatchmanSubscriber(object):
    def __init__(self, analysis_directory: AnalysisDirectory) -> None:
        self._base_path = os.path.join(
            analysis_directory.get_root(), ".pyre", self._name
        )  # type: str
        self._alive = True  # type: bool
        self._ready = Event()  # type: multiprocessing.synchronize.Event

    @property
    def _name(self) -> str:
        """
            A name to identify the subscriber. Used as the directory and file names
            for the log, lock, and pid files.
        """
        raise NotImplementedError

    @property
    def _subscriptions(self) -> List[Subscription]:
        """
            List of subscriptions
        """
        raise NotImplementedError

    def _handle_response(self, response: Dict[str, Any]) -> None:
        """
            Callback invoked when a message is received from watchman
        """
        raise NotImplementedError

    @property
    @functools.lru_cache(1)
    def _watchman_client(self) -> "pywatchman.client":  # noqa
        try:
            import pywatchman  # noqa

            # The client will block indefinitely when timeout is None.
            return pywatchman.client(timeout=None)
        except ImportError as exception:
            LOG.info("Not starting %s due to %s", self._name, str(exception))
            sys.exit(1)

    def _subscribe_to_watchman(self, subscription: Subscription) -> None:
        self._watchman_client.query(
            "subscribe", subscription.root, subscription.name, subscription.subscription
        )

    def _run(self) -> None:
        try:
            os.makedirs(self._base_path)
        except OSError:
            pass
        lock_path = os.path.join(self._base_path, "{}.lock".format(self._name))
        pid_path = os.path.join(self._base_path, "{}.pid".format(self._name))

        def cleanup() -> None:
            LOG.info("Cleaning up lock and pid files before exiting.")
            remove_if_exists(pid_path)
            remove_if_exists(lock_path)

        def interrupt_handler(_signal_number=None, _frame=None) -> None:
            LOG.info("Interrupt signal received.")
            cleanup()
            sys.exit(0)

        signal.signal(signal.SIGINT, interrupt_handler)

        # Die silently if unable to acquire the lock.
        with acquire_lock(lock_path, blocking=False):
            file_handler = logging.FileHandler(
                os.path.join(self._base_path, "%s.log" % self._name)
            )
            file_handler.setFormatter(
                logging.Formatter("%(asctime)s %(levelname)s %(message)s")
            )
            LOG.addHandler(file_handler)

            with open(pid_path, "w+") as pid_file:
                pid_file.write(str(os.getpid()))

            for subscription in self._subscriptions:
                self._subscribe_to_watchman(subscription)

            connection = self._watchman_client.recvConn
            if not connection:
                LOG.error("Connection to Watchman for %s not found", self._name)
                sys.exit(1)

            while self._alive:
                # This call is blocking, which prevents this loop from burning CPU.
                response = connection.receive()
                try:
                    if response["is_fresh_instance"]:
                        LOG.info(
                            "Ignoring initial watchman message for %s", response["root"]
                        )
                    else:
                        self._handle_response(response)
                    self._ready.set()  # At least one message has been received.
                except KeyError:
                    pass

            cleanup()

    def daemonize(self) -> None:
        """We double-fork here to detach the daemon process from the parent.
           If we were to just fork the child as a daemon, we'd have to worry
           about the daemon becoming a zombie when the parent exits without
           reaping it."""
        if os.fork() == 0:
            pid = os.fork()
            if pid == 0:
                try:
                    # Closing the sys.stdout and stderr file descriptors here causes
                    # the program to crash when attempting to log.
                    os.close(sys.stdout.fileno())
                    os.close(sys.stderr.fileno())
                    self._run()
                    sys.exit(0)
                except Exception as exception:
                    LOG.info("Not running %s due to %s", self._name, str(exception))
                    sys.exit(1)
            else:
                sys.exit(0)
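daemonize() above uses the standard double fork: the first child forks again and exits immediately, so the grandchild is re-parented to init and the original parent never has to reap it. A bare-bones sketch of that sequence (POSIX only; the daemon body and log path are made up):

import os
import sys
import time

def run_daemon():
    # Stand-in for the subscriber loop: record our pid and exit.
    with open('/tmp/daemon_sketch.log', 'a') as handle:
        handle.write('daemon pid {} started\n'.format(os.getpid()))
    time.sleep(1)

def daemonize():
    if os.fork() == 0:              # first child
        if os.fork() == 0:          # grandchild: the actual daemon
            try:
                run_daemon()
                sys.exit(0)
            except Exception:
                sys.exit(1)
        else:
            sys.exit(0)             # first child exits straight away
    # The original parent falls through and keeps running.

if __name__ == '__main__':
    daemonize()
    print('parent {} keeps running'.format(os.getpid()))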
Exemple #52
0
__project__ = "codename"
__date__    = "28-09-2015"

import threading
from scapy.all import *
from socket import gethostbyaddr
from subprocess import call, PIPE, Popen
from collections import OrderedDict
from copy import deepcopy
from multiprocessing import Process,Event, Queue
from random import randint
from allert import AllertManager
from time import sleep
from datetime import datetime, timedelta

arp_thread_stop_event = Event()

# TODO remove it when testing is complete
from test_dataset import get_data
data_d = get_data()


class ARPHandler(Process):

    def __init__(self, poll_delay=2, prune_period=3600):
        # Process subclasses must call the base initializer before start().
        Process.__init__(self)
        self.delay = poll_delay
        self.arp_table = {}
        self.prune_period = prune_period
        self.am = AllertManager()
        self._stop = Event()
        self.mute = False
Exemple #53
0
class GPUMonitor(Process):
    def __init__(self):
        self.logger = ScreenLogger()

        # Prepare signal
        self.stop_signal = Event()
        self.run_signal = Event()
        self.set_signal = Event()

        # Stores list of available GPUs
        self._free_GPUs = Queue()

        super(GPUMonitor, self).__init__(target=self._monitor)
        self.start()

    def stop(self):
        self.stop_signal.set()

    def _monitor(self):
        while not self.stop_signal.is_set():
            if self.run_signal.is_set():
                # Drain any stale entries from a previous request
                # (Queue.empty() only checks emptiness, it removes nothing)
                while not self._free_GPUs.empty():
                    self._free_GPUs.get_nowait()

                # Get free GPUs
                free = get_free_gpus()

                # Put the number of free GPUs first, so the main process
                # knows how many entries to read from the queue
                self._free_GPUs.put(len(free))

                # Add available GPUs to queue
                for g in free:
                    self._free_GPUs.put(g)

                # Set the signal that main process can start reading queue
                self.set_signal.set()

                # Stop run signal for this process
                self.run_signal.clear()
            else:
                time.sleep(0.5)
                self.set_signal.clear()

    @property
    def free_GPUs(self):
        self.run_signal.set()
        while not self.set_signal.is_set():
            time.sleep(0.2)

        free = []
        for i in range(self._free_GPUs.get()):
            free.append(self._free_GPUs.get())
        return free

    def get_free_GPUs(self, N):
        return _get_free_gpu(self.free_GPUs, N)

    def await_and_set_free_GPU(self, N=0, sleep_seconds=60, stop_after=False):
        cuda_visible_dev = ""
        if N != 0:
            self.logger("Waiting for free GPU.")
            found_gpu = False
            while not found_gpu:
                cuda_visible_dev = self.get_free_GPUs(N=N)
                if cuda_visible_dev:
                    self.logger("Found free GPU: %s" % cuda_visible_dev)
                    found_gpu = True
                else:
                    self.logger("No available GPUs... Sleeping %i seconds." %
                                sleep_seconds)
                    time.sleep(sleep_seconds)
        else:
            self.logger("Using CPU based computations only!")
        self.set_GPUs = cuda_visible_dev
        if stop_after:
            self.stop()

    @property
    def num_currently_visible(self):
        return len(self.set_GPUs.strip().split(","))

    @property
    def set_GPUs(self):
        try:
            return os.environ["CUDA_VISIBLE_DEVICES"]
        except KeyError:
            return ""

    @set_GPUs.setter
    def set_GPUs(self, GPUs):
        set_gpu(GPUs)

    def set_and_stop(self, GPUs):
        self.set_GPUs = GPUs
        self.stop()
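The free_GPUs property above is really a small request/response protocol built from two Events and a Queue: the caller raises run_signal, the monitor pushes the element count followed by the elements and raises set_signal. A hedged sketch of that handshake, with the GPU probing replaced by a static list:

import time
from multiprocessing import Process, Queue, Event

class ResourceMonitor(Process):
    def __init__(self):
        super(ResourceMonitor, self).__init__()
        self.stop_signal = Event()
        self.run_signal = Event()      # caller requests a fresh listing
        self.set_signal = Event()      # monitor signals the listing is ready
        self._results = Queue()
        self.start()

    def run(self):
        while not self.stop_signal.is_set():
            if self.run_signal.is_set():
                free = ['gpu0', 'gpu2']        # stand-in for get_free_gpus()
                self._results.put(len(free))   # element count goes first
                for item in free:
                    self._results.put(item)
                self.set_signal.set()
                self.run_signal.clear()
            else:
                time.sleep(0.1)
                self.set_signal.clear()

    @property
    def free(self):
        self.run_signal.set()
        while not self.set_signal.is_set():
            time.sleep(0.05)
        return [self._results.get() for _ in range(self._results.get())]

if __name__ == '__main__':
    monitor = ResourceMonitor()
    print(monitor.free)          # ['gpu0', 'gpu2']
    monitor.stop_signal.set()
    monitor.join()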
Exemple #54
0
 def __init__(self, analysis_directory: AnalysisDirectory) -> None:
     self._base_path = os.path.join(
         analysis_directory.get_root(), ".pyre", self._name
     )  # type: str
     self._alive = True  # type: bool
     self._ready = Event()  # type: multiprocessing.synchronize.Event

Exemple #55
0
if __name__ == '__main__':
    # np.random.seed(RANDOMSEED)
    # tf.random.set_seed(RANDOMSEED)

    # action_dim = 1
    # observation_dim = 3
    t_s = datetime.now()
    env = EnvironmentMpsen()
    _ = env.reset()
    action_dim = env.get_action_space_length()
    observation_dim = env.get_observation_length()

    # Define two separate event groups: update and rolling
    UPDATE_EVENT = Event()  # PPO update event
    ROLLING_EVENT = Event()  # worker data-collection event
    UPDATE_EVENT.clear()  # no update yet: the flag is False, so the PPO update is paused
    ROLLING_EVENT.set()  # start rolling out: the flag is True and any waiting threads resume, so the workers start collecting data
    update_rlock = RLock()
    episode_rlock = RLock()

    ns = Manager().Namespace()
    ns.GLOBAL_UPDATE_COUNTER = 0
    ns.GLOBAL_EPISODE = 0
    ns.GLOBAL_RUNNING_REWARD = []
    ns.coord_status = True

    QUEUE = Manager().Queue()
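
The UPDATE_EVENT/ROLLING_EVENT pair above implements a hand-over between the PPO updater and the rollout workers: workers collect data while ROLLING_EVENT is set, then block while the trainer updates. A toy sketch of that two-event ping-pong, with a single worker and the PPO logic replaced by sleeps:

import time
from multiprocessing import Process, Event

def worker(update_event, rolling_event, worker_id):
    for _ in range(3):
        rolling_event.wait()                 # blocked while the trainer updates
        print('worker %d collecting data' % worker_id)
        time.sleep(0.2)
        rolling_event.clear()                # hand control to the trainer
        update_event.set()

def trainer(update_event, rolling_event):
    for _ in range(3):
        update_event.wait()                  # wait until data is ready
        print('trainer updating policy')
        time.sleep(0.2)
        update_event.clear()                 # resume the worker
        rolling_event.set()

if __name__ == '__main__':
    update_event, rolling_event = Event(), Event()
    rolling_event.set()                      # the worker goes first
    procs = [Process(target=trainer, args=(update_event, rolling_event)),
             Process(target=worker, args=(update_event, rolling_event, 0))]
    for p in procs:
        p.start()
    for p in procs:
        p.join()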

Exemple #56
0
class Artifacts(object):
    _flush_frequency_sec = 300.
    # notice these two should match
    _save_format = '.csv.gz'
    _compression = 'gzip'
    # hashing constants
    _hash_block_size = 65536
    _pd_artifact_type = 'data-audit-table'

    class _ProxyDictWrite(dict):
        """ Dictionary wrapper that updates an arguments instance on any item set in the dictionary """
        def __init__(self, artifacts_manager, *args, **kwargs):
            super(Artifacts._ProxyDictWrite, self).__init__(*args, **kwargs)
            self._artifacts_manager = artifacts_manager
            # list of artifacts we should not upload (by name & weak-reference)
            self.artifact_metadata = {}
            # list of hash columns to calculate uniqueness for the artifacts
            self.artifact_hash_columns = {}

        def __setitem__(self, key, value):
            # check that value is of type pandas
            if pd and isinstance(value, pd.DataFrame):
                super(Artifacts._ProxyDictWrite, self).__setitem__(key, value)

                if self._artifacts_manager:
                    self._artifacts_manager.flush()
            else:
                raise ValueError(
                    'Artifacts currently support pandas.DataFrame objects only'
                )

        def unregister_artifact(self, name):
            self.artifact_metadata.pop(name, None)
            self.pop(name, None)

        def add_metadata(self, name, metadata):
            self.artifact_metadata[name] = deepcopy(metadata)

        def get_metadata(self, name):
            return self.artifact_metadata.get(name)

        def add_hash_columns(self, artifact_name, hash_columns):
            self.artifact_hash_columns[artifact_name] = hash_columns

        def get_hash_columns(self, artifact_name):
            return self.artifact_hash_columns.get(artifact_name)

    @property
    def registered_artifacts(self):
        # type: () -> Dict[str, Artifact]
        return self._artifacts_container

    @property
    def summary(self):
        # type: () -> str
        return self._summary

    def __init__(self, task):
        self._task = task
        # notice the double link, this is important since the Artifact
        # dictionary needs to signal the Artifacts base on changes
        self._artifacts_container = self._ProxyDictWrite(self)
        self._last_artifacts_upload = {}
        self._unregister_request = set()
        self._thread = None
        self._flush_event = Event()
        self._exit_flag = False
        self._summary = ''
        self._temp_folder = []
        self._task_artifact_list = []
        self._task_edit_lock = RLock()
        self._storage_prefix = None

    def register_artifact(self,
                          name,
                          artifact,
                          metadata=None,
                          uniqueness_columns=True):
        # type: (str, object, Optional[dict], bool) -> ()
        """
        :param str name: name of the artifacts. Notice! it will override previous artifacts if name already exists.
        :param pandas.DataFrame artifact: artifact object, supported artifacts object types: pandas.DataFrame
        :param dict metadata: dictionary of key value to store with the artifact (visible in the UI)
        :param list uniqueness_columns: list of columns for artifact uniqueness comparison criteria. The default value
            is True, which equals to all the columns (same as artifact.columns).
        """
        # currently we support pandas.DataFrame (which we will upload as csv.gz)
        if name in self._artifacts_container:
            LoggerRoot.get_base_logger().info(
                'Register artifact, overwriting existing artifact \"{}\"'.
                format(name))
        self._artifacts_container.add_hash_columns(
            name,
            list(artifact.columns
                 if uniqueness_columns is True else uniqueness_columns))
        self._artifacts_container[name] = artifact
        if metadata:
            self._artifacts_container.add_metadata(name, metadata)

    def unregister_artifact(self, name):
        # type: (str) -> ()
        # Remove artifact from the watch list
        self._unregister_request.add(name)
        self.flush()

    def upload_artifact(self,
                        name,
                        artifact_object=None,
                        metadata=None,
                        delete_after_upload=False):
        # type: (str, Optional[object], Optional[dict], bool) -> bool
        if not Session.check_min_api_version('2.3'):
            LoggerRoot.get_base_logger().warning(
                'Artifacts not supported by your TRAINS-server version, '
                'please upgrade to the latest server version')
            return False

        if name in self._artifacts_container:
            raise ValueError(
                "Artifact by the name of {} is already registered, use register_artifact"
                .format(name))

        artifact_type_data = tasks.ArtifactTypeData()
        override_filename_in_uri = None
        override_filename_ext_in_uri = None
        uri = None
        if np and isinstance(artifact_object, np.ndarray):
            artifact_type = 'numpy'
            artifact_type_data.content_type = 'application/numpy'
            artifact_type_data.preview = str(artifact_object.__repr__())
            override_filename_ext_in_uri = '.npz'
            override_filename_in_uri = name + override_filename_ext_in_uri
            fd, local_filename = mkstemp(prefix=quote(name, safe="") + '.',
                                         suffix=override_filename_ext_in_uri)
            os.close(fd)
            np.savez_compressed(local_filename, **{name: artifact_object})
            delete_after_upload = True
        elif pd and isinstance(artifact_object, pd.DataFrame):
            artifact_type = 'pandas'
            artifact_type_data.content_type = 'text/csv'
            artifact_type_data.preview = str(artifact_object.__repr__())
            override_filename_ext_in_uri = self._save_format
            override_filename_in_uri = name
            fd, local_filename = mkstemp(prefix=quote(name, safe="") + '.',
                                         suffix=override_filename_ext_in_uri)
            os.close(fd)
            artifact_object.to_csv(local_filename,
                                   compression=self._compression)
            delete_after_upload = True
        elif isinstance(artifact_object, Image.Image):
            artifact_type = 'image'
            artifact_type_data.content_type = 'image/png'
            desc = str(artifact_object.__repr__())
            artifact_type_data.preview = desc[1:desc.find(' at ')]
            override_filename_ext_in_uri = '.png'
            override_filename_in_uri = name + override_filename_ext_in_uri
            fd, local_filename = mkstemp(prefix=quote(name, safe="") + '.',
                                         suffix=override_filename_ext_in_uri)
            os.close(fd)
            artifact_object.save(local_filename)
            delete_after_upload = True
        elif isinstance(artifact_object, dict):
            artifact_type = 'JSON'
            artifact_type_data.content_type = 'application/json'
            preview = json.dumps(artifact_object, sort_keys=True, indent=4)
            override_filename_ext_in_uri = '.json'
            override_filename_in_uri = name + override_filename_ext_in_uri
            fd, local_filename = mkstemp(prefix=quote(name, safe="") + '.',
                                         suffix=override_filename_ext_in_uri)
            os.write(fd, bytes(preview.encode()))
            os.close(fd)
            artifact_type_data.preview = preview
            delete_after_upload = True
        elif (isinstance(artifact_object, six.string_types)
              and urlparse(artifact_object).scheme in remote_driver_schemes):
            # we should not upload this, just register
            local_filename = None
            uri = artifact_object
            artifact_type = 'custom'
            artifact_type_data.content_type = mimetypes.guess_type(
                artifact_object)[0]
        elif isinstance(artifact_object, six.string_types + (Path, )):
            # check if single file
            artifact_object = Path(artifact_object)

            artifact_object = artifact_object.expanduser().absolute()
            try:
                create_zip_file = not artifact_object.is_file()
            except Exception:  # Hack for windows pathlib2 bug, is_file isn't valid.
                create_zip_file = True
            else:  # We assume that this is not Windows os
                if artifact_object.is_dir():
                    # change to wildcard
                    artifact_object /= '*'

            if create_zip_file:
                folder = Path('').joinpath(*artifact_object.parts[:-1])
                if not folder.is_dir() or not folder.parts:
                    raise ValueError(
                        "Artifact file/folder '{}' could not be found".format(
                            artifact_object.as_posix()))

                wildcard = artifact_object.parts[-1]
                files = list(Path(folder).rglob(wildcard))
                override_filename_ext_in_uri = '.zip'
                override_filename_in_uri = folder.parts[
                    -1] + override_filename_ext_in_uri
                fd, zip_file = mkstemp(
                    prefix=quote(folder.parts[-1], safe="") + '.',
                    suffix=override_filename_ext_in_uri)
                try:
                    artifact_type_data.content_type = 'application/zip'
                    artifact_type_data.preview = 'Archive content {}:\n'.format(
                        artifact_object.as_posix())

                    with ZipFile(zip_file,
                                 'w',
                                 allowZip64=True,
                                 compression=ZIP_DEFLATED) as zf:
                        for filename in sorted(files):
                            if filename.is_file():
                                relative_file_name = filename.relative_to(
                                    folder).as_posix()
                                artifact_type_data.preview += '{} - {}\n'.format(
                                    relative_file_name,
                                    humanfriendly.format_size(
                                        filename.stat().st_size))
                                zf.write(filename.as_posix(),
                                         arcname=relative_file_name)
                except Exception as e:
                    # failed zipping the folder:
                    LoggerRoot.get_base_logger().warning(
                        'Exception {}\nFailed zipping artifact folder {}'.
                        format(e, folder))
                    return None
                finally:
                    os.close(fd)

                artifact_object = zip_file
                artifact_type = 'archive'
                artifact_type_data.content_type = mimetypes.guess_type(
                    artifact_object)[0]
                local_filename = artifact_object
                delete_after_upload = True
            else:
                if not artifact_object.is_file():
                    raise ValueError(
                        "Artifact file '{}' could not be found".format(
                            artifact_object.as_posix()))

                override_filename_in_uri = artifact_object.parts[-1]
                artifact_object = artifact_object.as_posix()
                artifact_type = 'custom'
                artifact_type_data.content_type = mimetypes.guess_type(
                    artifact_object)[0]
                local_filename = artifact_object
        else:
            raise ValueError("Artifact type {} not supported".format(
                type(artifact_object)))

        # remove from existing list, if exists
        for artifact in self._task_artifact_list:
            if artifact.key == name:
                if artifact.type == self._pd_artifact_type:
                    raise ValueError(
                        "Artifact of name {} already registered, "
                        "use register_artifact instead".format(name))

                self._task_artifact_list.remove(artifact)
                break

        if not local_filename:
            file_size = None
            file_hash = None
        else:
            # check that the file to upload exists
            local_filename = Path(local_filename).absolute()
            if not local_filename.exists() or not local_filename.is_file():
                LoggerRoot.get_base_logger().warning(
                    'Artifact upload failed, cannot find file {}'.format(
                        local_filename.as_posix()))
                return False

            file_hash, _ = self.sha256sum(local_filename.as_posix())
            file_size = local_filename.stat().st_size

            uri = self._upload_local_file(
                local_filename,
                name,
                delete_after_upload=delete_after_upload,
                override_filename=override_filename_in_uri,
                override_filename_ext=override_filename_ext_in_uri)

        timestamp = int(time())

        artifact = tasks.Artifact(
            key=name,
            type=artifact_type,
            uri=uri,
            content_size=file_size,
            hash=file_hash,
            timestamp=timestamp,
            type_data=artifact_type_data,
            display_data=[(str(k), str(v))
                          for k, v in metadata.items()] if metadata else None)

        # update task artifacts
        with self._task_edit_lock:
            self._task_artifact_list.append(artifact)
            self._task.set_artifacts(self._task_artifact_list)

        return True

    def flush(self):
        # type: () -> ()
        # start the thread if it hasn't already:
        self._start()
        # flush the current state of all artifacts
        self._flush_event.set()

    def stop(self, wait=True):
        # type: (bool) -> ()
        # stop the daemon thread and quit
        # wait until the thread exits
        self._exit_flag = True
        self._flush_event.set()
        if wait:
            if self._thread:
                self._thread.join()
            # remove all temp folders
            for f in self._temp_folder:
                try:
                    Path(f).rmdir()
                except Exception:
                    pass

    def _start(self):
        # type: () -> ()
        """ Start daemon thread if any artifacts are registered and thread is not up yet """
        if not self._thread and self._artifacts_container:
            # start the daemon thread
            self._flush_event.clear()
            self._thread = Thread(target=self._daemon)
            self._thread.daemon = True
            self._thread.start()

    def _daemon(self):
        # type: () -> ()
        while not self._exit_flag:
            self._flush_event.wait(self._flush_frequency_sec)
            self._flush_event.clear()
            artifact_keys = list(self._artifacts_container.keys())
            for name in artifact_keys:
                try:
                    self._upload_data_audit_artifacts(name)
                except Exception as e:
                    LoggerRoot.get_base_logger().warning(str(e))

        # create summary
        self._summary = self._get_statistics()

    def _upload_data_audit_artifacts(self, name):
        # type: (str) -> ()
        logger = self._task.get_logger()
        pd_artifact = self._artifacts_container.get(name)
        pd_metadata = self._artifacts_container.get_metadata(name)

        # remove from artifacts watch list
        if name in self._unregister_request:
            try:
                self._unregister_request.remove(name)
            except KeyError:
                pass
            self._artifacts_container.unregister_artifact(name)

        if pd_artifact is None:
            return

        override_filename_ext_in_uri = self._save_format
        override_filename_in_uri = name
        fd, local_csv = mkstemp(prefix=quote(name, safe="") + '.',
                                suffix=override_filename_ext_in_uri)
        os.close(fd)
        local_csv = Path(local_csv)
        pd_artifact.to_csv(local_csv.as_posix(),
                           index=False,
                           compression=self._compression)
        current_sha2, file_sha2 = self.sha256sum(local_csv.as_posix(),
                                                 skip_header=32)
        if name in self._last_artifacts_upload:
            previous_sha2 = self._last_artifacts_upload[name]
            if previous_sha2 == current_sha2:
                # nothing to do, we can skip the upload
                try:
                    local_csv.unlink()
                except Exception:
                    pass
                return
        self._last_artifacts_upload[name] = current_sha2

        # If old trains-server, upload as debug image
        if not Session.check_min_api_version('2.3'):
            logger.report_image(title='artifacts',
                                series=name,
                                local_path=local_csv.as_posix(),
                                delete_after_upload=True,
                                iteration=self._task.get_last_iteration(),
                                max_image_history=2)
            return

        # Find our artifact
        artifact = None
        for an_artifact in self._task_artifact_list:
            if an_artifact.key == name:
                artifact = an_artifact
                break

        file_size = local_csv.stat().st_size

        # upload file
        uri = self._upload_local_file(
            local_csv,
            name,
            delete_after_upload=True,
            override_filename=override_filename_in_uri,
            override_filename_ext=override_filename_ext_in_uri)

        # update task artifacts
        with self._task_edit_lock:
            if not artifact:
                artifact = tasks.Artifact(key=name,
                                          type=self._pd_artifact_type)
                self._task_artifact_list.append(artifact)
            artifact_type_data = tasks.ArtifactTypeData()

            artifact_type_data.data_hash = current_sha2
            artifact_type_data.content_type = "text/csv"
            artifact_type_data.preview = str(
                pd_artifact.__repr__()) + '\n\n' + self._get_statistics(
                    {name: pd_artifact})

            artifact.type_data = artifact_type_data
            artifact.uri = uri
            artifact.content_size = file_size
            artifact.hash = file_sha2
            artifact.timestamp = int(time())
            artifact.display_data = [
                (str(k), str(v)) for k, v in pd_metadata.items()
            ] if pd_metadata else None

            self._task.set_artifacts(self._task_artifact_list)

    def _upload_local_file(self,
                           local_file,
                           name,
                           delete_after_upload=False,
                           override_filename=None,
                           override_filename_ext=None):
        # type: (str, str, bool, Optional[str], Optional[str]) -> str
        """
        Upload local file and return uri of the uploaded file (uploading in the background)
        """
        upload_uri = self._task.output_uri or self._task.get_logger(
        ).get_default_upload_destination()
        if not isinstance(local_file, Path):
            local_file = Path(local_file)
        ev = UploadEvent(
            metric='artifacts',
            variant=name,
            image_data=None,
            upload_uri=upload_uri,
            local_image_path=local_file.as_posix(),
            delete_after_upload=delete_after_upload,
            override_filename=override_filename,
            override_filename_ext=override_filename_ext,
            override_storage_key_prefix=self._get_storage_uri_prefix())
        _, uri = ev.get_target_full_upload_uri(upload_uri)

        # send for upload
        self._task.reporter._report(ev)

        return uri

    def _get_statistics(self, artifacts_dict=None):
        # type: (Optional[Dict[str, Artifact]]) -> str
        summary = ''
        artifacts_dict = artifacts_dict or self._artifacts_container
        thread_pool = ThreadPool()

        try:
            # build hash row sets
            artifacts_summary = []
            for a_name, a_df in artifacts_dict.items():
                hash_cols = self._artifacts_container.get_hash_columns(a_name)
                if not pd or not isinstance(a_df, pd.DataFrame):
                    continue

                if hash_cols is True:
                    hash_col_drop = []
                else:
                    hash_cols = set(hash_cols)
                    missing_cols = hash_cols.difference(a_df.columns)
                    if missing_cols == hash_cols:
                        LoggerRoot.get_base_logger().warning(
                            'Uniqueness columns {} not found in artifact {}. '
                            'Skipping uniqueness check for artifact.'.format(
                                list(missing_cols), a_name))
                        continue
                    elif missing_cols:
                        # missing_cols must be a subset of hash_cols
                        hash_cols.difference_update(missing_cols)
                        LoggerRoot.get_base_logger().warning(
                            'Uniqueness columns {} not found in artifact {}. Using {}.'
                            .format(list(missing_cols), a_name,
                                    list(hash_cols)))

                    hash_col_drop = [
                        col for col in a_df.columns if col not in hash_cols
                    ]

                a_unique_hash = set()

                def hash_row(r):
                    a_unique_hash.add(hash(bytes(r)))

                a_shape = a_df.shape
                # parallelize
                a_hash_cols = a_df.drop(columns=hash_col_drop)
                thread_pool.map(hash_row, a_hash_cols.values)
                # add result
                artifacts_summary.append((
                    a_name,
                    a_shape,
                    a_unique_hash,
                ))

            # build intersection summary
            for i, (name, shape, unique_hash) in enumerate(artifacts_summary):
                summary += '[{name}]: shape={shape}, {unique} unique rows, {percentage:.1f}% uniqueness\n'.format(
                    name=name,
                    shape=shape,
                    unique=len(unique_hash),
                    percentage=100 * len(unique_hash) / float(shape[0]))
                for name2, shape2, unique_hash2 in artifacts_summary[i + 1:]:
                    intersection = len(unique_hash & unique_hash2)
                    summary += '\tIntersection with [{name2}] {intersection} rows: {percentage:.1f}%\n'.format(
                        name2=name2,
                        intersection=intersection,
                        percentage=100 * intersection /
                        float(len(unique_hash2)))
        except Exception as e:
            LoggerRoot.get_base_logger().warning(str(e))
        finally:
            thread_pool.close()
            thread_pool.terminate()
        return summary

    def _get_temp_folder(self, force_new=False):
        # type: (bool) -> str
        if force_new or not self._temp_folder:
            new_temp = mkdtemp(prefix='artifacts_')
            self._temp_folder.append(new_temp)
            return new_temp
        return self._temp_folder[0]

    def _get_storage_uri_prefix(self):
        # type: () -> str
        if not self._storage_prefix:
            self._storage_prefix = self._task._get_output_destination_suffix()
        return self._storage_prefix

    @staticmethod
    def sha256sum(filename, skip_header=0):
        # type: (str, int) -> (Optional[str], Optional[str])
        # create sha2 of the file, notice we skip the header of the file (32 bytes)
        # because sometimes that is the only change
        h = hashlib.sha256()
        file_hash = hashlib.sha256()
        b = bytearray(Artifacts._hash_block_size)
        mv = memoryview(b)
        try:
            with open(filename, 'rb', buffering=0) as f:
                # skip header
                if skip_header:
                    file_hash.update(f.read(skip_header))
                for n in iter(lambda: f.readinto(mv), 0):
                    h.update(mv[:n])
                    if skip_header:
                        file_hash.update(mv[:n])
        except Exception as e:
            LoggerRoot.get_base_logger().warning(str(e))
            return None, None

        return h.hexdigest(), file_hash.hexdigest() if skip_header else None
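
A brief usage sketch for the Artifacts manager above; the task object is assumed to be an
initialized TRAINS Task (with a reporter and logger), and the names below are illustrative only:

import numpy as np
import pandas as pd

artifacts = Artifacts(task)  # task: an already-initialized TRAINS Task (assumed)
df = pd.DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6]})

# data-audit table: watched, hashed and re-uploaded by the daemon thread whenever it changes
artifacts.register_artifact('training_data', df, metadata={'split': 'train'},
                            uniqueness_columns=['a'])

# one-off upload of a supported object (numpy array, DataFrame, PIL image, dict, file/folder path, URL)
artifacts.upload_artifact('weights', artifact_object=np.zeros(10), metadata={'epoch': 1})

artifacts.flush()          # force an immediate snapshot of the registered artifacts
artifacts.stop(wait=True)  # stop the daemon thread and remove temporary folders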
Exemple #57
0
class Sentinel(object):
    def __init__(self, stop_event, start_event, start=True):
        # Make sure we catch signals for the pool
        signal.signal(signal.SIGINT, signal.SIG_IGN)
        signal.signal(signal.SIGTERM, signal.SIG_DFL)
        self.pid = current_process().pid
        self.parent_pid = get_ppid()
        self.name = current_process().name
        self.stop_event = stop_event
        self.start_event = start_event
        self.event_out = Event()
        self.cluster = None
        self.dispatcher = None
        self.webapper = None
        if start:
            self.start()

    def start(self):
        self.cluster = self.spawn_cluster()
        self.dispatcher = self.spawn_dispatcher()
        self.webapper = self.spawn_webapper()
        self.guard()

    def spawn_process(self, target, *args):
        """
        :type target: function or class
        """
        p = Process(target=target, args=args)
        if target == cluster:
            p.daemon = False
        else:
            p.daemon = True
        p.start()
        return p

    def spawn_cluster(self):
        return self.spawn_process(cluster)

    def spawn_dispatcher(self):
        return self.spawn_process(dispatcher, self.event_out)

    def spawn_webapper(self):
        return self.spawn_process(webapper)

    def guard(self):
        logger.info('{} guarding Application at {}'.format(
            current_process().name, self.pid))
        self.start_event.set()
        logger.info('Application-{} running.'.format(self.parent_pid))
        cycle = 0.5  # guard loop sleep in seconds
        # guard loop: resurrect the dispatcher if it dies
        while not self.stop_event.is_set():
            # Check dispatcher
            if not self.dispatcher.is_alive():
                self.dispatcher = self.spawn_dispatcher()
                logger.error(
                    'reincarnated dispatcher {} after sudden death'.format(
                        self.dispatcher.name))
            sleep(cycle)
        self.stop()

    def stop(self):
        name = current_process().name
        logger.info('{} stopping application processes'.format(name))
        # Signal the dispatcher to stop
        self.event_out.set()
        # Wait for it to stop
        while self.cluster.is_alive():
            sleep(0.1)
        while self.webapper.is_alive():
            sleep(0.1)
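
A minimal sketch of how this Sentinel is typically launched from a parent process; the cluster,
dispatcher and webapper targets are defined elsewhere, only the start/stop event handshake is
shown here:

from multiprocessing import Event, Process

def run_sentinel(stop_event, start_event):
    Sentinel(stop_event, start_event)   # start=True: spawns the children and enters guard()

if __name__ == '__main__':
    stop_event, start_event = Event(), Event()
    sentinel_proc = Process(target=run_sentinel, args=(stop_event, start_event))
    sentinel_proc.start()
    start_event.wait()                  # set by Sentinel.guard() once the children are up
    # ... application runs ...
    stop_event.set()                    # guard() notices, calls stop() and winds down
    sentinel_proc.join()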
Exemple #58
0
class SSHLoggerBase(NodeLoggerBase):
    _retrieve_message = "Reading Scylla logs from {since}"

    def __init__(self, node, target_log_file: str):
        super().__init__(node, target_log_file)
        self._termination_event = Event()
        self.node = node
        self._remoter = None
        self._remoter_params = node.remoter.get_init_arguments()
        self._child_process = Process(target=self._journal_thread, daemon=True)

    @property
    @abstractmethod
    def _logger_cmd(self) -> str:
        pass

    def _file_exists(self, file_path):
        try:
            result = self._remoter.run('sudo test -e %s' % file_path,
                                       ignore_status=True)
            return result.exit_status == 0
        except Exception as details:  # pylint: disable=broad-except
            self._log.error('Error checking if file %s exists: %s', file_path,
                            details)
        return False

    def _log_retrieve(self, since):
        if not since:
            since = 'the beginning'
        self._log.debug(self._retrieve_message.format(since=since))

    def _retrieve(self, since):
        since = '--since "{}" '.format(since) if since else ""
        self._remoter.run(self._logger_cmd.format(since=since),
                          verbose=True,
                          ignore_status=True,
                          log_file=self._target_log_file)

    def _retrieve_journal(self, since):
        try:
            self._log_retrieve(since)
            self._retrieve(since)
        except Exception as details:  # pylint: disable=broad-except
            self._log.error('Error retrieving remote node DB service log: %s',
                            details)

    @raise_event_on_failure
    def _journal_thread(self):
        self._remoter = RemoteCmdRunnerBase.create_remoter(
            **self._remoter_params)
        read_from_timestamp = None
        while not self._termination_event.is_set():
            self._wait_ssh_up(verbose=False)
            self._retrieve_journal(since=read_from_timestamp)
            read_from_timestamp = datetime.utcnow().strftime(
                "%Y-%m-%d %H:%M:%S")

    def _wait_ssh_up(self, verbose=True, timeout=500):
        text = None
        if verbose:
            text = '%s: Waiting for SSH to be up' % self
        wait.wait_for(func=self._remoter.is_up,
                      step=10,
                      text=text,
                      timeout=timeout,
                      throw_exc=True)

    def start(self):
        self._child_process.start()

    def stop(self, timeout=None):
        self._child_process.terminate()
        self._child_process.join(timeout)
        if self._child_process.is_alive():
            self._child_process.kill()  # pylint: disable=no-member
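
A concrete subclass only has to supply the abstract _logger_cmd property; a hypothetical example
(the exact journalctl flags are illustrative and not taken from the original project):

class JournalSSHLogger(SSHLoggerBase):
    @property
    def _logger_cmd(self) -> str:
        # {since} is filled in by _retrieve() with an optional --since "<timestamp>" clause
        return 'sudo journalctl -f --no-tail --no-pager -u scylla-server.service {since}'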
Exemple #59
0
import os

# Queues for reading the camera video streams
rightFrameQue = Queue()
middleFrameQue = Queue()
leftFrameQue = Queue()
# Queues for the face-detection processes
rightVideoQue = Queue()
middleVideoQue = Queue()
leftVideoQue = Queue()
# Queue for the face-recognition process
alreadyQue = Queue()
# Process lock: only one process may access alreadyInList at a time
mutex = Lock()
# Events used to schedule the face-recognition function
middleVideoEvent = Event()
leftVideoEvent = Event()
rightVideoEvent = Event()
# Event used to schedule the face-detection function
frameEvent = Event()


def main():

    # First, create the processes
    pRecieveMiddle = Process(target=video_recieve.recieve,
                             args=(middleFrameQue, "192.168.1.105"))
    pDetectMiddle = Process(target=video_detect.detect,
                            args=(middleFrameQue, middleVideoQue, "OUT",
                                  middleVideoEvent))
    pProcessMiddle = Process(target=video_process.process,
Exemple #60
0
class MultiProcessConsumer(Consumer):
    """
    A consumer implementation that consumes partitions for a topic in
    parallel using multiple processes

    client: a connected KafkaClient
    group: a name for this consumer, used for offset storage and must be unique
    topic: the topic to consume

    auto_commit: default True. Whether or not to auto commit the offsets
    auto_commit_every_n: default 100. How many messages to consume
                         before a commit
    auto_commit_every_t: default 5000. How much time (in milliseconds) to
                         wait before commit
    num_procs: Number of processes to start for consuming messages.
               The available partitions will be divided among these processes
    partitions_per_proc: Number of partitions to be allocated per process
               (overrides num_procs)

    Auto commit details:
    If both auto_commit_every_n and auto_commit_every_t are set, they will
    reset one another when one is triggered. These triggers simply call the
    commit method on this class. A manual call to commit will also reset
    these triggers
    """
    def __init__(self, client, group, topic, auto_commit=True,
                 auto_commit_every_n=AUTO_COMMIT_MSG_COUNT,
                 auto_commit_every_t=AUTO_COMMIT_INTERVAL,
                 num_procs=1, partitions_per_proc=0):

        # Initiate the base consumer class
        super(MultiProcessConsumer, self).__init__(
            client, group, topic,
            partitions=None,
            auto_commit=auto_commit,
            auto_commit_every_n=auto_commit_every_n,
            auto_commit_every_t=auto_commit_every_t)

        # Variables for managing and controlling the data flow from
        # consumer child process to master
        self.queue = Queue(1024)    # Child consumers dump messages into this
        self.start = Event()        # Indicates the consumers to start fetch
        self.exit = Event()         # Requests the consumers to shutdown
        self.pause = Event()        # Requests the consumers to pause fetch
        self.size = Value('i', 0)   # Indicator of number of messages to fetch

        partitions = self.offsets.keys()

        # If unspecified, start one consumer per partition
        # The logic below ensures that
        # * we do not cross the num_procs limit
        # * we have an even distribution of partitions among processes
        if not partitions_per_proc:
            partitions_per_proc = round(len(partitions) * 1.0 / num_procs)
            if partitions_per_proc < num_procs * 0.5:
                partitions_per_proc += 1

        # The final set of chunks
        chunker = lambda *x: [] + list(x)
        chunks = map(chunker, *[iter(partitions)] * int(partitions_per_proc))

        self.procs = []
        for chunk in chunks:
            chunk = filter(lambda x: x is not None, chunk)
            args = (client.copy(),
                    group, topic, chunk,
                    self.queue, self.start, self.exit,
                    self.pause, self.size)

            proc = Process(target=_mp_consume, args=args)
            proc.daemon = True
            proc.start()
            self.procs.append(proc)

    def stop(self):
        # Set exit and start off all waiting consumers
        self.exit.set()
        self.pause.set()
        self.start.set()

        for proc in self.procs:
            proc.join()
            proc.terminate()

        super(MultiProcessConsumer, self).stop()

    def __iter__(self):
        """
        Iterator to consume the messages available on this consumer
        """
        # Trigger the consumer procs to start off.
        # We will iterate till there are no more messages available
        self.size.value = 0
        self.pause.set()

        while True:
            self.start.set()
            try:
                # We will block for a small while so that the consumers get
                # a chance to run and put some messages in the queue
                # TODO: This is a hack and will make the consumer block for
                # at least one second. Need to find a better way of doing this
                partition, message = self.queue.get(block=True, timeout=1)
            except Empty:
                break

            # Count, check and commit messages if necessary
            self.offsets[partition] = message.offset
            self.start.clear()
            yield message

            self.count_since_commit += 1
            self._auto_commit()

        self.start.clear()

    def get_messages(self, count=1, block=True, timeout=10):
        """
        Fetch the specified number of messages

        count: Indicates the maximum number of messages to be fetched
        block: If True, the API will block till some messages are fetched.
        timeout: If None, and block=True, the API will block infinitely.
                 If >0, API will block for specified time (in seconds)
        """
        messages = []

        # Give a size hint to the consumers. Each consumer process will fetch
        # a maximum of "count" messages. This will fetch more messages than
        # necessary, but these will not be committed to kafka. Also, the extra
        # messages can be provided in subsequent runs
        self.size.value = count
        self.pause.clear()

        while count > 0:
            # Trigger consumption only if the queue is empty
            # By doing this, we will ensure that consumers do not
            # go into overdrive and keep consuming thousands of
            # messages when the user might need only a few
            if self.queue.empty():
                self.start.set()

            try:
                partition, message = self.queue.get(block, timeout)
            except Empty:
                break

            messages.append(message)

            # Count, check and commit messages if necessary
            self.offsets[partition] = message.offset
            self.count_since_commit += 1
            self._auto_commit()
            count -= 1

        self.size.value = 0
        self.start.clear()
        self.pause.set()

        return messages
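
A short usage sketch for the consumer above; the import path and broker address are assumptions
based on the older kafka-python API this class comes from:

from kafka import KafkaClient   # assumed import path

client = KafkaClient('localhost:9092')
consumer = MultiProcessConsumer(client, 'my-group', 'my-topic', num_procs=2)

# stream messages until the internal queue stays empty for the 1-second timeout
for message in consumer:
    # the payload is typically at message.message.value in this API (assumption)
    print('offset %d: %s' % (message.offset, message.message.value))

# or fetch a bounded batch
batch = consumer.get_messages(count=50, block=True, timeout=10)

consumer.stop()   # sets the exit/pause/start events and joins the child processes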