Example #1
File: io_base.py  Project: abeaumont/foos
class IOBase:
    def __init__(self, bus):
        self.bus = bus
        self.bus.subscribe(self.process_event, thread=False)
        self.write_queue = Queue(10)
        self.reader = threading.Thread(target=self.reader_thread)
        self.reader.daemon = True
        self.reader.start()
        self.writer = threading.Thread(target=self.writer_thread)
        self.writer.daemon = True
        self.writer.start()

    def reader_thread(self):
        raise NotImplementedError()

    def writer_thread(self):
        raise NotImplementedError()

    def convert_data(self, data):
        return data

    def process_event(self, ev):
        try:
            if ev.name == "leds_enabled":
                data = self.convert_data(ev.data)
                self.write_queue.put_nowait(data)
        except queue.Full:
            # TODO alert somehow without flooding?
            pass
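The pattern above is the non-blocking producer idiom: a bounded queue written to with put_nowait, with queue.Full swallowed so the event bus never stalls. A minimal, self-contained sketch of the same idea, with the project's Bus and serial hardware replaced by hypothetical stand-ins:

import queue
import threading

write_queue = queue.Queue(maxsize=10)   # bounded, like Queue(10) above

def on_event(data):
    # Drop the event rather than block the caller when the queue is full.
    try:
        write_queue.put_nowait(data)
    except queue.Full:
        pass  # a rate-limited warning would go here

def writer_thread():
    while True:
        data = write_queue.get()        # blocks until a producer enqueues
        print("writing", data)          # stand-in for the real hardware write

threading.Thread(target=writer_thread, daemon=True).start()
on_event("leds_enabled")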
Example #2
class LoggingServer(Process, UDPServer):

    def __init__ (self):   
        from app import APP
        port = APP.BE.LOGGING._i_server_port

        UDPServer.__init__(self, ('127.0.0.1', port), None)
        Process.__init__ (self, None, None, "pcLOG" )        
        
        self.queue = Queue( APP.BE.LOGGING._i_queue_size )        
        self.backend = LoggingBackend (self.queue)
        self.backend.start()
        self.on = True
        self.start()
    
    def finish_request(self, request, client_address):        
        from app import APP
        APP.Functions.set_process_name ( "0@@LOG" )
        data, sock = request
        try:
            (src, svr,msg) = cPickle.loads ( data )                    
            self.queue.put_nowait ( (time.time(), src,svr,msg) )
            if src is None: self.on = False
        except ThQueue.Full:
            print "Full"
        except:
            pass
        
    def run(self):
        while self.on:
            self.handle_request()        
        self.queue.put ( (None, None, None, None) )
Example #3
def videoCallback( frame, robot=None, debug=False ):
    global g_queueOut, g_processor
    if g_queueOut is None:
        g_queueOut = Queue()
        g_processor = Process( target=processMain, args=(g_queueOut,) )
        g_processor.daemon = True
        g_processor.start()
    g_queueOut.put_nowait( frame ) # H264 compressed video frame
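processMain is not shown in this snippet; a hypothetical consumer that would pair with the queue above might simply drain frames until a None sentinel arrives (the sentinel and the frame payload here are assumptions for illustration):

from multiprocessing import Process, Queue

def processMain(queue_out):
    # Hypothetical consumer: drain H264 frames until a None sentinel.
    while True:
        frame = queue_out.get()
        if frame is None:
            break
        # ... decode / analyse the compressed frame here ...

if __name__ == '__main__':
    q = Queue()
    p = Process(target=processMain, args=(q,))
    p.start()
    q.put_nowait(b'\x00' * 16)   # stand-in for a compressed frame
    q.put_nowait(None)           # sentinel so the consumer exits
    p.join()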
Example #4
File: utils.py  Project: xhacker/mutpy
class MutationTestRunnerProcess(MutationTestRunner, Process):

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.queue = Queue()

    def get_result(self, live_time):
        try:
            return self.queue.get(timeout=live_time)
        except Empty:
            return None

    def set_result(self, result):
        self.queue.put_nowait(result.serialize())
Example #5
def main():
    files_to_download = Queue()
    with open('downloads.txt', 'r') as f:
        for to_download in f:
            files_to_download.put_nowait(to_download.split('\n')[0])
    print("=== puremultiprocessing ===")
    total_processors = cpu_count()
    start = datetime.datetime.now()
    jobs = []
    for i in range(total_processors):
        p = Process(target = download, args=(files_to_download,))
        jobs.append(p)
        p.start()

    for j in jobs:
        j.join()
    print("{0} took {1} seconds to complete all downloads".format(__file__,\
                                            datetime.datetime.now() - start))
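The download worker itself is not shown above; a plausible sketch, assuming each process keeps pulling filenames until the shared queue runs dry, could look like this:

from queue import Empty   # multiprocessing.Queue raises queue.Empty as well

def download(files_to_download):
    # Hypothetical worker for Process(target=download, ...) above:
    # pull filenames until the shared queue is exhausted, then exit.
    while True:
        try:
            name = files_to_download.get_nowait()
        except Empty:
            return
        print("downloading", name)   # stand-in for the real HTTP fetch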
Example #6
class SpectrumLightController(object):
    def __init__(self, sampleGen):
        self.sampleGen = sampleGen

        sampleGen.onSample.add(self._onSample)
        sampleGen.onSongChanged.add(self._onSongChanged)

        atexit.register(self._onExit)

        self.messageQueue = Queue()

        self.process = Process(target=self.lightControllerProcess, args=(self.messageQueue, ))
        self.process.start()

    def _onSongChanged(self):
        try:
            self.messageQueue.put_nowait(('songChange', self.sampleGen.currentFilename))
        except QueueFull:
            ansi.error("Message queue to light process full! Continuing...")

    def _onSample(self):
        try:
            self.messageQueue.put_nowait(('chunk', self.sampleGen.currentData))
        except QueueFull:
            ansi.error("Message queue to light process full! Continuing...")

    def _onExit(self):
        if self.process.is_alive():
            try:
                self.messageQueue.put(('end', ))
            except QueueFull:
                ansi.error("Message queue to light process full! Continuing...")

    @staticmethod
    def lightControllerProcess(messageQueue):
        import lights

        if lightProcessNice:
            os.nice(lightProcessNice)

        analyzer = lights.SpectrumAnalyzer(messageQueue, gcp)
        analyzer.loop()
Example #7
class BlockingQueueGenerator(object):

    """
    Provides a generator that utilizes a bounded, blocking queue to store objects.  Useful for async sourcing agents
    that need to provide a stream of objects over time as a generator.
    """

    def __init__(self, maxsize=1000):
        self._data = Queue(maxsize=maxsize)

    def __iter__(self):
        return self

    def __next__(self):
        return self.next()

    def next(self):
        return self._data.get(block=True)

    def emit(self, obj):
        self._data.put_nowait(obj)
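As a rough usage sketch (assuming the class above is in scope and Queue is the standard-library queue.Queue; a multiprocessing.Queue behaves the same for this purpose), a background thread can emit items while the consumer iterates:

import threading, time

gen = BlockingQueueGenerator(maxsize=10)

def source():
    for i in range(3):
        gen.emit(i)            # put_nowait: raises queue.Full if the consumer falls behind
        time.sleep(0.1)
    gen.emit(None)             # hypothetical sentinel so the loop below can stop

threading.Thread(target=source, daemon=True).start()

for item in gen:               # each next() blocks on the queue
    if item is None:
        break
    print(item)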
Example #8
class GameClient:
    def __init__(self, server_addres):
        self.socket = None
        self.message_queue = Queue()
        self.event_queue = Queue()
        self.server_addres = server_addres
        self.pygame_client = None

    async def connect(self):
        session = ClientSession()
        self.socket = await session.ws_connect(self.server_addres)
        log.info('connected')

    async def handle_messages(self):
        while True:
            message = await self.socket.receive_json()
            self.message_queue.put_nowait(message)
            await asyncio.sleep(TIME_STEP)

    async def handle_events(self):
        while True:
            try:
                event = self.event_queue.get_nowait()
                self.socket.send_json(event)
            except Empty:
                await asyncio.sleep(TIME_STEP)

    def spawn_drawer_process(self):
        self.pygame_client.initialize_screen()
        Process(target=self.pygame_client.run).start()

    def run(self, loop=None):
        self.pygame_client = PygameClient(self.message_queue, self.event_queue)
        self.spawn_drawer_process()
        loop = loop or asyncio.get_event_loop()
        loop.run_until_complete(self.connect())
        loop.create_task(self.handle_events())
        loop.create_task(self.handle_messages())
        loop.run_forever()
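PygameClient is not shown in this snippet; a hypothetical drawer-side counterpart would consume server messages from message_queue and push user input back through event_queue, roughly like this:

from queue import Empty
import time

class PygameClient:
    # Hypothetical drawer-side counterpart to GameClient above.
    def __init__(self, message_queue, event_queue):
        self.message_queue = message_queue
        self.event_queue = event_queue

    def initialize_screen(self):
        pass                              # pygame.display.set_mode(...) would go here

    def run(self):
        while True:
            try:
                message = self.message_queue.get_nowait()
                # ... redraw the scene from the server message ...
            except Empty:
                time.sleep(1 / 60)        # avoid busy-waiting between frames
            # ... poll pygame for input and event_queue.put_nowait(...) it ...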
Example #9
File: queue.py  Project: antiface/spectral
class SafeQueue:
    """ Safe Queue implementation is a wrapper around standard multiprocessing
        queue. Implements safe queuing and dequeueing. """

    def __init__(self, size=10):
        self._queue = Queue(size)
        self._lock = Lock()

    def queue(self, inp):
        self._lock.acquire()
        if self._queue.full():
            self._queue.get()
        self._queue.put_nowait(inp)
        self._lock.release()

    def dequeue(self):
        self._lock.acquire()
        item = None
        if not self._queue.empty():
            item = self._queue.get_nowait()
        self._lock.release()
        return item
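Within a single process the wrapper behaves like a drop-oldest ring buffer. A rough usage sketch follows; note that full() and empty() on a multiprocessing.Queue are only approximate because of the background feeder thread, so the exact items dropped may vary:

sq = SafeQueue(size=2)
sq.queue('a')
sq.queue('b')
sq.queue('c')          # queue reports full, so the oldest item ('a') is discarded first
print(sq.dequeue())    # 'b'
print(sq.dequeue())    # 'c'
print(sq.dequeue())    # None -- nothing left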
Example #10
File: player.py  Project: LSDG/music-lights
class SpectrumLightController(object):
    def __init__(self, sampleGen):
        self.sampleGen = sampleGen

        sampleGen.onSample.add(self._onSample)
        sampleGen.onSongChanged.add(self._onSongChanged)

        atexit.register(self._onExit)

        if useGPIO:
            import lights_gpio as lights
        else:
            import lights

        self.messageQueue = Queue()

        self.subProcess = Process(target=lights.runLightsProcess, args=(self.messageQueue, ))
        self.subProcess.start()

    def _onSongChanged(self, tags, songInfo):
        try:
            self.messageQueue.put_nowait(('songChange', self.sampleGen.currentFilename, songInfo))
        except QueueFull:
            ansi.error("Message queue to light process full! Continuing...")

    def _onSample(self, data):
        try:
            if isinstance(data, buffer):
                data = bytes(data)
            self.messageQueue.put_nowait(('chunk', data))
        except QueueFull:
            ansi.error("Message queue to light process full! Continuing...")

    def _onExit(self):
        if self.subProcess.is_alive():
            try:
                self.messageQueue.put(('end', ))
            except QueueFull:
                ansi.error("Message queue to light process full! Continuing...")
Example #11
class Window(Borg):
    def __init__(self, width, height):
        super(Window, self).__init__()
        self._draw_queue = Queue()
        self._input_queue = Queue()
        self._window = _GameWindow(width, height, self._draw_queue, self._input_queue)
        self._window.start()

    def blit(self, source, dest, area=None, special_flags = 0):
        self._draw_queue.put_nowait([pygame.image.tostring(source, "RGBA"),
                                     source.get_width(), source.get_height(),
                                     dest, area, special_flags])

    def quit(self):
        self._window.quit.value = True
        self._window.join()

    @property
    def events(self):
        if not self._input_queue.empty():
            return self._input_queue.get()
        else:
            return []
Example #12
class FileController(Thread):
    def __init__(self, period):
        Thread.__init__(self)
        self.daemon = True
        self.cnt = 0
        self.period = period
        self.que = Queue(1024*8)
        self.dorun = True

    def run(self):
        while (self.dorun):
            time.sleep(self.period)
            tm = dt.datetime.now().strftime('%Y%m%d_%H.log')
            with open(tm, 'a') as f:
                while (self.que.empty() == False):
                    entry = self.getNext()
                    self.writeFile(f, entry)
                    f.closed

    def writeFile(self, file, entry):
        file.write('{}\n'.format(entry))

    def stopit(self):
        self.dorun = False
        
    def getNext(self):
            try:
                return self.que.get(True, timeout=60)    
            except:
                print("Exception")

    def read_queue(self):
        yield self.que.get(True, None)
        
    def putNext(self, entry):
        self.que.put_nowait(entry)
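A rough usage sketch of the logger thread above (period and file naming exactly as defined in the class):

fc = FileController(period=5)     # flush queued entries to an hourly log file every 5 s
fc.start()
fc.putNext('service started')     # put_nowait: raises queue.Full if 8192 entries back up
# ... later, on shutdown ...
fc.stopit()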
Example #13
def getNewPicture( firstInit ):
    global g_queueOut, g_processor, g_queueResults
    if g_queueOut is None:
        g_queueOut = Queue()
        g_queueResults = Queue()
        g_processor = Process( target=processMain, args=(g_queueOut,g_queueResults,) )
        g_processor.daemon = True
        g_processor.start()
    if firstInit:
        g_queueOut.put_nowait( 1 )
        return None

    elif firstInit is None:
        g_queueOut.put_nowait( None )
        time.sleep(1)
        g_queueOut = None

    else:
        if g_queueResults.empty():
            return None
        ret = g_queueResults.get()
        g_queueOut.put_nowait( 1 )
        return ret        
Example #14
class MultiProcessingHandler(logging.Handler):

    def __init__(self, name: str="", sub_handler: logging.Handler=None):
        """
        Create new multiprocess handler instance
        :param name: name of the handler
        :param sub_handler: logging handler e.g. FileHandler
        :return: MultiProcessingHandler
        """
        super(MultiProcessingHandler, self).__init__()

        if sub_handler is None:
            sub_handler = logging.StreamHandler()

        self.sub_handler = sub_handler
        self.queue = Queue(-1)
        self.setLevel(self.sub_handler.level)
        self.setFormatter(self.sub_handler.formatter)
        # The thread handles receiving records asynchronously.
        t = Thread(target=self.receive, name=name)
        t.daemon = True
        t.start()

    def setFormatter(self, fmt: logging.Formatter=None) -> None:
        """
        Set formatter
        :param fmt: formatter
        :return: None
        """
        logging.Handler.setFormatter(self, fmt)
        self.sub_handler.setFormatter(fmt)

    def receive(self) -> None:
        """
        Receive a message from the queue
        :return: None
        """
        while True:
            try:
                record = self.queue.get()
                self.sub_handler.emit(record)
            except (KeyboardInterrupt, SystemExit):
                raise
            except EOFError:
                break

    def send(self, s: logging.LogRecord=None) -> None:
        """
        Put a message on the queue
        :param s: LogRecord with the message info
        :return: None
        """
        self.queue.put_nowait(s)

    def _format_record(self, record: logging.LogRecord=None) -> logging.LogRecord:
        """
        Format the LogRecord
        :param record: LogRecord with the message info
        :return: LogRecord with formatted message
        """
        # ensure that exc_info and args
        # have been stringified. Removes any chance of
        # unpickleable things inside and possibly reduces
        # message size sent over the pipe.
        if record.args:
            record.msg = record.msg % record.args
            record.args = None
        if record.exc_info:
            self.format(record)
            record.exc_info = None

        return record

    def emit(self, record: logging.LogRecord=None) -> None:
        """
        Do whatever it takes to actually log the specified logging record.

        Here the record is formatted and placed on the queue so the
        background thread can hand it to the sub-handler.
        :param record: LogRecord with the message info
        :return: None
        """
        try:
            s = self._format_record(record)
            self.send(s)
        except (KeyboardInterrupt, SystemExit):
            raise

    def close(self) -> None:
        """
        Close the handler
        :return: None
        """
        self.sub_handler.close()
        logging.Handler.close(self)
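A rough usage sketch, assuming the handler above is importable and that child processes inherit the configured logger (e.g. on a fork-based platform):

import logging
from multiprocessing import Process

# Records are funnelled through the queue; the background thread in the
# parent hands them to the wrapped FileHandler.
handler = MultiProcessingHandler('mp-handler', logging.FileHandler('app.log'))
root = logging.getLogger()
root.addHandler(handler)
root.setLevel(logging.INFO)

def work():
    logging.getLogger(__name__).info('hello from a worker process')

if __name__ == '__main__':
    p = Process(target=work)
    p.start()
    p.join()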
Example #15
fdrop = 0
new_img_out = False
new_img_in = False
max_drop = 0
while not done:
    if cam.query_image():
        pyg_img = cam.get_image()
        img = np.ndarray(
            shape=(640, 480, 3), dtype=np.uint8, buffer=pyg_img.get_buffer(), offset=0, strides=(3, 1920, 1)
        ).copy()
        # flag to ensure each frame is put once and only once into the queue
        new_img_in = True

    try:
        if new_img_in == True:
            in_queue.put_nowait(img)
            fdrop = 0
            new_img_in = False
    except Full:
        fdrop += 1
        #        max_drop=max(max_drop,fdrop)
        #        if max_drop==fdrop:
        #            print "Maximum lag by %d loops"%(fdrop)
        pass

    try:
        rects = out_queue.get_nowait()
        new_img_out = True
    except Empty:
        new_img_out = False
    #
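The worker at the other end of in_queue/out_queue is not shown; a hypothetical consumer for the capture loop above might look like this (the detector itself is a stand-in):

def detector(in_queue, out_queue):
    # Hypothetical worker: take the newest frame, run detection, push the rectangles back.
    while True:
        img = in_queue.get()        # blocks until the capture loop enqueues a frame
        rects = []                  # stand-in for the real face/object detector
        out_queue.put_nowait(rects)

# e.g. in_queue = Queue(maxsize=1); out_queue = Queue()
# Process(target=detector, args=(in_queue, out_queue), daemon=True).start()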
Example #16
class Emulated(Driver):

    # hardware defs
    CHARS = 40
    ROWS = 9

    '''driver class that emulates the machine with a GUI

    we fake the :func:`send_data` and :func:`get_data` functions

    The :class:`Display` class is used to show how the braille machine would
    look and provide buttons.

    message passing is done with queues
    '''

    prev_data = [(0,) * 40] * 9

    def __init__(self, delay=0, display_text=False):
        super(Emulated, self).__init__()
        self.data = 0
        self.delay = delay
        self.buttons = {}

        # message passing queues: pass messages to display on parent, fetch
        # messages on child
        self.send_queue = Queue()
        self.receive_queue = Queue()
        # start the gui program as a separated process
        self.process = Process(target=qt_display.start,
                               kwargs={
                                   'to_display_queue': self.send_queue,
                                   'from_display_queue': self.receive_queue,
                                   'display_text': display_text
                               })
        self.process.daemon = True
        self.process.start()
        log.info('started qt_display.py with process id %d' % self.process.pid)

    def is_ok(self):
        '''The UI needs to know when to quit, so the GUI can tell it using this
        method'''
        return self.process.is_alive()

    def send_error_sound(self):
        log.info('error sound!')

    def send_ok_sound(self):
        log.info('ok sound!')

    def __exit__(self, ex_type, ex_value, traceback):
        '''__exit__ method allows us to shut down the threads properly'''
        if ex_type is not None:
            log.error('%s : %s' % (ex_type.__name__, ex_value))
        if self.process.is_alive():
            log.info('killing GUI subprocess')
            self.process.terminate()
        log.info('done')

    def __enter__(self):
        '''method required for using the `with` statement'''
        return self

    def get_buttons(self):
        '''Return the current button state and reset to all unpressed

        :rtype: list of 8 elements either set to False (unpressed) or one of
        single, double, long
        '''
        try:
            msg = self.receive_queue.get(timeout=0.01)
            log.debug('got button msg %s' % msg)
            self.buttons[msg['id']] = msg['type']
        except Empty:
            pass

        ret = self.buttons
        # reset
        self.buttons = {}
        return ret

    def send_data(self, cmd, data=[]):
        '''send data to the hardware. We fake the return data by making a note
        of the command the only thing we really do is if the command is to send
        data. Then we pass on to the display emulator

        :param cmd: command byte
        :param data: list of bytes
        '''
        if cmd == comms.CMD_GET_CHARS:
            self.data = Emulated.CHARS
        elif cmd == comms.CMD_GET_ROWS:
            self.data = Emulated.ROWS
        elif cmd == comms.CMD_SEND_PAGE:
            log.error('CMD_SEND_PAGE is no longer supported')
            sys.exit(1)
        elif cmd == comms.CMD_SEND_ERROR:
            log.error('making error sound!')
        elif cmd == comms.CMD_SEND_OK:
            log.error('making OK sound!')
        elif cmd == comms.CMD_SEND_LINE:
            self.data = 0
            self.send_queue.put_nowait([comms.CMD_SEND_LINE] + data)
            self.prev_data[data[0]] = tuple(data[1:])
        elif cmd == comms.CMD_RESET:
            self.data = 0

    def get_data(self, expected_cmd):
        '''gets 2 bytes of data from the hardware - we're faking this so the
        driver doesn't complain

        :param expected_cmd: what command we're expecting (error raised
        otherwise)
        :rtype: an integer return value
        '''
        return self.data

    async def async_get_data(self, expected_cmd):
        '''gets 2 bytes of data from the hardware - we're faking this so the
        driver doesn't complain

        :param expected_cmd: what command we're expecting (error raised
        otherwise)
        :rtype: an integer return value
        '''
        await asyncio.sleep(self.delay / 1000.0)
        return self.data
Example #17
File: main.py  Project: Frank3W/applogging
    ### log example: wrapping a function in multiprocessing

    # define multiprocessing queue to store logs
    # This queue is reserved for log communication in multiprocessing. It should not be used in other places.
    log_queue = Queue()

    # log listener thread
    log_listener_thread = threading.Thread(target=applogger.log_hanlder,
                                           args=(log_queue, ))
    log_listener_thread.start()

    p_list = []
    for i in range(2):
        p = multiprocessing.Process(target=log_function_wrap,
                                    args=(log_queue, ))
        p.name = 'subprocess {}'.format(str(i))
        p_list.append(p)
        p.start()

    for p in p_list:
        p.join()

    # stop signal for log queue
    log_queue.put_nowait(None)
    log_listener_thread.join()

    ### log example: a function in main process
    featureselection.featureselection()

    logging.info('data processing is complete')
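applogger.log_hanlder is not shown in the snippet (the name is kept as the project spells it); a hypothetical listener, assuming the workers put LogRecord objects on the queue, would drain records until the None sentinel enqueued above:

import logging

def log_hanlder(log_queue):
    # Hypothetical listener for the thread above: pull LogRecords from the
    # multiprocessing queue and hand them to the normal logging machinery,
    # stopping on the None sentinel.
    while True:
        record = log_queue.get()
        if record is None:
            break
        logging.getLogger(record.name).handle(record)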
Example #18
class ComponentDriver:
    class RESPONSE:
        Callback = 0
        Error = 1

    def __init__(self, name, component_main, keep_alive_always=False, keep_alive_when_outgoing=False):
        self.name = name
        self.component_main = component_main
        self.keep_alive_always = keep_alive_always
        self.keep_alive_when_outgoing = keep_alive_when_outgoing

        self.to_component = Queue()
        self.from_component = Queue()

        self.proc = None
        self.proc_counter = 0
        self.kickstart()

        thread = threading.Thread(target=self.main_loop)
        thread.setDaemon(True)
        thread.start()

        if self.keep_alive_always or self.keep_alive_when_outgoing:
            thread = threading.Thread(target=self.keepalive_loop)
            thread.setDaemon(True)
            thread.start()

        signal_component.connect(self.receive, weak=False)

    def kickstart(self):
        curr_proc_counter = self.proc_counter
        self.proc_counter += 1

        try:
            shutdown.disconnect(self.proc.dispatch_uid)
        except:
            pass
        try:
            self.proc.terminate()
        except:
            pass
        self.proc = Process(target=self.component_main, args=(self.to_component, self.from_component))
        self.proc.daemon = True
        self.proc.start()

        # Daemon flag seems not to work, so do this
        curr_proc = self.proc
        curr_proc.dispatch_uid = curr_proc_counter
        def terminate_proc(sender, **kwargs):
            if curr_proc.is_alive():
                try:
                    if WINDOWS:
                        curr_proc.terminate()
                    else:
                        os.kill(curr_proc.pid, signal.SIGKILL) # Stronger method
                except:
                    pass

        shutdown.connect(terminate_proc, weak=False, dispatch_uid=curr_proc_counter)

    def main_loop(self):
        while True:
            response_type, data = self.from_component.get()

            if response_type == ComponentDriver.RESPONSE.Callback:
                callback, param = data
                CModule.run_script('Tools.callbacks.tryCall("%s", "%s")' % (callback, param))
            elif response_type == ComponentDriver.RESPONSE.Error:
                CModule.show_message('Error', 'Component %s: %s' % (self.name, data))

    def keepalive_loop(self):
        while True:
            time.sleep(1.0)

            # Restart
            if not self.proc.is_alive() and (self.keep_alive_always or (self.keep_alive_when_outgoing and not self.to_component.empty())):
                self.kickstart()
                continue

    def receive(self, sender, **kwargs):
        component_id = kwargs['component_id']
        data = kwargs['data']

        try:
            if component_id == self.name:
                parts = data.split('|')
                command = parts[0]
                params = '|'.join(parts[1:])
                self.to_component.put_nowait((command, params))
        except Exception, e:
            log(logging.ERROR, "Error in %s component: %s" % (self.name, str(e)))

        return ''
Example #19
class MultiprocExecutor(Executor):
    handles_dependencies = False

    def __init__(self, task_ctrl, nproc=8):
        super().__init__(task_ctrl)
        self.nproc = nproc
        self.procs = []

        self.running_tasks = {}
        self.task_queue = None
        self.task_complete_queue = None

    def __enter__(self):
        super().__enter__()

        logger.debug('initializing queues')
        self.task_queue = Queue()
        self.task_complete_queue = Queue()

        logger.debug(f'creating {self.nproc} workers')
        for i in range(self.nproc):
            proc = Process(target=worker,
                           args=(i, self.task_ctrl.name, self.task_queue,
                                 self.task_complete_queue))
            proc.daemon = True
            logger.debug(f'created proc {proc}')
            proc.start()
            self.procs.append(proc)

    def __exit__(self, exc_type, exc_val, exc_tb):
        super().__exit__(exc_type, exc_val, exc_tb)
        logger.debug('finalizing all workers')
        for proc in self.procs:
            self.task_queue.put_nowait(None)

        for proc in self.procs:
            proc.join()

    def _run_task(self, task):
        # N.B. Cannot send Tasks that are build from rules as they do not pickle.
        # Perhaps because of metaclass?
        # Send a key and extract task from a task_ctrl on other side.
        if isinstance(task, RescanFileTask):
            task_type = 'rescan'
            key = str(task.inputs['filepath'])
        else:
            task_type = 'task'
            key = task.path_hash_key()
        logger.debug(f'adding task {key}: {task}')
        self.running_tasks[key] = (task_type, task, key)
        self.task_queue.put((task_type, key, True))

    def can_accept_task(self):
        return len(self.running_tasks) < self.nproc

    def enqueue_task(self, task):
        self._run_task(task)

    def get_completed_task(self):
        logger.debug('ctrl no tasks available - wait for completed')
        remote_task_key, success, error = self.task_complete_queue.get()
        if not success:
            logger.error(f'Error running {remote_task_key}')
            raise Exception(error)
        logger.debug(f'ctrl received: {remote_task_key}')
        task_type, completed_task, key = self.running_tasks.pop(
            remote_task_key)
        logger.debug(f'completed: {completed_task}')
        assert self.can_accept_task()

        return completed_task

    def has_finished(self):
        return not self.running_tasks
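The worker target is not shown; a hypothetical worker matching the queues above would run tasks until it receives the None sentinel that __exit__ enqueues once per process:

def worker(proc_id, task_ctrl_name, task_queue, task_complete_queue):
    # Hypothetical worker: receive (task_type, key, force) tuples, run the
    # corresponding task, and report (key, success, error) back.
    while True:
        item = task_queue.get()
        if item is None:
            break                       # sentinel from __exit__: shut down
        task_type, key, force = item
        try:
            # ... look the task up by key in a task_ctrl and run it ...
            task_complete_queue.put((key, True, None))
        except Exception as exc:
            task_complete_queue.put((key, False, repr(exc)))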
Example #20
File: main.py  Project: Nryanicus/giftris
def main():
    #### Initialisation ####
    
    ## get config data
    WIDTH = ctypes.windll.user32.GetSystemMetrics(0)
    HEIGHT = ctypes.windll.user32.GetSystemMetrics(1)
    CONSTS = {"WIDTH":WIDTH,"HEIGHT":HEIGHT}
    with open("config.txt","r") as config:
        for line in config:
            if line[0] == "#" or line == "\n": continue
            key,value = line.split(" ")
            value = value.strip()
            CONSTS[key] = value
            
    ## get gif library
    gif_path = CONSTS['SOURCE_DIRC']
    assert os.path.exists(gif_path), gif_path+" is not a valid directory"
    all_files = os.listdir(gif_path)
    gif_files = []
    for file in all_files:
        if file[-4:] == ".gif":
            gif_files.append(gif_path+"\\"+file)
            
    ## Packer process Setup
    rect_library = {}
    for filename in gif_files:
        rect_library[filename] = Rect(filename)
    
    # Queues
    pack_in = Queue()    # Queue of rects names
    remove_in = Queue()  # Queue of rect names
    pack_out = Queue()   # Queue of Rects
    remove_out = Queue() # Queue of rects names
    
    rect_in = Queue()    # Queue of rects
    gif_out = Queue()    # Queue of gifs
    
    initial_rects = rect_library.keys()
    shuffle(initial_rects)
    packer = RectPackerManager(pack_in, remove_in, pack_out, remove_out, rect_library, initial_rects, CONSTS)
    packer.start()
    
    ## GifLoader process Setup
    loader = GifLoader(rect_in, gif_out, CONSTS)
    loader.start()
    
    ## pygame bootstrappery
    pygame.init()
    # move display window to top left of screen
    os.environ['SDL_VIDEO_WINDOW_POS'] = '0,0'
    screen = pygame.display.set_mode((WIDTH, HEIGHT),pygame.NOFRAME)
    background = pygame.Surface(screen.get_size())
    background = background.convert()
    # pleasant dark grey
    background.fill((20, 20, 20))
    pygame.display.set_caption('Giftris')
    clock = pygame.time.Clock()
    
    active_sprites = pygame.sprite.Group()
    active_gifs = {}
    
    #### Application Loop ####
    dt = 0
    rects_to_add = [] # rects which need to be added to the packer
    added_rects = initial_rects  # rects which have been added, and must not be again
    completed_gifs = [] # gifs whose animation is done and must be removed from the packer
    packed_rects = [] # rects who have been packed and must have their animation created
    loading_gif = None
    loading_func = None
    j = 0
    while True:
        ## Packer Communication
        
        # if packer is empty, refill it
        if packer.empty():
            rects_to_add = rect_library.keys()
            # don't add rects which are still onscreen to the packer, as this 
            # would cause filenames to be ambiguous ids. And be boring
            for rect_name in added_rects:
                if rect_name in rects_to_add:
                    rects_to_add.remove(rect_name)
            shuffle(rects_to_add)
        # if there are any rects that have not been sent, send them
        i = 0
        for rect_name in rects_to_add:
            if not pack_in.full():
                assert not rect_name in added_rects
                pack_in.put_nowait(rect_name)
                added_rects.append(rect_name)
                i += 1
            else: # keep whatever gifs weren't queued for next iteration 
                rects_to_add = rects_to_add[i:]
                break
        else: # all completed gifs got queued, so empty list
            rects_to_add = []
        
        # send completed gif ids to packer 
        i = 0
        for filename in completed_gifs:
            if not remove_in.full():
                remove_in.put_nowait(filename)
                i += 1
            else:
                completed_gifs = completed_gifs[i:]
                break
        else:
            completed_gifs = []
            
        # get removed rects and kill sprites
        removed_rects = []
        while not remove_out.empty():
            rect_name = remove_out.get_nowait()
            removed_rects.append(rect_name)
            added_rects.remove(rect_name)
        for rect_name in removed_rects:
            gif = active_gifs.pop(rect_name)
            gif.kill()
            active_sprites.remove(gif)
        
        ## add new sprites
        # get packed rects from packer
        while not pack_out.empty():
            rect = pack_out.get_nowait()
            packed_rects.append(rect)
            assert not rect.filename in active_gifs, rect.filename
            assert not loading_gif or not rect.filename == loading_gif.filename, rect.filename
        
        # send rects to have their animation loaded
        i = 0
        for rect in packed_rects:
            if not rect_in.full():
                assert not rect.filename in active_gifs
                rect_in.put_nowait(rect)
                #print "putting into loader",rect.filename
                i += 1
            else:
                packed_rects = packed_rects[i:]
                break
        else:
            packed_rects = []
        
        # receive loaded animations and add them to rendering pipeline
        if not loading_gif and not gif_out.empty():
            loading_gif = gif_out.get_nowait()
            #print "getting from loader",loading_gif.filename
            loading_func = loading_gif.ready()
        # continue loading gifs
        if loading_gif:
            complete = False
            try:
                while loading_func.next():
                    j += 1
                    pass
            except StopIteration:
                complete = True
            if complete:
                j = 0
                assert not loading_gif.filename in active_gifs, loading_gif.filename
                active_gifs[loading_gif.filename] = loading_gif
                active_sprites.add(loading_gif)
                loading_gif = None
                loading_func = None
                complete = False
        
        ## Render
        screen.blit(background, (0,0))
        
        active_sprites.draw(screen)
        pygame.display.update()
        active_sprites.update(active_sprites, dt, completed_gifs)
        
        ## Framerate
        dt = clock.tick(30)
        ## IO
        for event in pygame.event.get():
            if event.type == pygame.KEYDOWN:
                if event.key == pygame.K_ESCAPE:
                    pygame.quit()
                    sys.exit()
                '''if event.key == pygame.K_SPACE:
                    import code
                    code.interact(local=locals())'''
                    
        # DEBUG:
        if not packer.is_alive():
            return
Example #21
class Game(object):
    def __init__(self, M, N, K, player_classes, timeout=None):
        self.players = Player.create_players(player_classes)
        self.state = State.initial(M, N, K, self.players[0])
        self.timeout = timeout

    def play(self):
        while not self.state.is_terminal():
            self.before_move()
            next_move = self.request_move()
            self.state = self.state.result(next_move)
            self.after_move()
        return self.state.winner_color

    def request_move(self):
        # state = copy.deepcopy(self.state)
        state = self.state
        player = self.state.to_play

        if self.timeout is None:
            # No timeout, just use a single process
            action = self.state.to_play.move(state)
        else:
            # For passing the messages back and forth
            self.result_q = Queue(1)
            self.signal_q = Queue(1)

            # Dynamically augment the player instance
            def is_time_up(self):
                if not self._timeup:
                    try:
                        self._signal_q.get_nowait()
                        self._timeup = True
                    except Empty:
                        return False
                return True

            def _do_move(self, state, result_q, signal_q):
                sys.stdin = os.fdopen(self.fileno)
                self._signal_q = signal_q
                action = self.move(state)
                members = [(attr, v) for attr, v in vars(self).items() \
                           if not callable(getattr(self, attr)) and not attr.startswith("__") \
                           and not attr in ['_signal_q', '_timeup', 'fileno', 'next', 'color']]
                result_q.put_nowait((action, members))

            player.is_time_up = MethodType(is_time_up, player)
            player._do_move = MethodType(_do_move, player)
            player.fileno = sys.stdin.fileno()
            player._timeup = False

            # Boot a process for the player move
            move_process = Process(target=player._do_move, args=(state, self.result_q, self.signal_q))
            move_process.start()

            action = None
            player_vars = None
            try:
                action, player_vars = self.result_q.get(True, self.timeout)

            except Empty:
                # Send the "time is up" warning
                self.signal_q.put_nowait(0)

                # Wait one second and get the move
                try:
                    action, player_vars = self.result_q.get(True, 1)
                except Empty:
                    pass

            # Clear queues
            try:
                self.signal_q.get_nowait()
            except Empty:
                pass

            try:
                self.result_q.get_nowait()
            except Empty:
                pass

            if move_process.is_alive():
                move_process.terminate()
                move_process.join(1)

            if action is None:
                print("Time is up and no valid move was returned, playing a random move.")
                # If a move wasn't placed on the result pipe in time, play a random move
                action = self.state.actions()[0]

            if player_vars is not None:
                for key, value in player_vars:
                    setattr(player, key, value)

        return action
Example #22
        try:
            #detect face and draw bounding box
            crop_reduction = 3
            frame_face = cv2.resize(frame,(frame.shape[1]//crop_reduction,frame.shape[0]//crop_reduction))
            bboxes = classifier.detectMultiScale(frame_face)
            padding = 30
            x, y, w, h = bboxes[0]
            (x, y, w, h) = (x*crop_reduction, y*crop_reduction, w*crop_reduction, h*crop_reduction) 
            cv2.rectangle(frame,(x,y),(x+h,y+w),(0,255,0),2)

            #crop face for inference and put it in inference queue
            img = frame[y-padding:y+h+padding, x-padding:x+w+padding]
            img = cv2.resize(img,(224,224))
            img = np.expand_dims(img, axis=0)
            if frame_queue.empty():
                frame_queue.put_nowait(img)

            #check emotion queue for emotion and aus and write them on output image
            if not emotion_queue.empty():
                emotion,aus_array,inference_time = emotion_queue.get_nowait()
            font = cv2.FONT_HERSHEY_SIMPLEX
            cv2.rectangle(frame,(x,y-50),(x+h,y),(0,255,0,0.3),-1)
            cv2.putText(frame,emotion,(x,y-10), font, 1.5,(0,0,0),2,cv2.LINE_AA)
            offset = 0
            for au in aus_array:
                cv2.putText(frame,au,(50,50 + offset), font, 1,(0,0,0),2,cv2.LINE_AA)
                offset+=30
            emoji = cv2.imread("emojis/{}.png".format(emotion),cv2.IMREAD_UNCHANGED)
            emoji = cv2.resize(emoji,(70,70))
            frame = overlay_transparent(frame,emoji,x+w-60,y-60)
        except:
Example #23
class DLManager(Process):
    def __init__(self, download_dir, base_url, cache_dir=None, status_q=None,
                 max_workers=0, update_interval=1.0, dl_timeout=10, resume_file=None,
                 max_shared_memory=1024 * 1024 * 1024):
        super().__init__(name='DLManager')
        self.log = logging.getLogger('DLM')
        self.proc_debug = False

        self.base_url = base_url
        self.dl_dir = download_dir
        self.cache_dir = cache_dir if cache_dir else os.path.join(download_dir, '.cache')

        # All the queues!
        self.logging_queue = None
        self.dl_worker_queue = None
        self.writer_queue = None
        self.dl_result_q = None
        self.writer_result_q = None
        self.max_workers = max_workers if max_workers else min(cpu_count() * 2, 16)
        self.dl_timeout = dl_timeout

        # Analysis stuff
        self.analysis = None
        self.tasks = deque()
        self.chunks_to_dl = deque()
        self.chunk_data_list = None

        # shared memory stuff
        self.max_shared_memory = max_shared_memory  # 1 GiB by default
        self.sms = deque()
        self.shared_memory = None

        # Interval for log updates and pushing updates to the queue
        self.update_interval = update_interval
        self.status_queue = status_q  # queue used to relay status info back to GUI/CLI

        # Resume file stuff
        self.resume_file = resume_file
        self.hash_map = dict()

        # cross-thread runtime information
        self.running = True
        self.active_tasks = 0
        self.children = []
        self.threads = []
        self.conditions = []
        # bytes downloaded and decompressed since last report
        self.bytes_downloaded_since_last = 0
        self.bytes_decompressed_since_last = 0
        # bytes written since last report
        self.bytes_written_since_last = 0
        # bytes read since last report
        self.bytes_read_since_last = 0
        # chunks written since last report
        self.num_processed_since_last = 0
        self.num_tasks_processed_since_last = 0

    def run_analysis(self, manifest: Manifest, old_manifest: Manifest = None,
                     patch=True, resume=True, file_prefix_filter=None,
                     file_exclude_filter=None, file_install_tag=None,
                     processing_optimization=False) -> AnalysisResult:
        """
        Run analysis on manifest and old manifest (if not None) and return a result
        with a summary resources required in order to install the provided manifest.

        :param manifest: Manifest to install
        :param old_manifest: Old manifest to patch from (if applicable)
        :param patch: Patch instead of redownloading the entire file
        :param resume: Continue based on resume file if it exists
        :param file_prefix_filter: Only download files that start with this prefix
        :param file_exclude_filter: Exclude files with this prefix from download
        :param file_install_tag: Only install files with the specified tag
        :param processing_optimization: Attempt to optimize processing order and RAM usage
        :return: AnalysisResult
        """

        analysis_res = AnalysisResult()
        analysis_res.install_size = sum(fm.file_size for fm in manifest.file_manifest_list.elements)
        analysis_res.biggest_chunk = max(c.window_size for c in manifest.chunk_data_list.elements)
        analysis_res.biggest_file_size = max(f.file_size for f in manifest.file_manifest_list.elements)
        is_1mib = analysis_res.biggest_chunk == 1024 * 1024
        self.log.debug(f'Biggest chunk size: {analysis_res.biggest_chunk} bytes (== 1 MiB? {is_1mib})')

        self.log.debug(f'Creating manifest comparison...')
        mc = ManifestComparison.create(manifest, old_manifest)
        analysis_res.manifest_comparison = mc

        if resume and self.resume_file and os.path.exists(self.resume_file):
            self.log.info('Found previously interrupted download. Download will be resumed if possible.')
            try:
                missing = 0
                mismatch = 0
                completed_files = set()

                for line in open(self.resume_file).readlines():
                    file_hash, _, filename = line.strip().partition(':')
                    _p = os.path.join(self.dl_dir, filename)
                    if not os.path.exists(_p):
                        self.log.debug(f'File does not exist but is in resume file: "{_p}"')
                        missing += 1
                    elif file_hash != manifest.file_manifest_list.get_file_by_path(filename).sha_hash.hex():
                        mismatch += 1
                    else:
                        completed_files.add(filename)

                if missing:
                    self.log.warning(f'{missing} previously completed file(s) are missing, they will be redownloaded.')
                if mismatch:
                    self.log.warning(f'{mismatch} existing file(s) have been changed and will be redownloaded.')

                # remove completed files from changed/added and move them to unchanged for the analysis.
                mc.added -= completed_files
                mc.changed -= completed_files
                mc.unchanged |= completed_files
                self.log.info(f'Skipping {len(completed_files)} files based on resume data.')
            except Exception as e:
                self.log.warning(f'Reading resume file failed: {e!r}, continuing as normal...')

        # Not entirely sure what install tags are used for, only some titles have them.
        # Let's add it for testing anyway.
        if file_install_tag:
            if isinstance(file_install_tag, str):
                file_install_tag = [file_install_tag]

            files_to_skip = set(i.filename for i in manifest.file_manifest_list.elements
                                if not any(fit in i.install_tags for fit in file_install_tag))
            self.log.info(f'Found {len(files_to_skip)} files to skip based on install tag.')
            mc.added -= files_to_skip
            mc.changed -= files_to_skip
            mc.unchanged |= files_to_skip

        # if include/exclude prefix has been set: mark all files that are not to be downloaded as unchanged
        if file_exclude_filter:
            if isinstance(file_exclude_filter, str):
                file_exclude_filter = [file_exclude_filter]

            file_exclude_filter = [f.lower() for f in file_exclude_filter]
            files_to_skip = set(i.filename for i in manifest.file_manifest_list.elements if
                                any(i.filename.lower().startswith(pfx) for pfx in file_exclude_filter))
            self.log.info(f'Found {len(files_to_skip)} files to skip based on exclude prefix.')
            mc.added -= files_to_skip
            mc.changed -= files_to_skip
            mc.unchanged |= files_to_skip

        if file_prefix_filter:
            if isinstance(file_prefix_filter, str):
                file_prefix_filter = [file_prefix_filter]

            file_prefix_filter = [f.lower() for f in file_prefix_filter]
            files_to_skip = set(i.filename for i in manifest.file_manifest_list.elements if not
                                any(i.filename.lower().startswith(pfx) for pfx in file_prefix_filter))
            self.log.info(f'Found {len(files_to_skip)} files to skip based on include prefix(es)')
            mc.added -= files_to_skip
            mc.changed -= files_to_skip
            mc.unchanged |= files_to_skip

        if file_prefix_filter or file_exclude_filter or file_install_tag:
            self.log.info(f'Remaining files after filtering: {len(mc.added) + len(mc.changed)}')
            # correct install size after filtering
            analysis_res.install_size = sum(fm.file_size for fm in manifest.file_manifest_list.elements
                                            if fm.filename in mc.added)

        if mc.removed:
            analysis_res.removed = len(mc.removed)
            self.log.debug(f'{analysis_res.removed} removed files')
        if mc.added:
            analysis_res.added = len(mc.added)
            self.log.debug(f'{analysis_res.added} added files')
        if mc.changed:
            analysis_res.changed = len(mc.changed)
            self.log.debug(f'{analysis_res.changed} changed files')
        if mc.unchanged:
            analysis_res.unchanged = len(mc.unchanged)
            self.log.debug(f'{analysis_res.unchanged} unchanged files')

        if processing_optimization and len(manifest.file_manifest_list.elements) > 8_000:
            self.log.warning('Manifest contains too many files, processing optimizations will be disabled.')
            processing_optimization = False
        elif processing_optimization:
            self.log.info('Processing order optimization is enabled, analysis may take a few seconds longer...')

        # count references to chunks for determining runtime cache size later
        references = Counter()
        file_to_chunks = defaultdict(set)
        fmlist = sorted(manifest.file_manifest_list.elements,
                        key=lambda a: a.filename.lower())

        for fm in fmlist:
            self.hash_map[fm.filename] = fm.sha_hash.hex()

            # chunks of unchanged files are not downloaded so we can skip them
            if fm.filename in mc.unchanged:
                analysis_res.unchanged += fm.file_size
                continue

            for cp in fm.chunk_parts:
                references[cp.guid_num] += 1
                if processing_optimization:
                    file_to_chunks[fm.filename].add(cp.guid_num)

        if processing_optimization:
            # reorder the file manifest list to group files that share many chunks
            # 5 is mostly arbitrary but has shown in testing to be a good choice
            min_overlap = 4
            # enumerate the file list to try and find a "partner" for
            # each file that shares the most chunks with it.
            partners = dict()
            filenames = [fm.filename for fm in fmlist]

            for num, filename in enumerate(filenames[:int((len(filenames) + 1) / 2)]):
                chunks = file_to_chunks[filename]
                partnerlist = list()

                for other_file in filenames[num + 1:]:
                    overlap = len(chunks & file_to_chunks[other_file])
                    if overlap > min_overlap:
                        partnerlist.append(other_file)

                if not partnerlist:
                    continue

                partners[filename] = partnerlist

            # iterate over all the files again and this time around
            _fmlist = []
            processed = set()
            for fm in fmlist:
                if fm.filename in processed:
                    continue
                _fmlist.append(fm)
                processed.add(fm.filename)
                # try to find the file's "partner"
                f_partners = partners.get(fm.filename, None)
                if not f_partners:
                    continue
                # add each partner to list at this point
                for partner in f_partners:
                    if partner in processed:
                        continue

                    partner_fm = manifest.file_manifest_list.get_file_by_path(partner)
                    _fmlist.append(partner_fm)
                    processed.add(partner)

            fmlist = _fmlist

        # determine reusable chunks and prepare lookup table for reusable ones
        re_usable = defaultdict(dict)
        if old_manifest and mc.changed and patch:
            self.log.debug('Analyzing manifests for re-usable chunks...')
            for changed in mc.changed:
                old_file = old_manifest.file_manifest_list.get_file_by_path(changed)
                new_file = manifest.file_manifest_list.get_file_by_path(changed)

                existing_chunks = defaultdict(list)
                off = 0
                for cp in old_file.chunk_parts:
                    existing_chunks[cp.guid_num].append((off, cp.offset, cp.offset + cp.size))
                    off += cp.size

                for cp in new_file.chunk_parts:
                    key = (cp.guid_num, cp.offset, cp.size)
                    for file_o, cp_o, cp_end_o in existing_chunks[cp.guid_num]:
                        # check if new chunk part is wholly contained in the old chunk part
                        if cp_o <= cp.offset and (cp.offset + cp.size) <= cp_end_o:
                            references[cp.guid_num] -= 1
                            re_usable[changed][key] = file_o + (cp.offset - cp_o)
                            analysis_res.reuse_size += cp.size
                            break

        last_cache_size = current_cache_size = 0
        # set to determine whether a file is currently cached or not
        cached = set()
        # Using this secondary set is orders of magnitude faster than checking the deque.
        chunks_in_dl_list = set()
        # This is just used to count all unique guids that have been cached
        dl_cache_guids = set()

        # run through the list of files and create the download jobs and also determine minimum
        # runtime cache requirement by simulating adding/removing from cache during download.
        self.log.debug('Creating filetasks and chunktasks...')
        for current_file in fmlist:
            # skip unchanged and empty files
            if current_file.filename in mc.unchanged:
                continue
            elif not current_file.chunk_parts:
                self.tasks.append(FileTask(current_file.filename, empty=True))
                continue

            existing_chunks = re_usable.get(current_file.filename, None)
            chunk_tasks = []
            reused = 0

            for cp in current_file.chunk_parts:
                ct = ChunkTask(cp.guid_num, cp.offset, cp.size)

                # re-use the chunk from the existing file if we can
                if existing_chunks and (cp.guid_num, cp.offset, cp.size) in existing_chunks:
                    reused += 1
                    ct.chunk_file = current_file.filename
                    ct.chunk_offset = existing_chunks[(cp.guid_num, cp.offset, cp.size)]
                else:
                    # add to DL list if not already in it
                    if cp.guid_num not in chunks_in_dl_list:
                        self.chunks_to_dl.append(cp.guid_num)
                        chunks_in_dl_list.add(cp.guid_num)

                    # if chunk has more than one use or is already in cache,
                    # check if we need to add or remove it again.
                    if references[cp.guid_num] > 1 or cp.guid_num in cached:
                        references[cp.guid_num] -= 1

                        # delete from cache if no references left
                        if references[cp.guid_num] < 1:
                            current_cache_size -= analysis_res.biggest_chunk
                            cached.remove(cp.guid_num)
                            ct.cleanup = True
                        # add to cache if not already cached
                        elif cp.guid_num not in cached:
                            dl_cache_guids.add(cp.guid_num)
                            cached.add(cp.guid_num)
                            current_cache_size += analysis_res.biggest_chunk
                    else:
                        ct.cleanup = True

                chunk_tasks.append(ct)

            if reused:
                self.log.debug(f' + Reusing {reused} chunks from: {current_file.filename}')
                # open temporary file that will contain download + old file contents
                self.tasks.append(FileTask(current_file.filename + u'.tmp', fopen=True))
                self.tasks.extend(chunk_tasks)
                self.tasks.append(FileTask(current_file.filename + u'.tmp', close=True))
                # delete old file and rename temporary
                self.tasks.append(FileTask(current_file.filename, delete=True, rename=True,
                                           temporary_filename=current_file.filename + u'.tmp'))
            else:
                self.tasks.append(FileTask(current_file.filename, fopen=True))
                self.tasks.extend(chunk_tasks)
                self.tasks.append(FileTask(current_file.filename, close=True))

            # check if runtime cache size has changed
            if current_cache_size > last_cache_size:
                self.log.debug(f' * New maximum cache size: {current_cache_size / 1024 / 1024:.02f} MiB')
                last_cache_size = current_cache_size

        self.log.debug(f'Final cache size requirement: {last_cache_size / 1024 / 1024} MiB.')
        analysis_res.min_memory = last_cache_size + (1024 * 1024 * 32)  # add some padding just to be safe

        # Todo implement on-disk caching to avoid this issue.
        if analysis_res.min_memory > self.max_shared_memory:
            shared_mib = f'{self.max_shared_memory / 1024 / 1024:.01f} MiB'
            required_mib = f'{analysis_res.min_memory / 1024 / 1024:.01f} MiB'
            raise MemoryError(f'Current shared memory cache is smaller than required! {shared_mib} < {required_mib}. '
                              f'Try running legendary with the --enable-reordering flag to reduce memory usage.')

        # calculate actual dl and patch write size.
        analysis_res.dl_size = \
            sum(c.file_size for c in manifest.chunk_data_list.elements if c.guid_num in chunks_in_dl_list)
        analysis_res.uncompressed_dl_size = \
            sum(c.window_size for c in manifest.chunk_data_list.elements if c.guid_num in chunks_in_dl_list)

        # add jobs to remove files
        for fname in mc.removed:
            self.tasks.append(FileTask(fname, delete=True))

        analysis_res.num_chunks_cache = len(dl_cache_guids)
        self.chunk_data_list = manifest.chunk_data_list
        self.analysis = analysis_res

        return analysis_res

    def download_job_manager(self, task_cond: Condition, shm_cond: Condition):
        while self.chunks_to_dl and self.running:
            while self.active_tasks < self.max_workers * 2 and self.chunks_to_dl:
                try:
                    sms = self.sms.popleft()
                    no_shm = False
                except IndexError:  # no free cache
                    no_shm = True
                    break

                c_guid = self.chunks_to_dl.popleft()
                chunk = self.chunk_data_list.get_chunk_by_guid(c_guid)
                self.log.debug(f'Adding {chunk.guid_num} (active: {self.active_tasks})')
                try:
                    self.dl_worker_queue.put(DownloaderTask(url=self.base_url + '/' + chunk.path,
                                                            chunk_guid=c_guid, shm=sms),
                                             timeout=1.0)
                except Exception as e:
                    self.log.warning(f'Failed to add to download queue: {e!r}')
                    self.chunks_to_dl.appendleft(c_guid)
                    break

                self.active_tasks += 1
            else:
                # active tasks limit hit, wait for tasks to finish
                with task_cond:
                    self.log.debug('Waiting for download tasks to complete..')
                    task_cond.wait(timeout=1.0)
                    continue

            if no_shm:
                # if we break we ran out of shared memory, so wait for that.
                with shm_cond:
                    self.log.debug('Waiting for more shared memory...')
                    shm_cond.wait(timeout=1.0)

        self.log.debug('Download Job Manager quitting...')

    def dl_results_handler(self, task_cond: Condition):
        in_buffer = dict()

        task = self.tasks.popleft()
        current_file = ''

        while task and self.running:
            if isinstance(task, FileTask):  # this wasn't necessarily a good idea...
                try:
                    if task.empty:
                        self.writer_queue.put(WriterTask(task.filename, empty=True), timeout=1.0)
                    elif task.rename:
                        self.writer_queue.put(WriterTask(task.filename, rename=True,
                                                         delete=task.delete,
                                                         old_filename=task.temporary_filename),
                                              timeout=1.0)
                    elif task.delete:
                        self.writer_queue.put(WriterTask(task.filename, delete=True), timeout=1.0)
                    elif task.open:
                        self.writer_queue.put(WriterTask(task.filename, fopen=True), timeout=1.0)
                        current_file = task.filename
                    elif task.close:
                        self.writer_queue.put(WriterTask(task.filename, close=True), timeout=1.0)
                except Exception as e:
                    self.tasks.appendleft(task)
                    self.log.warning(f'Adding to queue failed: {e!r}')
                    continue

                try:
                    task = self.tasks.popleft()
                except IndexError:  # finished
                    break
                continue

            while (task.chunk_guid in in_buffer) or task.chunk_file:
                res_shm = None
                if not task.chunk_file:  # not re-using from an old file
                    res_shm = in_buffer[task.chunk_guid].shm

                try:
                    self.log.debug(f'Adding {task.chunk_guid} to writer queue')
                    self.writer_queue.put(WriterTask(
                        filename=current_file, shared_memory=res_shm,
                        chunk_offset=task.chunk_offset, chunk_size=task.chunk_size,
                        chunk_guid=task.chunk_guid, release_memory=task.cleanup,
                        old_file=task.chunk_file  # todo on-disk cache
                    ), timeout=1.0)
                except Exception as e:
                    self.log.warning(f'Adding to queue failed: {e!r}')
                    break

                if task.cleanup and not task.chunk_file:
                    del in_buffer[task.chunk_guid]

                try:
                    task = self.tasks.popleft()
                    if isinstance(task, FileTask):
                        break
                except IndexError:  # finished
                    task = None
                    break
            else:  # only enter blocking code if the loop did not break
                try:
                    res = self.dl_result_q.get(timeout=1)
                    self.active_tasks -= 1
                    with task_cond:
                        task_cond.notify()

                    if res.success:
                        self.log.debug(f'Download for {res.guid} succeeded, adding to in_buffer...')
                        in_buffer[res.guid] = res
                        self.bytes_downloaded_since_last += res.compressed_size
                        self.bytes_decompressed_since_last += res.size
                    else:
                        self.log.error(f'Download for {res.guid} failed, retrying...')
                        try:
                            self.dl_worker_queue.put(DownloaderTask(
                                url=res.url, chunk_guid=res.guid, shm=res.shm
                            ), timeout=1.0)
                            self.active_tasks += 1
                        except Exception as e:
                            self.log.warning(f'Failed adding retry task to queue! {e!r}')
                            # If this failed for whatever reason, put the chunk at the front of the DL list
                            self.chunks_to_dl.appendleft(res.guid)
                except Empty:
                    pass
                except Exception as e:
                    self.log.warning(f'Unhandled exception when trying to read download result queue: {e!r}')

        self.log.debug('Download result handler quitting...')

    def fw_results_handler(self, shm_cond: Condition):
        while self.running:
            try:
                res = self.writer_result_q.get(timeout=1.0)
                self.num_tasks_processed_since_last += 1

                if res.closed and self.resume_file and res.success:
                    if res.filename.endswith('.tmp'):
                        res.filename = res.filename[:-4]

                    file_hash = self.hash_map[res.filename]
                    # write last completed file to super simple resume file
                    with open(self.resume_file, 'ab') as rf:
                        rf.write(f'{file_hash}:{res.filename}\n'.encode('utf-8'))

                if res.kill:
                    self.log.debug('Got termination command in FW result handler')
                    break

                if not res.success:
                    # todo make this kill the installation process or at least skip the file and mark it as failed
                    self.log.fatal(f'Writing for {res.filename} failed!')
                if res.release_memory:
                    self.sms.appendleft(res.shm)
                    with shm_cond:
                        shm_cond.notify()

                if res.chunk_guid:
                    self.bytes_written_since_last += res.size
                    # if there's no shared memory we must have read from disk.
                    if not res.shm:
                        self.bytes_read_since_last += res.size
                    self.num_processed_since_last += 1

            except Empty:
                continue
            except Exception as e:
                self.log.warning(f'Exception when trying to read writer result queue: {e!r}')
        self.log.debug('Writer result handler quitting...')

    def run(self):
        if not self.analysis:
            raise ValueError('Did not run analysis before trying to run download!')

        # Subprocess will use its own root logger that logs to a Queue instead
        _root = logging.getLogger()
        _root.setLevel(logging.DEBUG if self.proc_debug else logging.INFO)
        if self.logging_queue:
            _root.handlers = []
            _root.addHandler(QueueHandler(self.logging_queue))

        self.log = logging.getLogger('DLManager')
        self.log.info(f'Download Manager running with process-id: {os.getpid()}')

        try:
            self.run_real()
        except KeyboardInterrupt:
            self.log.warning('Immediate exit requested!')
            self.running = False

            # send conditions to unlock threads if they aren't already
            for cond in self.conditions:
                with cond:
                    cond.notify()

            # make sure threads are dead.
            for t in self.threads:
                t.join(timeout=5.0)
                if t.is_alive():
                    self.log.warning(f'Thread did not terminate! {repr(t)}')

            # clean up all the queues, otherwise this process won't terminate properly
            for name, q in zip(('Download jobs', 'Writer jobs', 'Download results', 'Writer results'),
                               (self.dl_worker_queue, self.writer_queue, self.dl_result_q, self.writer_result_q)):
                self.log.debug(f'Cleaning up queue "{name}"')
                try:
                    while True:
                        _ = q.get_nowait()
                except Empty:
                    q.close()
                    q.join_thread()

    def run_real(self):
        self.shared_memory = SharedMemory(create=True, size=self.max_shared_memory)
        self.log.debug(f'Created shared memory of size: {self.shared_memory.size / 1024 / 1024:.02f} MiB')

        # create the shared memory segments and add them to their respective pools
        for i in range(int(self.shared_memory.size / self.analysis.biggest_chunk)):
            _sms = SharedMemorySegment(offset=i * self.analysis.biggest_chunk,
                                       end=i * self.analysis.biggest_chunk + self.analysis.biggest_chunk)
            self.sms.append(_sms)

        self.log.debug(f'Created {len(self.sms)} shared memory segments.')

        # Create queues
        self.dl_worker_queue = MPQueue(-1)
        self.writer_queue = MPQueue(-1)
        self.dl_result_q = MPQueue(-1)
        self.writer_result_q = MPQueue(-1)

        self.log.info('Starting download workers...')
        for i in range(self.max_workers):
            w = DLWorker(f'DLWorker {i + 1}', self.dl_worker_queue, self.dl_result_q,
                         self.shared_memory.name, logging_queue=self.logging_queue,
                         dl_timeout=self.dl_timeout)
            self.children.append(w)
            w.start()

        self.log.info('Starting file writing worker...')
        writer_p = FileWorker(self.writer_queue, self.writer_result_q, self.dl_dir,
                              self.shared_memory.name, self.cache_dir, self.logging_queue)
        self.children.append(writer_p)
        writer_p.start()

        num_chunk_tasks = sum(isinstance(t, ChunkTask) for t in self.tasks)
        num_dl_tasks = len(self.chunks_to_dl)
        num_tasks = len(self.tasks)
        num_shared_memory_segments = len(self.sms)
        self.log.debug(f'Chunks to download: {num_dl_tasks}, File tasks: {num_tasks}, Chunk tasks: {num_chunk_tasks}')

        # active downloader tasks
        self.active_tasks = 0
        processed_chunks = 0
        processed_tasks = 0
        total_dl = 0
        total_write = 0

        # synchronization conditions
        shm_cond = Condition()
        task_cond = Condition()
        self.conditions = [shm_cond, task_cond]

        # start threads
        s_time = time.time()
        self.threads.append(Thread(target=self.download_job_manager, args=(task_cond, shm_cond)))
        self.threads.append(Thread(target=self.dl_results_handler, args=(task_cond,)))
        self.threads.append(Thread(target=self.fw_results_handler, args=(shm_cond,)))

        for t in self.threads:
            t.start()

        last_update = time.time()

        while processed_tasks < num_tasks:
            delta = time.time() - last_update
            if not delta:
                time.sleep(self.update_interval)
                continue

            # update all the things
            processed_chunks += self.num_processed_since_last
            processed_tasks += self.num_tasks_processed_since_last

            total_dl += self.bytes_downloaded_since_last
            total_write += self.bytes_written_since_last

            dl_speed = self.bytes_downloaded_since_last / delta
            dl_unc_speed = self.bytes_decompressed_since_last / delta
            w_speed = self.bytes_written_since_last / delta
            r_speed = self.bytes_read_since_last / delta
            # c_speed = self.num_processed_since_last / delta

            # set temporary counters to 0
            self.bytes_read_since_last = self.bytes_written_since_last = 0
            self.bytes_downloaded_since_last = self.num_processed_since_last = 0
            self.bytes_decompressed_since_last = self.num_tasks_processed_since_last = 0
            last_update = time.time()

            perc = (processed_chunks / num_chunk_tasks) * 100
            runtime = time.time() - s_time
            total_avail = len(self.sms)
            total_used = (num_shared_memory_segments - total_avail) * (self.analysis.biggest_chunk / 1024 / 1024)

            if runtime and processed_chunks:
                rt_hours, runtime = int(runtime // 3600), runtime % 3600
                rt_minutes, rt_seconds = int(runtime // 60), int(runtime % 60)

                average_speed = processed_chunks / runtime
                estimate = (num_chunk_tasks - processed_chunks) / average_speed
                hours, estimate = int(estimate // 3600), estimate % 3600
                minutes, seconds = int(estimate // 60), int(estimate % 60)
            else:
                hours = minutes = seconds = 0
                rt_hours = rt_minutes = rt_seconds = 0

            self.log.info(f'= Progress: {perc:.02f}% ({processed_chunks}/{num_chunk_tasks}), '
                          f'Running for {rt_hours:02d}:{rt_minutes:02d}:{rt_seconds:02d}, '
                          f'ETA: {hours:02d}:{minutes:02d}:{seconds:02d}')
            self.log.info(f' - Downloaded: {total_dl / 1024 / 1024:.02f} MiB, '
                          f'Written: {total_write / 1024 / 1024:.02f} MiB')
            self.log.info(f' - Cache usage: {total_used} MiB, active tasks: {self.active_tasks}')
            self.log.info(f' + Download\t- {dl_speed / 1024 / 1024:.02f} MiB/s (raw) '
                          f'/ {dl_unc_speed / 1024 / 1024:.02f} MiB/s (decompressed)')
            self.log.info(f' + Disk\t- {w_speed / 1024 / 1024:.02f} MiB/s (write) / '
                          f'{r_speed / 1024 / 1024:.02f} MiB/s (read)')

            # send status update to back to instantiator (if queue exists)
            if self.status_queue:
                try:
                    self.status_queue.put(UIUpdate(
                        progress=perc, download_speed=dl_unc_speed, write_speed=w_speed, read_speed=r_speed,
                        memory_usage=total_used * 1024 * 1024
                    ), timeout=1.0)
                except Exception as e:
                    self.log.warning(f'Failed to send status update to queue: {e!r}')

            time.sleep(self.update_interval)

        for i in range(self.max_workers):
            self.dl_worker_queue.put_nowait(DownloaderTask(kill=True))

        self.log.info('Waiting for installation to finish...')
        self.writer_queue.put_nowait(WriterTask('', kill=True))

        writer_p.join(timeout=10.0)
        if writer_p.exitcode is None:
            self.log.warning('Terminating writer process, no exit code!')
            writer_p.terminate()

        # forcibly kill DL workers that are not actually dead yet
        for child in self.children:
            if child.exitcode is None:
                child.terminate()

        # make sure all the threads are dead.
        for t in self.threads:
            t.join(timeout=5.0)
            if t.is_alive():
                self.log.warning(f'Thread did not terminate! {repr(t)}')

        # clean up resume file
        if self.resume_file:
            try:
                os.remove(self.resume_file)
            except OSError as e:
                self.log.warning(f'Failed to remove resume file: {e!r}')

        # close up shared memory
        self.shared_memory.close()
        self.shared_memory.unlink()
        self.shared_memory = None

        self.log.info('All done! Download manager quitting...')
        # finally, exit the process.
        exit(0)
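
The download_job_manager and dl_results_handler above throttle how many chunks are in flight at once by sleeping on a Condition until the other side notifies. Below is a minimal, self-contained sketch of that throttling pattern; it is not part of legendary, the names are made up for illustration, and plain threads with a queue.Queue stand in for the worker processes.

import queue
import threading
from collections import deque

MAX_IN_FLIGHT = 4
NUM_CHUNKS = 20

chunks = deque(range(NUM_CHUNKS))  # pending "chunk guids"
dl_queue = queue.Queue()           # jobs handed to the workers
result_queue = queue.Queue()       # finished "downloads"
task_cond = threading.Condition()
active = 0                         # number of jobs currently in flight


def worker():
    # stand-in for a DLWorker process: turn each job into a result
    while True:
        guid = dl_queue.get()
        if guid is None:
            break
        result_queue.put(guid)


def job_manager():
    # cap the number of in-flight jobs, sleeping on the condition until notified
    global active
    while chunks:
        with task_cond:
            while active >= MAX_IN_FLIGHT:
                task_cond.wait(timeout=1.0)
            dl_queue.put(chunks.popleft())
            active += 1


def result_handler():
    # drain the results and wake the job manager for every finished job
    global active
    for _ in range(NUM_CHUNKS):
        result_queue.get()
        with task_cond:
            active -= 1
            task_cond.notify()


workers = [threading.Thread(target=worker) for _ in range(2)]
manager = threading.Thread(target=job_manager)
handler = threading.Thread(target=result_handler)
for t in workers + [manager, handler]:
    t.start()
manager.join()
handler.join()
for _ in workers:
    dl_queue.put(None)  # stop the stand-in workers
for w in workers:
    w.join()
print('all chunks processed')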
예제 #24
0
파일: __main__.py 프로젝트: alexanu/obadiah
def main():
    parser = argparse.ArgumentParser(description="Gather high-frequency trade "
                                     "data for a selected pair at a "
                                     "cryptocurrency exchange "
                                     "or gather an exchange-wide "
                                     "low-frequency data and store them into "
                                     "a database.")
    group = parser.add_mutually_exclusive_group()
    group.add_argument("-s",
                       "--stream",
                       help="where STREAM  must be in the format "
                       "PAIR:EXCHANGE ")

    group.add_argument("-m",
                       "--monitor",
                       help="where MONITOR is the exchange name to gather "
                       "its low-frequency data (i.e. list of traded pairs, "
                       " their parameters, etc )")

    parser.add_argument("-d",
                        "--dbname",
                        help="where DBNAME is the name of "
                        "PostgreSQL database where the data is to be saved "
                        "(default: ob-analytics),",
                        default="ob-analytics")

    parser.add_argument("-U",
                        "--user",
                        help="where USER is the name of "
                        "PostgreSQL database role to be used to save the data"
                        "(default: ob-analytics),",
                        default="ob-analytics")
    args = parser.parse_args()
    if args.stream:
        stream = args.stream.split(':')
        if stream[1] == 'BITSTAMP':
            set_start_method('spawn')

            stop_flag = Event()

            signal.signal(signal.SIGINT, lambda n, f: stop_flag.set())

            log_queue = Queue(-1)
            listener = Process(target=listener_process,
                               args=(log_queue, "./oba%s_%s_%s.log" %
                                     (tuple(stream) +
                                      (args.dbname.upper(), ))))
            listener.start()

            logging_configurer(log_queue, logging.INFO)
            logger = logging.getLogger("obanalyticsdb.main")
            logger.info('Started')

            exchanges = {'BITSTAMP': bs.capture}
            exitcode = 0

            try:
                capture = exchanges[stream[1]]
                print("Press Ctrl-C to stop ...")
                exitcode = capture(stream[0], args.dbname, args.user,
                                   stop_flag, log_queue)
            except KeyError as e:
                logger.exception(e)

            logger.info('Exit')
            log_queue.put_nowait(None)
            listener.join()

        elif stream[1] == 'BITFINEX':

            h = logging.handlers.RotatingFileHandler(
                "./oba%s_%s.log" % (
                    '_'.join(stream),
                    args.dbname.upper(),
                ), 'a', 2**24, 20)
            logging.basicConfig(format='%(asctime)s %(process)-6d %(name)s '
                                '%(levelname)-8s %(message)s',
                                handlers=[h])

            logging.getLogger("obanalyticsdb").setLevel(logging.INFO)
            logging.getLogger("websockets").setLevel(logging.INFO)
            logger = logging.getLogger(__name__ + ".main")

            task = asyncio.ensure_future(
                bf.capture(stream[0], args.user, args.dbname))
            loop = asyncio.get_event_loop()
            #  loop.set_debug(True)
            loop.add_signal_handler(
                getattr(signal, 'SIGINT'),
                functools.partial(lambda task: task.cancel(), task))

            print("Press Ctrl-C to stop ...")
            try:
                exitcode = 0
                asyncio.get_event_loop().run_until_complete(task)
            except asyncio.CancelledError:
                logger.info('Cancelled, exiting ...')
            except Exception as e:
                logger.exception(e)
                exitcode = 1
        else:
            print('Exchange %s is not supported (yet)' % stream[1])
            exitcode = 1

        sys.exit(exitcode)
    elif args.monitor:
        if args.monitor == 'BITFINEX':
            h = logging.handlers.RotatingFileHandler(
                "oba%s_%s.log" % (
                    args.monitor.upper(),
                    args.dbname.upper(),
                ), 'a', 2**24, 20)
            logging.basicConfig(format='%(asctime)s %(process)-6d %(name)s '
                                '%(levelname)-8s %(message)s',
                                handlers=[h])
            logging.getLogger(__name__.split('.')[0]).setLevel(logging.INFO)
            logger = logging.getLogger(__name__ + ".main")

            task = asyncio.ensure_future(bf.monitor(args.user, args.dbname))
            loop = asyncio.get_event_loop()
            #  loop.set_debug(True)
            loop.add_signal_handler(
                getattr(signal, 'SIGINT'),
                functools.partial(lambda task: task.cancel(), task))

            print("Press Ctrl-C to stop ...")
            try:
                exitcode = 0
                asyncio.get_event_loop().run_until_complete(task)
            except asyncio.CancelledError:
                logger.info('Cancelled, exiting ...')
            except Exception as e:
                logger.exception(e)
                exitcode = 1
        else:
            print('Exchange %s is not supported (yet)' % args.monitor)
            exitcode = 1

        sys.exit(exitcode)

    else:
        parser.print_usage()
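
The example imports listener_process and logging_configurer from elsewhere in the project, so their bodies are not shown. The following is a rough sketch of what such helpers typically look like, built on the standard library's logging.handlers.QueueHandler; the bodies below are an assumption for illustration, not the project's actual code.

import logging
import logging.handlers


def logging_configurer(log_queue, level):
    # route every record produced in this process into the shared queue
    root = logging.getLogger()
    root.handlers = []
    root.addHandler(logging.handlers.QueueHandler(log_queue))
    root.setLevel(level)


def listener_process(log_queue, filename):
    # runs in its own Process: drain the queue and write everything to one file
    handler = logging.FileHandler(filename)
    handler.setFormatter(logging.Formatter(
        '%(asctime)s %(process)-6d %(name)s %(levelname)-8s %(message)s'))
    while True:
        record = log_queue.get()
        if record is None:  # sentinel sent by main() via log_queue.put_nowait(None)
            break
        handler.handle(record)
    handler.close()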
예제 #25
0
def list_to_queue(lst: List) -> Queue:
    queue = Queue()
    for elm in lst:
        queue.put_nowait(elm)
    return queue
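
A short usage sketch for the helper above, assuming list_to_queue and the multiprocessing Queue import are in scope:

q = list_to_queue(['a', 'b', 'c'])
for _ in range(3):
    print(q.get(timeout=1.0))  # prints 'a', then 'b', then 'c'

Note that empty() and qsize() on a multiprocessing.Queue are only approximate, because puts are flushed to the underlying pipe by a background feeder thread, so draining with get(timeout=...) is more reliable than looping on empty().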
예제 #26
0
# coding:utf-8
# author caturbhuja
# date   2019/7/21 11:34 AM
# wechat chending2012
"""
这里是队列的用法.更详细的介绍,请看蚂蚁笔记介绍。
"""
from multiprocessing import Queue as Qa
from multiprocessing import queues

q1 = Qa(maxsize=10)
'''
q2 = queues.Queue(maxsize=10) seems to need more arguments; Qa is really just a wrapper around queues.Queue, which is obvious from its source.
'''
# Clearing the queue: why is there no method to clear it? Under Python 3 there is no way to clear a Queue; Python 2's Queue had one.
# q1.queue.clear()
'''
non-blocking (put_nowait / get_nowait) usage
'''
n = 0
while n < 20:
    try:
        n += 1
        q1.put_nowait(n)
    except Exception as e:
        print(e)

while not q1.empty():
    print(q1.get_nowait())
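
As the comment above notes, multiprocessing.Queue has no clear() method under Python 3. A common substitute, sketched here as an illustration rather than part of the original snippet, is to drain the queue with get_nowait() until it raises queue.Empty:

import queue


def drain(q):
    """Empty a multiprocessing.Queue, returning whatever items were still in it."""
    items = []
    while True:
        try:
            items.append(q.get_nowait())
        except queue.Empty:
            return items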
예제 #27
0
class Game(object):
    def __init__(self, M, N, player_classes, timeout=None):
        self.players = Player.create_players(player_classes)
        self.state = State.initial(M, N, self.players[0])
        self.timeout = timeout

    def play(self):
        while not self.state.is_terminal():
            self.before_move()
            next_move = self.request_move()
            print(str(next_move))
            self.state = self.state.result(next_move)
            self.after_move()

        return self.state.winner_row

    def request_move(self):
        # state = copy.deepcopy(self.state)
        state = self.state
        player = self.state.player

        if self.timeout is None:
            # No timeout, just use a single process
            action = self.state.player.move(state)
        else:
            # For passing the messages back and forth
            self.result_q = Queue(1)
            self.signal_q = Queue(1)

            # Dynamically augment the player instance
            def is_time_up(self):
                if not self._timeup:
                    try:
                        self._signal_q.get_nowait()
                        self._timeup = True
                    except Empty:
                        return False
                return True

            def _do_move(self, state, result_q, signal_q):
                sys.stdin = os.fdopen(self.fileno)
                self._signal_q = signal_q
                action = self.move(state)
                members = [
                    (attr, v) for attr, v in vars(self).items()
                    if (not callable(getattr(self, attr))
                        and not attr.startswith("__") and not attr in
                        ['_signal_q', '_timeup', 'fileno', 'next', 'row'])
                ]
                result_q.put_nowait((action, members))

            player.is_time_up = MethodType(is_time_up, player)
            player._do_move = MethodType(_do_move, player)
            player.fileno = sys.stdin.fileno()
            player._timeup = False

            # Boot a process for the player move
            move_process = Process(target=player._do_move,
                                   args=(state, self.result_q, self.signal_q))
            move_process.start()

            action = None
            player_vars = None
            is_time_out = False
            try:
                action, player_vars = self.result_q.get(True, self.timeout)

            except Empty:
                # Send the "time is up" warning
                self.signal_q.put_nowait(0)

                # Wait one second and get the move
                try:
                    action, player_vars = self.result_q.get(True, 1)
                except Empty:
                    is_time_out = True

            # Clear queues
            try:
                self.signal_q.get_nowait()
            except Empty:
                pass

            try:
                self.result_q.get_nowait()
            except Empty:
                pass

            if move_process.is_alive():
                move_process.terminate()
                move_process.join(1)

            if is_time_out:
                print(
                    "Time is up and no valid move was returned, playing a random move."
                )
                # If a move wasn't placed on the result pipe in time, play a random move
                actions = state.actions()
                if not actions:
                    return None

                return random.choice(actions)

            if player_vars is not None:
                for key, value in player_vars:
                    setattr(player, key, value)

        return action
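
The Game class above runs the player's move() in a separate process so it can be cut off after a timeout; the essential handshake is a timed get on the result queue plus a single-slot signal queue for the "time is up" warning. A stripped-down, self-contained sketch of that pattern follows (the function and variable names are hypothetical):

import time
from multiprocessing import Process, Queue
from queue import Empty


def slow_move(result_q, signal_q):
    # stand-in for player.move(); polls the signal queue so it can stop early
    best = None
    for candidate in range(10):
        best = candidate
        try:
            signal_q.get_nowait()   # the "time is up" warning arrived
            break
        except Empty:
            time.sleep(0.5)
    result_q.put_nowait(best)


if __name__ == '__main__':
    result_q, signal_q = Queue(1), Queue(1)
    p = Process(target=slow_move, args=(result_q, signal_q))
    p.start()
    try:
        action = result_q.get(True, 1.0)   # normal path: wait up to the timeout
    except Empty:
        signal_q.put_nowait(0)             # warn the worker, then grant a grace period
        try:
            action = result_q.get(True, 1.0)
        except Empty:
            action = None                  # caller falls back to a default move
    if p.is_alive():
        p.terminate()
        p.join(1)
    print('chosen action:', action)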
예제 #28
0
'''
Created on 31 Jul 2014

@author: matt
'''
from multiprocessing import Process, Queue

from HUD import HUD

if __name__ == '__main__':
    Q = Queue(100)
    Q.put_nowait(("roll", 30))
    Q.put_nowait(("pitch", 5))
    hud = HUD(simulate=False, master=True, update_queue=Q)
    hud.run_hud()    
예제 #29
0
class BatchLoader(Process):
    """
    This class abstracts away the loading of images.
    Images can either be loaded singly, or in a batch. The latter is used for
    the asynchronous data layer to preload batches while other processing is
    performed.
    """

    def __init__(self, params, indexlist, phase, proc_id):
        super(BatchLoader, self).__init__()
        self.indexlist = indexlist
        self.proc_id = proc_id
        self.batch_size = params['batch_size']
        self.im_shape = params['im_shape']
        self.phase = phase
        self.queue = Queue(_QSIZE)
        #rec_conn, send_conn = Pipe()
        # self.rec_conn = rec_conn
        # self.send_conn = send_conn
        ## Dividing with rest the batch size for the jobs we have
        self.batch_ck_size = self.batch_size//_nJobs
        ## in case of the last jobs adding the rest
        if self.proc_id == (_nJobs - 1):
            self.batch_ck_size += self.batch_size % _nJobs
        ## Opening LMDB
        lmdb_output_pose_env = lmdb.Environment(params['source']+'/pose_lmdb/', readonly=True, lock=False)
        self.cur_pose = lmdb_output_pose_env.begin().cursor()
        lmdb_output_flip_env = lmdb.Environment(params['source']+'/flip_lmdb/', readonly=True, lock=False)
        self.cur_flip = lmdb_output_flip_env.begin().cursor()
        lmdb_output_land_env = lmdb.Environment(params['source']+'/land_lmdb/', readonly=True, lock=False)
        self.cur_land = lmdb_output_land_env.begin().cursor()
        ################
        self.Nimgs = len(self.indexlist)
        # this class does some simple data-manipulations
        #proto_data = open(params['mean_file'], "rb").read()
        
        #a = caffe.io.caffe_pb2.BlobProto.FromString(proto_data)
        #mean  = caffe.io.blobproto_to_array(a)[0]
        ## mean is read BGR and c,h,w; we convert it to h,w,c.
        ## BGR is OK since OpenCV and caffe are BGR
        ## Then MySimpleTransformer will subtract the mean after the image
        ## has been converted to BGR as well, so the comparison is apples-to-apples.
        self.transformer = MySimpleTransformer()
        self.aug_tr = aug_tracker.AugmentationTracker()

        if params['mean_file'] is not None:
            mean = np.load(params['mean_file'])
            mean = mean.transpose(1, 2, 0)
            mean = np.float32(mean)
            self.transformer.set_mean(mean)

        if self.phase == 1:
            util.myprint("BatchLoader_valid" + str(self.proc_id) + " initialized with " + str(self.Nimgs) +" images")
        else:
            util.myprint("BatchLoader_train" + str(self.proc_id) + "  initialized with " + str(self.Nimgs) +" images")
            util.myprint("This will process: " + str(self.batch_ck_size)+'/'+str(self.batch_size) )

    def run(self):
        if self.phase == 1:
            util.myprint("Process started pre-fetching for Validation " + str(self.proc_id) + " : nimgs " + str(self.Nimgs) )
        else:
            util.myprint("Process started pre-fetching for Training " + str(self.proc_id) + " : nimgs " + str(self.Nimgs) )
        ## Counter to the entire augmented set
        count = 0
        ## Counter to the relative mini-batch
        countStep = 0
        ## Pre-allocate the data for the mini-batch
        listData = [None]*self.batch_ck_size
        while True:
            for ii in range(0,self.Nimgs):
                ####### Checking if we finished an (augmented) epoch
                if  count == self.Nimgs:
                    util.myprint("Finished an (augmented) epoch for loader id " + str(self.proc_id) + "...shuffling")
                    count = 0
                    shuffle(self.indexlist)

                # ######## Part to resume/wait a certain process when the other is operating
                # if self.phase == 1:
                #     if not _eventValidList[self.proc_id].is_set():
                #         util.myprint('Waiting Validation Loader ' + str(self.proc_id) + ' to start again')
                #         _eventValidList[self.proc_id].wait()
                # else:
                #     if not _eventTrainList[self.proc_id].is_set():
                #         util.myprint('Waiting Train Loader ' + str(self.proc_id) + ' to start again')
                #         _eventTrainList[self.proc_id].wait()

                ### Starting to do augmentation
                batch_img = None
                #index is of form:
                #blur_fr_13 XXXm.0hhvfrvXXX_MS000024 !!TMPDIR!!/imgs/XXXm.0hhvfrvXXX/XXXm.0hhvfrvXXX_MS000024.jpg 0
                index = self.indexlist[ii]
                index = index.split(' ')
                aug_type = index[0]  # augmentation type
                image_key = index[1] # image key
                image_file_name = index[2] #image
                label = np.float32(index[3]) #label
                ## Loading the image with OpenCV
                flipON = int( np.frombuffer( self.cur_flip.get(image_key) )[1] ) == 1
                im = cv2.imread(image_file_name,cv2.CV_LOAD_IMAGE_COLOR)
                ## Check immediately if we have to flip an image
                if flipON:
                    im = cv2.flip(im, 1)
                im_arr = np.asarray(im)
                aug_im = None
                if 'align2d' in aug_type or 'blur' in aug_type:
                    lmark = self.cur_land.get(image_key)
                    lmark = np.frombuffer(lmark, dtype='float64').reshape(68,2)
                    lmarks = np.zeros((1,68,2))
                    lmarks[0] = lmark
                    aug_im = self.aug_tr.augment_fast(aug_type=aug_type,img=im,landmarks=lmarks,flipON=flipON)
                elif 'render' in aug_type:                    
                    prj_matrix = np.frombuffer(self.cur_pose.get(image_key+'_'+aug_type), dtype='float64').reshape(3,4)
                    prj_matrix = np.asmatrix(prj_matrix)
                    aug_im = self.aug_tr.augment_fast(aug_type=aug_type,img=im,prj_matrix=prj_matrix,flipON=flipON)
                try:
                    aug_im = cv2.resize(aug_im, ( self.im_shape[0], self.im_shape[1] ),\
                                          interpolation=cv2.INTER_LINEAR )
                    batch_img = self.transformer.preprocess(aug_im)
                except Exception as ex:
                    util.myprint("Warning: Was not able to use aug_img because: " + str(ex))
                    util.myprint( "Skipping the image: " + image_file_name)
                count += 1
                ## If the image has been processed correctly, add it to the mini-batch
                if batch_img is not None:
                    data = {'img': batch_img , 'label' : label}
                    listData[countStep] = data 
                    countStep+=1
                    if countStep == self.batch_ck_size:
                        isDone = False
                        while not isDone:
                            try:
                                ##This mini-batch is ready to be sent for train
                                ## Resetting the relative listData and countStep
                                self.queue.put_nowait( list(listData) )
                            except std_Queue.Full as full:
                                pass
                            else:
                                #self.send_conn.send( (listData) )
                                countStep = 0
                                isDone = True
                                listData = [None]*self.batch_ck_size
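
BatchLoader is one of several producer processes that keep a bounded Queue filled with ready-made mini-batches while the consumer trains. A minimal sketch of that prefetching pattern with dummy data follows; all names and sizes are made up for illustration.

import time
from multiprocessing import Process, Queue
from queue import Full

QSIZE = 4  # how many mini-batches to keep prepared ahead of the consumer


class PrefetchLoader(Process):
    def __init__(self, n_batches):
        super().__init__()
        self.n_batches = n_batches
        self.queue = Queue(QSIZE)

    def run(self):
        for i in range(self.n_batches):
            batch = [i] * 8            # stand-in for loading/augmenting images
            while True:
                try:
                    self.queue.put_nowait(batch)
                    break
                except Full:
                    time.sleep(0.01)   # consumer is behind, retry shortly


if __name__ == '__main__':
    loader = PrefetchLoader(n_batches=10)
    loader.start()
    for _ in range(10):
        batch = loader.queue.get(timeout=5.0)
        print('got batch starting with', batch[0])
    loader.join()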
예제 #30
0
파일: ww_04_queue.py 프로젝트: wwyywg/Py3
#coding=utf-8
from multiprocessing import Queue

if __name__ == '__main__':

    q = Queue(3)  # create a Queue object that can hold at most three put messages
    q.put("message 1")
    q.put("message 2")
    print(q.full())  # False
    q.put("message 3")
    print(q.full())  # True

    # the queue is full, so both of the try blocks below will raise an exception
    try:
        q.put("message 4", True, 2)
    except:
        print("The queue is full, it currently holds %s messages" % q.qsize())

    try:
        q.put_nowait("message 4")
    except:
        print("The queue is full, it currently holds %s messages" % q.qsize())

    # recommended approach: check whether the queue is full before writing
    if not q.full():
        q.put_nowait("message 4")

    # when reading, check whether the queue is empty before calling get
    if not q.empty():
        for i in range(q.qsize()):
            print(q.get_nowait())
예제 #31
0
파일: records.py 프로젝트: bolirev/PyPylon
def record(config, date):
    """ record one camera with config as properties
    """
    # Get a few useful variables from the config
    if 'name' in config.keys():
        camname = config['name']
    else:
        raise KeyError('config should contain a name')

    if 'nframe' in config.keys():
        nframe = config['nframe']
    else:
        raise KeyError('config should contain a nframe')

    if 'buffer_size' in config.keys():
        n_buffer = config['buffer_size']
    else:
        raise KeyError('config should contain a buffer_size')

    if 'logger_threshold' in config.keys():
        logth = config['logger_threshold']
    else:
        raise KeyError('config should contain a logger_threshold')

    # Load configuration
    cam, folder = configure_camera(config)

    # Create shared array
    # of size n_buffer*image-size
    # this array is not locked so the ready_queue should
    # have the same size as the n_buffer
    array_dim = (cam.properties['Height'], cam.properties['Width'])
    nimel = np.prod(array_dim)
    m = mp.Array(ctypes.c_ubyte, int(n_buffer * nimel))
    array = np.frombuffer(m.get_obj(), dtype=np.uint8)

    # Create the control queue to avoid reading/writing on array
    ready_queue = Queue(maxsize=n_buffer)

    # Create a file name from camera name
    filename = os.path.join(folder, date, camera2name(cam) + '.lz4')
    dirname = os.path.dirname(filename)
    if not os.path.exists(dirname):
        os.makedirs(dirname)
    # Create a logger to store the data
    logger = LZ4DiffLogger(m, ready_queue, nimel, buffer_type=np.uint8)
    logger.filename = filename
    logger.threshold = np.uint8(logth)
    if logger.threshold != logth:
        raise ValueError('Threshold changed due to type conversion')
    logger.start()

    # Start recording
    print('Start recording')
    t_start = time.time()
    try:
        for rot_i in cam.grab_inrings(nframe, array, n_buffer):
            ready_queue.put_nowait(rot_i)
        ready_queue.put(None)
    except RuntimeError as e:
        print('ERROR: ', camname)
        ready_queue.put(None)
        raise e

    logger.join()
    cam.close()
    t_end = time.time()
    t_span = t_end - t_start
    print('End recording at {} fps'.format(nframe / t_span))
    return 'Done'
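
record() hands frames to the logger through a shared Array used as a ring buffer, with a Queue carrying the index of the slot that is ready to read. A condensed sketch of that handoff follows (dummy data, hypothetical names); as in the original, the array itself is not locked, and the bounded ready queue is the only thing keeping producer and consumer roughly in step.

import ctypes
import multiprocessing as mp

import numpy as np

N_BUFFER = 4     # number of slots in the ring buffer
FRAME_SIZE = 16  # elements per dummy "frame"


def consumer(shared, ready_queue):
    # view the shared memory as an (N_BUFFER, FRAME_SIZE) uint8 array
    buf = np.frombuffer(shared.get_obj(), dtype=np.uint8).reshape(N_BUFFER, FRAME_SIZE)
    while True:
        slot = ready_queue.get()
        if slot is None:     # sentinel: the producer is done
            break
        print('frame in slot', slot, 'sum =', int(buf[slot].sum()))


if __name__ == '__main__':
    shared = mp.Array(ctypes.c_ubyte, N_BUFFER * FRAME_SIZE)
    ready_queue = mp.Queue(maxsize=N_BUFFER)
    p = mp.Process(target=consumer, args=(shared, ready_queue))
    p.start()

    frames = np.frombuffer(shared.get_obj(), dtype=np.uint8).reshape(N_BUFFER, FRAME_SIZE)
    for i in range(10):
        slot = i % N_BUFFER
        frames[slot] = i          # stand-in for the camera writing a frame
        ready_queue.put(slot)     # blocks when the consumer falls N_BUFFER slots behind
    ready_queue.put(None)
    p.join()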
예제 #32
0
from multiprocessing import Queue
q = Queue(3)  # create a Queue object that can hold at most three put messages
q.put("message 1")
q.put("message 2")
print(q.full())  # False
q.put("message 3")
print(q.full())  # True

# once the queue is full, further put calls will raise an exception

try:
    q.put("message 4", True, 2)  # raises after a two second timeout
except:
    print("The queue is full, there are now %s messages" % q.qsize())  # qsize() returns the number of messages

try:
    q.put_nowait("message 4")  # raises immediately if the queue is full
except:
    print("The queue is full, there are now %s messages" % q.qsize())  # qsize() returns the number of messages

# recommended approach: check before writing
if not q.full():
    q.put_nowait("message 4")

# check whether the queue is empty before reading
if not q.empty():
    for i in range(q.qsize()):
        print(q.get_nowait())
        # every get removes one message
        print("There are now %s messages" % q.qsize())
예제 #33
0
class Game(object):
    def __init__(self, M, N, K, player_classes, timeout=None):
        self.players = Player.create_players(player_classes)
        self.state = State.initial(M, N, K, self.players[0])
        self.timeout = timeout

    def play(self):
        while not self.state.is_terminal():
            self.before_move()
            next_move = self.request_move()
            self.state = self.state.result(next_move)
            self.after_move()
        return self.state.winner_color

    def request_move(self):
        # state = copy.deepcopy(self.state)
        state = self.state
        player = self.state.to_play

        if self.timeout is None:
            # No timeout, just use a single process
            action = self.state.to_play.move(state)
        else:
            # For passing the messages back and forth
            self.result_q = Queue(1)
            self.signal_q = Queue(1)

            # Dynamically augment the player instance
            def is_time_up(self):
                try:
                    self._signal_q.get_nowait()
                    return True
                except Empty:
                    return False

            def do_move(self, state, result_q, signal_q):
                sys.stdin = os.fdopen(self.fileno)
                self._signal_q = signal_q
                result_q.put_nowait(self.move(state))

            player.is_time_up = MethodType(is_time_up, player)
            player.do_move = MethodType(do_move, player)
            player.fileno = sys.stdin.fileno()

            # Boot a process for the player move
            move_process = Process(target=player.do_move,
                                   args=(state, self.result_q, self.signal_q))
            move_process.start()

            action = None
            try:
                action = self.result_q.get(True, self.timeout)

            except Empty:
                # Send the "time is up" warning
                self.signal_q.put_nowait(0)

                # Wait one second and get the move
                try:
                    action = self.result_q.get(True, 1)
                except Empty:
                    pass

            # Clear queues
            try:
                self.signal_q.get_nowait()
            except Empty:
                pass

            try:
                self.result_q.get_nowait()
            except Empty:
                pass

            if move_process.is_alive():
                move_process.terminate()
                move_process.join(1)

            if action is None:
                # If a move wasn't placed on the result queue in time, fall back to the first available move
                action = self.state.actions()[0]

        return action
예제 #34
0
            p[i].daemon = True
            p[i].start()

    coor0 = Queue(1)
    ROI = Queue(1)  # detection bounding box
    net_control = Queue(1)  # controls whether the neural network starts detecting
    graspPoint = Queue(1)  # grasp point
    print('Starting the kitchen tableware recycling robot...')

    p = []
    p.append(Process(target=Motion.move, args=(ROI, net_control, graspPoint, coor0), name='robot arm'))
    start(p)
    start_detecting()
    # p.append(Process(target=start_detecting, args=(net_control, ROI, graspPoint), name='neural network'))

    sleep(1)
    print('Starting up!')



    # start the 3 processes at the same time
    # while True:
    #     if coor0 !=None:
    #         break

    while True:
        E = input("Enter e to quit")
        if E == 'e':
            break
    net_control.put_nowait(E)
    print("Thank you for using it!")
예제 #35
0
            '$exists': False
        }
    }, {
        'following_count': {
            '$exists': True
        }
    }]
})
print(sum_tweets.count())
for i in range(5000):
    try:
        one_doc = [
            sum_tweets[i]['tweet_id'], sum_tweets[i]['following_count'],
            sum_tweets[i]['author_full_name']
        ]
        queue.put_nowait(one_doc)

    except Exception:
        continue

url = "https://twitter.com/login"
payload = {
    'session[username_or_email]': 'irony_research',
    'session[password]': 'research_irony'
}

# two more accounts

#payload1 = { 'session[username_or_email]': 'irony_research1',
#			'session[password]': 'research1_irony'
#			}
예제 #36
0
class CrawlProcess():

    _model = Task
    __instance = None
    count = 0

    @staticmethod
    def get_instance():
        """ Static access method. """
        print(CrawlProcess.__instance)
        if CrawlProcess.__instance is None:
            CrawlProcess()
        return CrawlProcess.__instance

    def __init__(self):
        if CrawlProcess.__instance is not None:
            raise Exception("This class is a singleton!")
        else:
            CrawlProcess.__instance = self
            #self.process = Process(target=CrawlProcess.crawl)
            self.process = None
            self.crawler_process = None
            self.task = None
            self.q = Queue()
            self.parent_conn, self.child_conn = Pipe()



    #@classmethod
    #def crawl(cls, q):
    #@classmethod
    #def crawl(cls, process, q):
    @classmethod
    def crawl(cls, q, conn):
        print()
        print()
        print('***************************************************************************************')
        print('crawl')

        def close(spider, reason):
            print(f'{multiprocessing.current_process().name}: *!!CLOSE')
            write_in_a_file('CrawlerProcess.signal.close', {'reason': reason}, 'task.txt')
            t = Task.objects.get_latest_crawler_task()
            d = datetime.today()
            t.description = f'spider closed with count: {CrawlProcess.count} at {str(d)}'
            t.result = CrawlProcess.count
            t.save()

        def open(spider):
            print(f'{multiprocessing.current_process().name}: *!!OPEN')
            try:
                name = spider.name
            except:
                name = str(spider)
            write_in_a_file('CrawlerProcess.signal.open', {'spider': name}, 'task.txt')
            CrawlProcess.count = 0
            try:
                t = Task.objects.get_latest_crawler_task()
                t.name = str(process.pid)
                t.save()
            except Exception as e:
                t.name = e
                t.save()
            #q.put_nowait()
            print()


        def scraped(item, response, spider):
            print(f'{multiprocessing.current_process().name}: *!!SCRAPED')

            print()
            CrawlProcess.count = CrawlProcess.count + 1
            n = CrawlProcess.count
            write_in_a_file('CrawlerProcess.signal.scraped_item', {'response': response, 'count': n}, 'task.txt')
            try:
                q.get_nowait()
                q.put_nowait(n)
            except:
                q.put_nowait(n)

        def stopped(*args, **kwargs):
            write_in_a_file('CrawlerProcess.signal.stopped', {'args': args, 'kwargs': kwargs}, 'task.txt')

        def error(*args, **kwargs):
            write_in_a_file('CrawlerProcess.signal.error', {'args': args, 'kwargs': kwargs}, 'task.txt')

        def send_by_pipe(item):
            try:
                conn.send(item)
                #conn.close()
            except Exception as e:
                write_in_a_file('CrawlProcess._crawl: error conn.send', {'conn error': e}, 'debug.txt')

        process = CrawlerProcess(get_project_settings())
        write_in_a_file('CrawlProcess.crawl: first', {'crawler_process': str(process), 'dir process': dir(process)},
                        'debug.txt')
        send_by_pipe(process)
        write_in_a_file('CrawlProcess.crawl: second', {'crawler_process': str(process), 'dir process': dir(process)},'debug.txt')
        process.crawl(InfoempleoSpider())
        write_in_a_file('CrawlProcess.crawl: third', {'crawler_process': str(process), 'dir process': dir(process)},'debug.txt')
        crawler = Crawler(InfoempleoSpider())
        crawler.signals.connect(open, signal=signals.spider_opened)
        crawler.signals.connect(scraped, signal=signals.item_scraped)
        crawler.signals.connect(close, signal=signals.spider_closed)
        crawler.signals.connect(stopped, signal=signals.engine_stopped)
        crawler.signals.connect(error, signal=signals.spider_error)

        write_in_a_file('CrawlProcess.crawl: before', {'crawler_process': str(process),'dir process': dir(process)},'debug.txt')

        process.crawl(crawler)
        write_in_a_file('CrawlProcess.crawl: after', {'crawler_process': str(process), 'dir process': dir(process)}, 'debug.txt')

        process.start()
        write_in_a_file('CrawlProcess._crawl: process started', {'crawler_process': str(process), 'dir process': dir(process)}, 'debug.txt')

        print('***************************************************************************************')
        print(f'CrawlerProcess: {process}')
        print(dir(process))
        print('***************************************************************************************')
        print()
        print()
        write_in_a_file('CrawlProcess.crawl', {'CrawlerProcess': str(process), 'dir(CrawlerProcess)': dir(process)}, 'task.txt')
        process.join()
        write_in_a_file('CrawlProcess.crawl: process.join', {}, 'task.txt')
        write_in_a_file('CrawlProcess.crawl: process.join', {}, 'spider.txt')

        print('Crawler Process has Finished!!!!!')



    @classmethod
    def crawl2(cls, q):
        while CrawlProcess.count < 15:
           # print(f'doing something: {CrawlProcess.count}')
            CrawlProcess.count = CrawlProcess.count + 1
            n = CrawlProcess.count
            try:
                q.get_nowait()
            except:
                pass
            q.put(n)
            if CrawlProcess.count % 5 == 0:
               # print(f'qsize: {q.qsize()}')
                time.sleep(5)


    def _clear_queue(self):
        while not self.q.empty():
            self.q.get_nowait()



    def _init_process(self, user):
        print(f'CrawlerProcess.init_process')
        self.q.put_nowait(0)
        self.process = Process(target=CrawlProcess.crawl, args=(self.q, self.child_conn,))
        self.task = Task(user=user, state=Task.STATE_PENDING, type=Task.TYPE_CRAWLER)



    def _start_process(self):
        print(f'CrawlerProcess._start_process')
        self.init_datetime = timezone.now()  # Before create the task
        self.process.start()
        self.task.pid = self.process.pid
        write_in_a_file('CrawlProcess._start_process: process started', {'pid': self.process.pid}, 'debug.txt')
        self.task.state = Task.STATE_RUNNING
        self.task.save()
        self.crawler_process = self.parent_conn.recv()
        write_in_a_file('CrawlProcess._start_process: conn.recv', {'crawler_process':str(self.crawler_process), 'dir crawler_process':dir(self.crawler_process)}, 'debug.txt')
        write_in_a_file('CrawlProcess._start_process', {'CrawlerProcess': str(self.crawler_process), 'dir(CrawlerProcess)': dir(self.crawler_process)},'task.txt')


    def _reset_process(self, state=Task.STATE_FINISHED):
        print(f'CrawlerProcess._reset_process({state})')
        try:
            self.process.terminate()
            write_in_a_file('_reset_process terminated (from stop)', {'is_running': self.process.is_alive()}, 'debug.txt')
            self.task.result = CrawlProcess.count
            self.task.state = state
            self.task.save()
            self.process.join()  # ! IMPORTANT after .terminate -> .join
            write_in_a_file('_reset_process joined (from stop)', {'is_running': self.process.is_alive()}, 'debug.txt')
        except:
            pass
        try:
            self.result = self.q.get_nowait()
        except Exception as e:
            pass
        self._clear_queue()


    def _update_process(self):
        print('CrawlerProcess._update_process')
        print(f'process is alive: {self.process and self.process.is_alive()}')
        if self.process and not self.process.is_alive():
            self._reset_process()


    def start(self, user=None, **kwargs):
        """
        Si el proceso no está vivo es que o no se ha iniciado aún o que ya ha terminado, así que
        se guardan los datos almacenados y se ejecuta el proceso.
        Si el proceso está vivo no se hace nada.

        :param user: The uses that make the request
        :param kwargs:
        :return:
        """
        print(f'self.q.empty(): {self.q.empty()}')
        print(f'self.q.qsize(): {self.q.qsize()}')

        if not self.is_scrapping():
            if self.task and (self.task.state == Task.STATE_RUNNING):
                self._reset_process()
            self._init_process(user)
            self._start_process()

    def stop(self):
        print('CrawlerProcess.stop')
        self._reset_process(Task.STATE_INCOMPLETE)
       # self.crawler_process.stop()
        #self.crawler_process.join()


    def join(self):
        self.process.join()

    def get_actual_task(self):
        self._update_process()
        return self.task

    def get_latest_task(self):
        last_task = Task.objects.get_latest_crawler_task()
        # If the latest task from the db is in STATE_RUNNING and is not the current task, it is an incomplete task...
        # ... and its state has to be updated
        is_an_incomplete_task = (
                last_task and
                last_task.state == Task.STATE_RUNNING and
                (not self.task or self.task.pk != last_task.pk)
        )
        if is_an_incomplete_task:
            last_task.state = Task.STATE_INCOMPLETE
            last_task.save()
        return last_task

    def is_scrapping(self):
        print(CrawlProcess.is_scrapping)
        if self.process:
            return self.process.is_alive()
        else:
            return False

    def _get_scraped_jobs(self):
        latest_task = Task.objects.get_latest_crawler_task()
        return Job.objects.filter(Q(created_at__gte=latest_task.created_at) | Q(updated_at__gte=latest_task.created_at))

    def get_scraped_items_number(self):
        print()
        print('!!!! CrawlProcess.get_scraped_items_number');print();
        count = CrawlProcess.count
        try:
            print(self.q)
            #print(f'CrawlProcess.count: {CrawlProcess.count}')
            #print(f'qsize: {self.q.qsize()}')
            count = self.q.get(block=True, timeout=5)
            CrawlProcess.count = count
            print(f'q.count: {count}')
        except Exception as e:
            print(f'get_scraped_items_number')
           # save_error(e, {'count': count})
        return count


    def get_scraped_items_percentage(self):
        # Compute the total using the items scraped by the previous task
        count = self.get_scraped_items_number()
        task = Task.objects.get_latest_finished_crawler_task()
        if task:
            old_result = task.result or 20000
        else:
            old_result = 20000

        if count < old_result:
            total = old_result
        else:
            total = count
        db_count = self._get_scraped_jobs().count()

        try:
            percentage = round(db_count/total, 2)
        except:
            percentage = 0

        if  percentage >= 0.95 and self.is_scrapping():
            percentage = 0.95

        return percentage
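
The scraped() callback above keeps only the most recent item count in the queue by popping any stale value before putting the new one. A self-contained sketch of that "latest value" idiom follows (hypothetical names); the try/except blocks are needed because get_nowait() and put_nowait() on a multiprocessing.Queue can race with its feeder thread.

import time
from multiprocessing import Queue
from queue import Empty, Full


def publish_latest(q, value):
    """Replace whatever is sitting in the single-slot queue with the newest value."""
    try:
        q.get_nowait()        # discard the stale value, if any
    except Empty:
        pass
    try:
        q.put_nowait(value)
    except Full:
        pass                  # slot still occupied (e.g. an unflushed item); drop this update


if __name__ == '__main__':
    counter = Queue(1)
    for n in range(1, 6):
        publish_latest(counter, n)
        time.sleep(0.05)      # give the feeder thread time to flush the item
    print('latest count:', counter.get(timeout=1.0))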
예제 #37
0
파일: mylog.py 프로젝트: nonenull/SOWEB
class Mylog:
    def __init__(self):
        self.__LOG_PATH = 'logs'
        self.__LEVEL_ENUM = {
            'ERROR' : 0,
            'WARNING' : 1,
            'INFO' : 2,
            'DEBUG' : 3
        }
        self.__checkLogPath()
        self.__setLogConfig()
        logging.config.dictConfig(self.__LOGGING)

        self.__queue = Queue()

    def __setLogConfig(self):
        # logging configuration
        self.__LOGGING = {
            # version, always 1
            'version': 1,
            'disable_existing_loggers': True,
            'formatters': {
                'verbose': {'format': '%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s'},
                'simple': {'format': '%(levelname)s %(message)s'},
                'default': {
                    'format': '%(asctime)s %(levelname)s %(message)s'
                }
            },
            'handlers': {
                'console': {
                    'level': 'DEBUG',
                    'class': 'logging.StreamHandler',
                    'formatter': 'default'
                },
                'debugFile': {
                    'level': 'DEBUG',
                    # TimedRotatingFileHandler writes the log to a file and, at a fixed
                    # interval, renames it to 'original filename + timestamp'
                    # Python provides other handlers as well; see logging.handlers
                    'class': 'logging.handlers.TimedRotatingFileHandler',
                    'formatter': 'default',
                    # the entries below are passed as arguments to the
                    # TimedRotatingFileHandler constructor

                    # the directory containing filename must exist
                    'filename': '%s/applog-debug.log'%self.__LOG_PATH,
                    # rotation interval
                    'when': 'D',
                    'interval': 1,
                    'encoding': 'utf8',
                },
                'infoFile': {
                    'level': 'INFO',
                    # TimedRotatingFileHandler writes the log to a file and, at a fixed
                    # interval, renames it to 'original filename + timestamp'
                    # Python provides other handlers as well; see logging.handlers
                    'class': 'logging.handlers.TimedRotatingFileHandler',
                    'formatter': 'default',
                    # the entries below are passed as arguments to the
                    # TimedRotatingFileHandler constructor

                    # the directory containing filename must exist
                    'filename': '%s/applog-info.log'%self.__LOG_PATH,
                    # rotated daily ('D')
                    'when': 'D',
                    'interval': 1,
                    'encoding': 'utf8',
                },
                'errorFile': {
                    'level': 'ERROR',
                    # TimedRotatingFileHandler writes the log to a file and, at a fixed
                    # interval, renames it to 'original filename + timestamp'
                    # Python provides other handlers as well; see logging.handlers
                    'class': 'logging.handlers.TimedRotatingFileHandler',
                    'formatter': 'default',
                    # the entries below are passed as arguments to the
                    # TimedRotatingFileHandler constructor

                    # the directory containing filename must exist
                    'filename': '%s/applog-error.log'%self.__LOG_PATH,
                    'when': 'D',
                    'interval': 1,
                    'encoding': 'utf8',
                }
            },
            'loggers': {
                'debug': {
                    'level': 'DEBUG',
                    'handlers': self.__setHandlers('debug'),
                    'propagate': True
                },
                'info': {
                    'level': 'INFO',
                    'handlers': self.__setHandlers('info'),
                    'propagate': True
                },
                'error': {
                    'level': 'ERROR',
                    'handlers': self.__setHandlers('error'),
                    'propagate': True
                }
            }
        }

    def __checkLogPath(self):
        # check that the logs directory exists and create it if needed
        if not os.path.exists(self.__LOG_PATH):
            os.makedirs(self.__LOG_PATH)

    #'handlers': ['console', 'debugFile'],
    def __setHandlers(self,level):
        handlers = []
        handlers.append('%sFile'%level)
        if not DAEMON:
            handlers.append('console')

        return handlers

    def __getStack(self,depth = 2):
        frame = _getframe(depth)
        code = frame.f_code
        moduleName = frame.f_globals.get('__name__')
        funcName = code.co_name
        lineNumber = code.co_firstlineno
        return '[%s.%s:%d] - ' % (moduleName, funcName, lineNumber)

    def info(self,*args):
        self.__queue.put_nowait(('info',args))

    def debug(self,*args):
        args = list(args)
        args.insert(0,self.__getStack())
        self.__queue.put_nowait(('debug',args))

    def error(self,*args):
        args = list(args)
        args.insert(0,self.__getStack())
        self.__queue.put_nowait(('error',args))

    def startLogHandles(self):
        while 1:
            level,args = self.__queue.get()
            logger = logging.getLogger(level)
            if self.__LEVEL_ENUM.get(LOG_LEVEL) >= self.__LEVEL_ENUM.get('INFO'):
                log = getattr(logger,level)
                log(' '.join(map(str,args)))
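
The comments above describe how TimedRotatingFileHandler rolls a log file over by time. A minimal, standalone sketch of applying such a dict-based configuration with logging.config.dictConfig (the path and the handler/logger names here are illustrative assumptions, not taken from the class above):

import logging
import logging.config
import os

LOG_PATH = './logs'  # assumed directory; it must exist before configuring
os.makedirs(LOG_PATH, exist_ok=True)

logging.config.dictConfig({
    'version': 1,
    'formatters': {
        'default': {'format': '%(asctime)s %(levelname)s %(message)s'}
    },
    'handlers': {
        'debugFile': {
            'level': 'DEBUG',
            'class': 'logging.handlers.TimedRotatingFileHandler',
            'formatter': 'default',
            'filename': '%s/applog-debug.log' % LOG_PATH,
            'when': 'D',        # roll over once per day
            'interval': 1,
            'encoding': 'utf8',
        }
    },
    'loggers': {
        'debug': {'level': 'DEBUG', 'handlers': ['debugFile']}
    },
})

logging.getLogger('debug').debug('rotation configured')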
예제 #38
0
class Logger(object):
    """
    The actual Logger Frontend, passing logging messages to the assigned
    logging backend if appropriate or to python's logging module if not
    """
    def __init__(self,
                 backend: BaseBackend,
                 max_queue_size: int = None,
                 logging_frequencies=None,
                 reduce_types=None,
                 level=logging.NOTSET):
        """

        Parameters
        ----------
        backend : :class:`delira.logging.base_backend.BaseBackend`
            the logging backend to use
        max_queue_size : int
            the maximum size for the queue; if queue is full, all additional
            logging tasks will be dropped until some tasks inside the queue
            were executed; Per default no maximum size is applied
        logging_frequencies : int or dict
            specifies how often to log for each key.
            If int: integer will be applied to all valid keys
            if dict: should contain a frequency per valid key. Missing keys
            will be filled with a frequency of 1 (log every time)
            None is equal to empty dict here.
        reduce_types : str of FunctionType or dict
            Values are logged in each iteration. This argument specifies,
            how to reduce them to a single value if a logging_frequency
            besides 1 is passed

            if str:
                specifies the reduction type to use. Valid types are
                'last' | 'first' | 'mean' | 'median' | 'max' | 'min'.
                The given type will be mapped to all valid keys.
            if FunctionType:
                specifies the actual reduction function. Will be applied for
                all keys.
            if dict: should contain pairs of valid logging keys and either str
                or FunctionType. Specifies the logging value per key.
                Missing keys will be filled with a default value of 'last'.
                Valid values for strings are
                'last' | 'first' | 'mean' | 'median' | 'max' | 'min'.
        level : int
            the logging value to use if passing the logging message to
            python's logging module because it is not appropriate for logging
            with the assigned logging backend

        Warnings
        --------
        Since the intermediate values between two logging steps are stored in
        memory to enable reduction, this might cause OOM errors easily
        (especially if the logged items are still on GPU).
        If this occurs you may want to choose a lower logging frequency.

        """

        # 0 means unlimited size, but None is more readable
        if max_queue_size is None:
            max_queue_size = 0

        # convert to empty dict if None
        if logging_frequencies is None:
            logging_frequencies = {}

        # if int: assign int to all possible keys
        if isinstance(logging_frequencies, int):
            logging_frequencies = {
                k: logging_frequencies
                for k in backend.KEYWORD_FN_MAPPING.keys()
            }
        # if dict: update missing keys with 1 and make sure other values
        # are ints
        elif isinstance(logging_frequencies, dict):
            for k in backend.KEYWORD_FN_MAPPING.keys():
                if k not in logging_frequencies:
                    logging_frequencies[k] = 1
                else:
                    logging_frequencies[k] = int(logging_frequencies[k])
        else:
            raise TypeError("Invalid Type for logging frequencies: %s" %
                            type(logging_frequencies).__name__)

        # assign frequencies and create empty queues
        self._logging_frequencies = logging_frequencies
        self._logging_queues = {}

        default_reduce_type = "last"
        if reduce_types is None:
            reduce_types = default_reduce_type

        # map string and function to all valid keys
        if isinstance(reduce_types, (str, FunctionType)):
            reduce_types = {
                k: reduce_types
                for k in backend.KEYWORD_FN_MAPPING.keys()
            }

        # should be dict by now!
        if isinstance(reduce_types, dict):
            # check all valid keys for occurrences
            for k in backend.KEYWORD_FN_MAPPING.keys():
                # use default reduce type if necessary
                if k not in reduce_types:
                    reduce_types[k] = default_reduce_type
                # check it is either valid string or already function type
                else:
                    if not isinstance(reduce_types[k], FunctionType):
                        assert reduce_types[k] in possible_reductions()
                        reduce_types[k] = str(reduce_types[k])
                # map all strings to actual functions
                if isinstance(reduce_types[k], str):
                    reduce_types[k] = get_reduction(reduce_types[k])

        else:
            raise TypeError("Invalid Type for logging reductions: %s" %
                            type(reduce_types).__name__)

        self._reduce_types = reduce_types

        self._abort_event = Event()
        self._flush_queue = Queue(max_queue_size)
        self._backend = backend
        self._backend.set_queue(self._flush_queue)
        self._backend.set_event(self._abort_event)
        self._level = level

    def log(self, log_message: dict):
        """
        Main Logging Function, Decides whether to log with the assigned
        backend or python's internal module

        Parameters
        ----------
        log_message : dict
            the message to log; Should be a dict, where the keys indicate the
            logging function to execute, and the corresponding value holds
            the arguments necessary to execute this function

        Raises
        ------
        RuntimeError
            If the abort event was set externally

        """

        try:
            if self._abort_event.is_set():
                self.close()
                raise RuntimeError(
                    "Abort-Event in logging process was set: %s" %
                    self._backend.name)

            # convert tuple to dict if necessary
            if isinstance(log_message, (tuple, list)):
                if len(log_message) == 2:
                    log_message = (log_message, )
                log_message = dict(log_message)

            # try logging and drop item if queue is full
            try:
                # logging appropriate message with backend
                if isinstance(log_message, dict):
                    # multiple logging instances at once possible with
                    # different keys
                    for k, v in log_message.items():
                        # append tag if tag is given, because otherwise we
                        # would enqueue same types but different tags in same
                        # queue
                        if "tag" in v:
                            queue_key = k + "." + v["tag"]
                        else:
                            queue_key = k

                        # create queue if necessary
                        if queue_key not in self._logging_queues:
                            self._logging_queues[queue_key] = []

                        # append current message to queue
                        self._logging_queues[queue_key].append({k: v})
                        # check if logging should be executed
                        if (len(self._logging_queues[queue_key]) %
                                self._logging_frequencies[k] == 0):
                            # reduce elements inside queue
                            reduce_message = reduce_dict(
                                self._logging_queues[queue_key],
                                self._reduce_types[k])
                            # flush reduced elements
                            self._flush_queue.put_nowait(reduce_message)
                            # empty queue
                            self._logging_queues[queue_key] = []
                else:
                    # logging inappropriate message with python's logging
                    logging.log(self._level, log_message)
            except Full:
                pass

        # if an exception was raised anywhere, the abort event will be set
        except Exception as e:
            self._abort_event.set()
            raise e

    def __call__(self, log_message: dict):
        """
        Makes the class callable and forwards the call to
        :meth:`delira.logging.base_logger.Logger.log`

        Parameters
        ----------
        log_message : dict
            the logging message to log

        Returns
        -------
        Any
            the return value obtained by
            :meth:`delira.logging.base_logger.Logger.log`

        """
        return self.log(log_message)

    def close(self):
        """
        Function to close the actual logger; Waits for queue closing and sets
        the abortion event

        """
        if hasattr(self, "_flush_queue"):
            self._flush_queue.close()
            self._flush_queue.join_thread()

        if hasattr(self, "abort_event"):
            self._abort_event.set()

    def __del__(self):
        """
        Function to be executed, when class instance will be deleted;
        Calls :meth:`delira.logging.base_logger.Logger.close`

        """

        self.close()
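
The docstring above explains per-key logging frequencies and reductions: values are buffered and only a reduced value is flushed every N-th call. A standalone sketch of that buffer-reduce-flush idea (the helper below is an illustration with assumed names, not part of the delira API):

from statistics import mean

def make_buffered_logger(freq, reduce_fn, flush=print):
    # collect values and only flush a reduced value every `freq`-th call
    buffer = []

    def log(value):
        buffer.append(value)
        if len(buffer) % freq == 0:
            flush(reduce_fn(buffer))
            buffer.clear()

    return log

log_loss = make_buffered_logger(freq=3, reduce_fn=mean)
for loss in [1.0, 2.0, 3.0, 4.0, 5.0, 6.0]:
    log_loss(loss)  # prints 2.0 after the third value and 5.0 after the sixth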
            if i==starttime+1000:
               pub_gamma.publish(1)
               caption_queue.put_nowait('STIM right')
            if i==starttime+1500:
               pub_gamma.publish(0)
               caption_queue.put_nowait('STIM off')
            if i==starttime+2000:
               break
            rospy.sleep(0.01)
            i += 1
    pub_gamma.publish(0)  

if __name__ == "__main__":
    timestamp_shmem = Value('L', 0L, lock=False)
    caption_queue = Queue()
    caption_queue.put_nowait('preparing')
    
    save_queue = Queue()
    save_thread_stop_requested = multiprocessing.Event()
    save_thread = multiprocessing.Process(target=save_frames, args=(save_queue,save_thread_stop_requested))
    save_thread.start()
    
    grab_thread_stop_requested = multiprocessing.Event()
    grab_thread = multiprocessing.Process(target=grab_frames, args=(0,12,32, save_queue, caption_queue, timestamp_shmem, grab_thread_stop_requested))
    grab_thread.start()
    
    do_experiment(caption_queue, timestamp_shmem)
    
    print 'Asking grabbing process to stop'
    grab_thread_stop_requested.set()
    grab_thread.join()
예제 #40
0
파일: netpack.py 프로젝트: pohmelie/netpack
class Netpack():
    def __init__(self, server_ips, mac=None, f=None):
        if f:
            self.f = f

        self.server_ips = server_ips
        raw_ips = lambda ip: bytes(map(int, ip.split(".")))
        self.server_ips_raw = tuple(map(raw_ips, server_ips))

        self.qi = Queue()
        self.qo = Queue()
        ConnectionManager(self.qi, self.qo, server_ips).start()

        self.ndisapi = windll.ndisapi
        self.kernel32 = windll.kernel32

        self.hnd = self.ndisapi.OpenFilterDriver(DRIVER_NAME_A)
        tmp = TCP_AdapterList()
        self.ndisapi.GetTcpipBoundAdaptersInfo(self.hnd, byref(tmp))

        self.mode = ADAPTER_MODE()
        self.mode.dwFlags = MSTCP_FLAG_SENT_TUNNEL | MSTCP_FLAG_RECV_TUNNEL

        adapter_id = None
        if mac != None:
            for i in range(tmp.m_nAdapterCount):
                if mac == pmac(tmp.m_czCurrentAddress[i]):
                    adapter_id = i + 1
                    break
            else:
                print("Can't find mac = {} in adapters list\n".format(mac))
        if adapter_id == None:
            print("Use 'ipconfig /all' to determine your mac address")
            print("you can write it in 'mac.txt' for more silent run\n")
            for i in range(tmp.m_nAdapterCount):
                print("{}). {}".format(i + 1, pmac(tmp.m_czCurrentAddress[i])))
            adapter_id = int(input("#: "))
            mac = pmac(tmp.m_czCurrentAddress[adapter_id - 1])

        print("\nUsing:\n\tadapter id = {}\n\tmac = {}".format(adapter_id, mac))
        self.mode.hAdapterHandle = tmp.m_nAdapterHandle[adapter_id - 1]

        self.hEvent = self.kernel32.CreateEventW(None, True, False, None)
        self.ndisapi.SetPacketEvent(self.hnd, self.mode.hAdapterHandle, self.hEvent)

        self.request = ETH_REQUEST()
        self.packetbuffer = INTERMEDIATE_BUFFER()
        self.request.EthPacket.Buffer = pointer(self.packetbuffer)
        self.request.hAdapterHandle = self.mode.hAdapterHandle

        self.ndisapi.SetAdapterMode(self.hnd, byref(self.mode))
        register(self.release)

    def release(self):
        self.mode.dwFlags = 0
        self.ndisapi.SetPacketEvent(self.hnd, self.mode.hAdapterHandle, None)
        self.kernel32.CloseHandle(self.hEvent)
        self.ndisapi.SetAdapterMode(self.hnd, byref(self.mode))
        self.ndisapi.FlushAdapterPacketQueue(self.hnd, self.mode.hAdapterHandle)

    def make_request(self, raw, m_dwDeviceFlags):
        _request = ETH_REQUEST()
        _packetbuffer = INTERMEDIATE_BUFFER()

        _request.EthPacket.Buffer = pointer(_packetbuffer)
        _request.hAdapterHandle = self.mode.hAdapterHandle
        _packetbuffer.m_dwDeviceFlags = m_dwDeviceFlags
        _packetbuffer.m_Flags = 130

        _packetbuffer.m_Length = len(raw)
        for i in range(_packetbuffer.m_Length):
            _packetbuffer.m_IBuffer[i] = raw[i]
        return _request

    def checkfortcp(self, t):  # IPv4 & TCP protocols
        return t[12:14] == b"\x08\x00" and t[14 + 9] == 6

    def checkforips(self, t):
        s, d = t[14 + 12:14 + 16], t[14 + 16:14 + 20]
        return s in self.server_ips_raw or d in self.server_ips_raw

    def send(self, request):
        if request.EthPacket.Buffer.contents.m_dwDeviceFlags == PACKET_FLAG_ON_SEND:
            self.ndisapi.SendPacketToAdapter(self.hnd, byref(request))
        else:
            self.ndisapi.SendPacketToMstcp(self.hnd, byref(request))

    def sendpack(self, eth):
        ip = eth.next
        if ip.header.source in self.server_ips:
            flag = PACKET_FLAG_ON_RECEIVE
        else:
            flag = PACKET_FLAG_ON_SEND
        self.send(self.make_request(ip_stack.build(eth), flag))

    def mainloop(self):
        while True:
            self.kernel32.WaitForSingleObject(self.hEvent, 10)
            while self.ndisapi.ReadPacket(self.hnd, byref(self.request)):
                d = bytes(self.packetbuffer.m_IBuffer[:self.packetbuffer.m_Length])
                if self.checkfortcp(d) and self.checkforips(d):
                    self.qi.put_nowait(ip_stack.parse(d))
                else:
                    self.send(self.request)

            while not self.qo.empty():
                self.sendpack(self.qo.get())
            self.kernel32.ResetEvent(self.hEvent)
예제 #41
0
def main():
    first = True

    # initialise placeholders
    rendered_mesh = np.zeros((0, 2, 3))
    shapes = []
    np_mesh = None  #np.zeros((1,3))

    # Get first image to set image size
    cap = cv2.VideoCapture(0)
    _, image = cap.read()
    image_width = image.shape[1]
    image_height = image.shape[0]

    # Setup interprocess communication
    # exit_event is shared across all processes
    exit_event = Event()

    # Set up the queues for interprocess comm
    dlib_in_queue, dlib_out_queue = Queue(maxsize=1), Queue(maxsize=1)
    face_detector_process = Dlib_detector_process(exit_event, dlib_in_queue,
                                                  dlib_out_queue, p)
    face_detector_process.start()

    eos_in_queue, eos_out_queue = Queue(maxsize=1), Queue(maxsize=1)
    eos_process = Eos_process(exit_event, eos_in_queue, eos_out_queue,
                              share_path, image_width, image_height)
    eos_process.start()

    matplotlib_in_queue, matplotlib_out_queue = Queue(maxsize=1), Queue(
        maxsize=1)
    matplotlib_process = Matplotlib_process(exit_event, matplotlib_in_queue,
                                            matplotlib_out_queue)
    matplotlib_process.start()

    # setup counters
    frame_count = 0
    last_time = time.time()

    try:
        while True:
            # Getting out image by webcam
            _, image = cap.read()
            frame_count += 1

            image_width = image.shape[1]
            image_height = image.shape[0]

            # Converting the image to gray scale
            gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

            # Feed the gray scale image to dlib for face detection
            try:
                dlib_in_queue.put_nowait(gray)
            except queues.Full:
                pass

            # Get back the landmarks (skip if not ready, same for all)
            try:
                shapes = dlib_out_queue.get_nowait()
            except queues.Empty:
                pass

            # Feed the landmarks to eos
            try:
                eos_in_queue.put_nowait(shapes)
            except queues.Full:
                pass

            # Get the mesh back
            try:
                np_mesh = eos_out_queue.get_nowait()
            except queues.Empty:
                pass

            # Send the mesh for rendering, only if it exists
            try:
                if np_mesh is not None:
                    matplotlib_in_queue.put_nowait(np_mesh)
            except queues.Full:
                pass

            # Get the render back
            try:
                rendered_mesh = matplotlib_out_queue.get_nowait()
            except queues.Empty:
                pass

            # Draw all the found coordinate points (x, y) on our image
            for shape in shapes:
                for (x, y) in shape:
                    cv2.circle(image, (x, y), 2, (0, 255, 0), -1)

            # Make a second image to put the rendered mesh onto
            face_image = np.zeros_like(image)
            face_image[:rendered_mesh.shape[0],
                       :rendered_mesh.shape[1]] = rendered_mesh[:, :, :3]

            # Juxtapose them
            image = np.hstack((image, face_image))

            # Draw fps counter
            font = cv2.FONT_HERSHEY_SIMPLEX
            cv2.putText(image,
                        '{:.0f} fps'.format(1 / (time.time() - last_time)),
                        (image_width, image_height - 50), font, 1, (0, 0, 0),
                        2, cv2.LINE_AA)
            last_time = time.time()

            # Show the image with opencv
            cv2.imshow("Output", image)

            k = cv2.waitKey(5) & 0xFF
            if k in [27, ord('q')]:
                break

        cv2.destroyAllWindows()
    finally:
        # Release all
        cap.release()
        exit_event.set()
        logging.debug('exit_event set')

        face_detector_process.join()
        logging.debug('dlib joined')
        eos_process.join()
        logging.debug('eos joined')
        matplotlib_process.join()
        logging.debug('all process joined')
        for queue in [
                dlib_in_queue,
                dlib_out_queue,
                eos_in_queue,
                eos_out_queue,
                matplotlib_in_queue,
                matplotlib_out_queue,
        ]:
            clear_queue(queue)
        logging.debug('queues cleared')
예제 #42
0
print(q.full())
q.put('three')
print(q.full())

# print(q.qsize())

# Because the message queue is already full, both try blocks below raise an exception;
# the first one waits 2 seconds before raising, the second raises immediately.
# In practice the first one does raise, and q.qsize() raises as well.

# try:
#     q.put('four',True,2)
# except:
#     print('The message queue is full; current message count: %s' % q.qsize())

try:
    q.put_nowait('four')
except:
    print('The message queue is full')

# Recommended approach: check whether the queue is full before writing
if not q.full():
    q.put_nowait('four')

# When reading, check whether the queue is empty before reading
if not q.empty():
    for i in range(3):
        print(q.get_nowait())
"""

Notes:
1. When a Queue() object is initialized (e.g. q=Queue()), if no maximum number of messages is given in the parentheses, or the number is negative,
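
For reference, a Queue() created without a positive maxsize is effectively unbounded; a minimal sketch of that case:

from multiprocessing import Queue

q = Queue()          # no maxsize given -> effectively unbounded
for i in range(1000):
    q.put_nowait(i)  # never raises queue.Full
print(q.full())      # False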
예제 #43
0
        s1 = socket.socket( socket.AF_PACKET , socket.SOCK_RAW , socket.ntohs(0x0003))
        s1.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        s1.bind((mirrorport,3))
    except socket.error , msg:
        print 'Socket could not be created.'
        sys.exit()
    serveripn=[]
    for ip in serverip:
        serveripn.append(socket.inet_aton(ip))
    while True:
        try:
            packet = s1.recvfrom(4096)
            packet = packet[0]
            if packet[12:13]=='\x08' and packet[23]=='\x06':
                if packet[26:30] in serveripn or packet[30:34] in serveripn:
                    queue0.put_nowait(packet)
        except:
            pass

def pre():
    r = redis.StrictRedis(host=redisserver, port=6379, db=1)
    c = redis.StrictRedis(host=redisserver, port=6379, db=2)
    h1 = redis.StrictRedis(host=redisserver, port=6379, db=3)
    h2 = redis.StrictRedis(host=redisserver, port=6379, db=4)
    while True:
        try:
            packet=queue0.get()
            if packet:
                s_addr = socket.inet_ntoa(packet[26:30]);
                d_addr = socket.inet_ntoa(packet[30:34]);
            
예제 #44
0
class JpegReceiver(Jpeg):
    
    def __init__(self, host, port, maxq=5):
        self.addr = (host, port)
        self.myname = 'skywatcher01' #socket.gethostname()
        self.q = Queue(maxsize=maxq)
        self.running = False
        self.inb = b''
        self.lsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.lsock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        self.sel = selectors.DefaultSelector()
        self.sel.register(self.lsock, selectors.EVENT_READ, self.accept_wrapper)
    
    def accept_wrapper(self, key, mask):
        sock = key.fileobj
        conn, addr = sock.accept()
        print('Accepted connection from {0}:{1}'.format(*addr))
        conn.setblocking(False)
        self.sel.register(conn, selectors.EVENT_READ, self.service_connection)

    def service_connection(self, key, mask):
        sock = key.fileobj
        recv_data = sock.recv(1024)
        if recv_data:
            # This approach is not, in general, correct for finding the start/end
            # of the JPEG image in the TCP stream. However, for the simple
            # MJPEG stream from the web cam, it appears to work fine.
            # Stream is modified with additional header that includes:
            # SOI(2b):NameLength(1b):RpiName(NameLength):(\n)-SOI... 
            if self.EOI in recv_data:
                eoi = recv_data.find(self.EOI)
                soi = self.inb.find(self.SOI)
                jpeg = self.inb[soi:] + recv_data[:eoi+2]
                self.inb = recv_data[eoi+2:]
                COMlenpos = jpeg.find(self.COM) + 2
                try:
                    COMlen, = struct.unpack('>h', jpeg[COMlenpos:COMlenpos+2])
                    bname = jpeg[COMlenpos+2:COMlenpos+COMlen]
                    name = bname.decode()
                    self.q.put_nowait((name, jpeg))
                except UnicodeDecodeError:
                    print('[INFO] Corrupt Comment field....')
                except queue.Full:
                    self.q.get()
            else:
                self.inb += recv_data
        else:
            self.sel.unregister(sock)
            sock.close()
     
    def run(self):
        w = Process(target=self.worker)
        self.running = True
        w.start()

    def stop(self):
        self.running = False
        self.sel.unregister(self.lsock)
        self.lsock.close()
        self.q.close()
        self.sel.close()
            
    def worker(self):
        self.lsock.bind(self.addr)
        self.lsock.listen()
        self.lsock.setblocking(False)
        print("listeneing on {0}:{1}".format(*self.addr))

        try:
            while self.running:
                events = self.sel.select(timeout=None)
                for key, mask in events:
                    callback = key.data
                    callback(key, mask)
        
        except KeyboardInterrupt: 
            print("[INFO] Keyboard Interrupt, stopping and closing socket...")
            self.stop()
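
The comment in service_connection above relies on the sender embedding the Pi's name in a JPEG comment (COM) segment. A standalone sketch of building and parsing such a segment (0xFFFE is the standard JPEG COM marker; whether the Jpeg base class uses exactly these marker values is an assumption):

import struct

COM = b'\xff\xfe'                   # standard JPEG comment marker (assumed here)
name = b'skywatcher01'
segment = COM + struct.pack('>h', len(name) + 2) + name   # the length field counts itself

pos = segment.find(COM) + 2
COMlen, = struct.unpack('>h', segment[pos:pos + 2])
assert segment[pos + 2:pos + COMlen] == name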
예제 #45
0
def grabCam(cam_q: Queue, is_running, form, mode="camera", FrameRate=30, secs=10, c_num=0, savepath="",
            saving=Value(c_bool, False)):
    if not os.path.isdir(os.path.join(savepath, "frames")):
        os.mkdir(os.path.join(savepath, "frames"))
        savepath = os.path.join(savepath, "frames")
    print(f"The camera is action in {mode} mode")
    """
    args:
        cam_q : the queue used to communicate with cam2img
    kwargs:
        mode : the mode of this function
            allow value:
            {"camera" -> default
             "video"}

        FrameRate : the fps of camera
        secs : recording time
        c_num : the index of the camera to use
    """

    shape, dtype = (0, 0), 'uint8'
    if mode == "video":

        video = cv2.VideoCapture("F_F_03.avi")

        shape = (int(video.get(cv2.CAP_PROP_FRAME_HEIGHT)),
                 int(video.get(cv2.CAP_PROP_FRAME_WIDTH)), 3)
        dtype = 'uint8'
        form[0] = shape
        form[1] = dtype

        s = 0
        if not video.isOpened():
            print("fail")
        print("video")
        start = time.time()
        while video.isOpened():
            ret, frame = video.read()
            s += 1
            cam_q.put_nowait(frame.tobytes())
            time.sleep(0.03)
            if time.time() - start > secs:
                is_running.value = False

                break
        print("stop")
        return

    if mode == "camera":

        # connecting to the first available camera
        try:
            T1 = pylon.TlFactory.GetInstance()
            lstDevices = T1.EnumerateDevices()
            if len(lstDevices) == 0:
                print("no camera is detected")
            if len(lstDevices) <= c_num:
                print(f"ther is no number {c_num} camera")
            camera = pylon.InstantCamera(T1.CreateFirstDevice(lstDevices[c_num]))

            print("using camera : ",
                  camera.GetDeviceInfo().GetModelName())
        except:
            print("init fail")
            exit()

        camera.Open()
        camera.AcquisitionFrameRateEnable.SetValue(True)
        camera.AcquisitionFrameRate.SetValue(FrameRate)
        camera.BinningVertical.SetValue(1)
        camera.BinningHorizontal.SetValue(1)

        PixelFormat = camera.PixelFormat.GetValue()

        print("resolution : ", f"{camera.Width.GetValue()}X{camera.Height.GetValue()}")
        print("Format : ", PixelFormat)

        camera.BinningVerticalMode.SetValue("Average")
        camera.BinningHorizontalMode.SetValue("Average")

        if camera.Width.GetValue() / 1000 > 1 or camera.Height.GetValue() / 1000 > 1:
            rat = max(camera.Height.GetValue() / 1000, camera.Width.GetValue() / 1000)
            print("binning rate = ", rat)
            camera.BinningVertical.SetValue(int(rat))
            camera.BinningHorizontal.SetValue(int(rat))

        grabResult = camera.GrabOne(1000)
        if grabResult.GrabSucceeded():
            pt = grabResult.GetPixelType()
            if pylon.IsPacked(pt):
                _, new_pt = grabResult._Unpack10or12BitPacked()
                shape, dtype, pixelformat = grabResult.GetImageFormat(new_pt)
            else:
                shape, dtype, pixelformat = grabResult.GetImageFormat(pt)
                _ = grabResult.GetImageBuffer()

        else:
            print("grab Failed")
            exit()

        form[0] = shape
        form[1] = dtype
        counter = FrameRate * secs

        s = 0
        print(f"starting recording {secs} secs with {FrameRate}fps at path:", savepath)

        camera.StartGrabbing(pylon.GrabStrategy_LatestImageOnly)
        start = time.time()
        with Pool(2) as pool:

            while camera.IsGrabbing():
                grabResult = camera.RetrieveResult(6000, pylon.TimeoutHandling_ThrowException)

                counter = counter - 1
                if grabResult.GrabSucceeded():
                    buff = grabResult.GetBuffer()
                    cam_q.put_nowait(buff)
                    if saving.value:
                        pool.apply_async(savebuff, args=(buff, s, shape,), kwds={"dtype": dtype, "savepath": savepath})
                    s += 1

                grabResult.Release()

                if counter == 0:
                    print(f"stop recording {s} frames in time :", time.time() - start)
                    is_running.value = False
                    break
            # Releasing the resource
            camera.StopGrabbing()
            camera.BinningVertical.SetValue(1)
            camera.BinningHorizontal.SetValue(1)
            camera.Close()
            cam_q.close()

        print("stop")
        return
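
grabCam above only pushes raw bytes into cam_q and publishes the frame format through the shared form record. A standalone sketch (with an assumed frame format) of how a consumer can rebuild a frame from those two pieces:

import numpy as np
from multiprocessing import Queue

shape, dtype = (4, 6, 3), 'uint8'   # assumed frame format, i.e. form[0] and form[1]
q = Queue()

frame = np.zeros(shape, dtype=dtype)
q.put_nowait(frame.tobytes())       # producer side: raw bytes only

restored = np.frombuffer(q.get(), dtype=dtype).reshape(shape)
assert restored.shape == shape and restored.dtype == np.uint8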
예제 #46
0
if __name__ == "__main__":
    q = Queue(3)
    q.put("消息1")
    q.put("消息2")
    print(q.full())
    q.put("消息3")
    print(q.full())

    # Two recommended ways to read from the message queue: 1. catch the exception  2. check first

    try :
        q.put("消息4",True , 2)   # 尝试写入,如果满了 2秒后抛出异常
    except:
        print("已经满了,现有消息%s条"%q.qsize())
    try :
        q.put_nowait("消息4")   # 尝试写入 如果满了立即抛出异常
        #相当于q.put(item,False)
    except:
        print("已经满了,现有消息%s条"%q.qsize())

    if not q.full():
        q.put_nowait("消息4")

    if not q.empty():
        for i in range(q.qsize()):
            print(q.get_nowait())




'''
예제 #47
0
파일: pyTower.py 프로젝트: jmhobbs/pyTower
			f = open( root + '/' + file )
			map_yaml = yaml.load( f )
			f.close()
			# TODO: Version checking
			map = Map( map_yaml, import_path, FullPath( root ) )
			maps.append( map )
			maps_min.append( map.name )

window.set_loading( 'Spawning Menu' )

tx = Queue()
rx = Queue()

menus = Menus( 'qt', tx, rx )

tx.put_nowait( messages.Message( messages.MAPS, {'maps': maps_min } ) )
menus.main_menu()

game = Game()

window.set_loading( 'Ready!' )

while True:

	# Only redraw if we have to...
	for event in pygame.event.get():
		if event.type == pygame.VIDEOEXPOSE:
			window.update()
		elif event.type == pygame.QUIT:
			tx.put_nowait( messages.Message( messages.QUIT ) )
			pygame.quit()
예제 #48
0
class KittiLoader(object):

    # return:
    # tag (N)
    # label (N) (N')
    # rgb (N, H, W, C)
    # raw_lidar (N) (N', 4)
    # vox_feature
    # vox_number
    # vox_coordinate

    def __init__(self,
                 object_dir='.',
                 queue_size=20,
                 require_shuffle=False,
                 is_testset=True,
                 batch_size=1,
                 use_multi_process_num=0,
                 split_file='',
                 multi_gpu_sum=1,
                 aug=False):
        assert (use_multi_process_num >= 0)
        self.object_dir = object_dir
        self.is_testset = is_testset
        self.use_multi_process_num = use_multi_process_num if not self.is_testset else 1
        self.require_shuffle = require_shuffle if not self.is_testset else False
        self.batch_size = batch_size
        self.split_file = split_file
        self.multi_gpu_sum = multi_gpu_sum
        self.aug = aug

        if self.split_file != '':
            # use split file
            _tag = []
            self.f_rgb, self.f_lidar, self.f_label = [], [], []
            for line in open(self.split_file, 'r').readlines():
                line = line[:-1]  # remove '\n'
                _tag.append(line)
                self.f_rgb.append(
                    os.path.join(self.object_dir, 'image_2', line + '.png'))
                self.f_lidar.append(
                    os.path.join(self.object_dir, 'velodyne', line + '.bin'))
                self.f_label.append(
                    os.path.join(self.object_dir, 'label_2', line + '.txt'))
        else:
            self.f_rgb = glob.glob(
                os.path.join(self.object_dir, 'image_2', '*.png'))
            self.f_rgb.sort()
            self.f_lidar = glob.glob(
                os.path.join(self.object_dir, 'velodyne', '*.bin'))
            self.f_lidar.sort()
            self.f_label = glob.glob(
                os.path.join(self.object_dir, 'label_2', '*.txt'))
            self.f_label.sort()

        self.data_tag = [
            name.split('/')[-1].split('.')[-2] for name in self.f_rgb
        ]
        assert (len(self.data_tag) == len(self.f_rgb) == len(self.f_lidar))
        self.dataset_size = len(self.f_rgb)
        self.already_extract_data = 0
        self.cur_frame_info = ''

        print("Dataset total length: {}".format(self.dataset_size))
        if self.require_shuffle:
            self.shuffle_dataset()

        self.queue_size = queue_size
        self.require_shuffle = require_shuffle
        # must use the queue provided by the multiprocessing module (only this can be shared)
        self.dataset_queue = Queue()

        self.load_index = 0
        if self.use_multi_process_num == 0:
            self.loader_worker = [
                threading.Thread(target=self.loader_worker_main,
                                 args=(self.batch_size, ))
            ]
        else:
            self.loader_worker = [
                Process(target=self.loader_worker_main,
                        args=(self.batch_size, ))
                for i in range(self.use_multi_process_num)
            ]
        self.work_exit = Value('i', 0)
        [i.start() for i in self.loader_worker]

        # This operation is not thread-safe
        self.rgb_shape = (cfg.IMAGE_HEIGHT, cfg.IMAGE_WIDTH, 3)

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.work_exit.value = True

    def __len__(self):
        return self.dataset_size

    def fill_queue(self, batch_size=0):
        load_index = self.load_index
        self.load_index += batch_size
        if self.load_index >= self.dataset_size:
            if not self.is_testset:  # test set just end
                if self.require_shuffle:
                    self.shuffle_dataset()
                load_index = 0
                self.load_index = load_index + batch_size
            else:
                self.work_exit.value = True

        labels, tag, voxel, rgb, raw_lidar = [], [], [], [], []
        for _ in range(batch_size):
            try:
                if self.aug:
                    ret = aug_data(self.data_tag[load_index], self.object_dir)
                    tag.append(ret[0])
                    rgb.append(ret[1])
                    raw_lidar.append(ret[2])
                    voxel.append(ret[3])
                    labels.append(ret[4])
                else:
                    rgb.append(
                        cv2.resize(cv2.imread(self.f_rgb[load_index]),
                                   (cfg.IMAGE_WIDTH, cfg.IMAGE_HEIGHT)))
                    raw_lidar.append(
                        np.fromfile(self.f_lidar[load_index],
                                    dtype=np.float32).reshape((-1, 4)))
                    if not self.is_testset:
                        labels.append([
                            line for line in open(self.f_label[load_index],
                                                  'r').readlines()
                        ])
                    else:
                        labels.append([''])
                    tag.append(self.data_tag[load_index])
                    voxel.append(process_pointcloud(raw_lidar[-1]))

                load_index += 1
            except:
                if not self.is_testset:  # test set just end
                    self.load_index = 0
                    if self.require_shuffle:
                        self.shuffle_dataset()
                else:
                    self.work_exit.value = True

        # only for voxel -> [gpu, k_single_batch, ...]
        vox_feature, vox_number, vox_coordinate = [], [], []
        single_batch_size = int(self.batch_size / self.multi_gpu_sum)
        for idx in range(self.multi_gpu_sum):
            _, per_vox_feature, per_vox_number, per_vox_coordinate = build_input(
                voxel[idx * single_batch_size:(idx + 1) * single_batch_size])
            vox_feature.append(per_vox_feature)
            vox_number.append(per_vox_number)
            vox_coordinate.append(per_vox_coordinate)

        self.dataset_queue.put_nowait(
            (labels, (vox_feature, vox_number, vox_coordinate), rgb, raw_lidar,
             tag))

    def load(self):
        try:
            if self.is_testset and self.already_extract_data >= self.dataset_size:
                return None

            buff = self.dataset_queue.get()
            label = buff[0]
            vox_feature = buff[1][0]
            vox_number = buff[1][1]
            vox_coordinate = buff[1][2]
            rgb = buff[2]
            raw_lidar = buff[3]
            tag = buff[4]
            self.cur_frame_info = buff[4]

            self.already_extract_data += self.batch_size

            ret = (np.array(tag), np.array(label), np.array(vox_feature),
                   np.array(vox_number), np.array(vox_coordinate),
                   np.array(rgb), np.array(raw_lidar))
        except:
            print("Dataset empty!")
            ret = None
        return ret

    def load_specified(self, index=0):
        rgb = cv2.resize(cv2.imread(self.f_rgb[index]),
                         (cfg.IMAGE_WIDTH, cfg.IMAGE_HEIGHT))
        raw_lidar = np.fromfile(self.f_lidar[index], dtype=np.float32).reshape(
            (-1, 4))
        labels = [line for line in open(self.f_label[index], 'r').readlines()]
        tag = self.data_tag[index]

        if self.is_testset:
            ret = (
                np.array([tag]),
                np.array([rgb]),
                np.array([raw_lidar]),
            )
        else:
            ret = (
                np.array([tag]),
                np.array([labels]),
                np.array([rgb]),
                np.array([raw_lidar]),
            )
        return ret

    def loader_worker_main(self, batch_size):
        if self.require_shuffle:
            self.shuffle_dataset()
        while not self.work_exit.value:
            if self.dataset_queue.qsize() >= self.queue_size // 2:
                time.sleep(1)
            else:
                # since we use multiprocessing, 1 is ok
                self.fill_queue(batch_size)

    def get_shape(self):
        return self.rgb_shape

    def shuffle_dataset(self):
        # prevent different loaders from loading the same data
        index = shuffle([i for i in range(len(self.f_label))],
                        random_state=random.randint(
                            0, self.use_multi_process_num**5))
        self.f_label = [self.f_label[i] for i in index]
        self.f_rgb = [self.f_rgb[i] for i in index]
        self.f_lidar = [self.f_lidar[i] for i in index]
        self.data_tag = [self.data_tag[i] for i in index]

    def get_frame_info(self):
        return self.cur_frame_info
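
KittiLoader's worker loop above throttles itself with qsize() so the shared queue stays roughly half full while the consumer simply blocks on get(). A standalone sketch of that prefetch pattern (not the class's own loader; the payload dicts are placeholders):

import time
from multiprocessing import Process, Queue, Value

def loader_worker(dataset_queue, work_exit, queue_size):
    i = 0
    while not work_exit.value:
        # note: qsize() is not implemented on every platform (e.g. macOS)
        if dataset_queue.qsize() >= queue_size // 2:
            time.sleep(0.1)                    # back off while the queue is well filled
        else:
            dataset_queue.put_nowait({'batch': i})
            i += 1

if __name__ == '__main__':
    queue_size = 20
    dataset_queue = Queue()
    work_exit = Value('i', 0)
    workers = [Process(target=loader_worker, args=(dataset_queue, work_exit, queue_size))
               for _ in range(2)]
    for w in workers:
        w.start()

    for _ in range(5):
        print(dataset_queue.get())             # consumer side: blocking get()

    work_exit.value = 1
    for w in workers:
        w.join()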
예제 #49
0
class JobManager(object):
    """Handles dividing up the scraping work and starting the scraper threads"""

    def __init__(self, user, passwd, save_to_db, config):
        """Divide the work up into ScrapeJobs"""

        self.user = user
        self.passwd = passwd
        self.config = config
        self.save = save_to_db
        self.jobs = Queue()
        self.semesters = config.get('semesters', None)

        # Enforce a range of 1 - 10 threads with a default of 5
        self.config["threads"] = max(min(self.config.get("threads", 5), 10), 1)
        self.config["job"] = self.config.get("job", ScrapeJob())

        # Divide up the work for the number of threads
        self.make_jobs()

    def start(self):
        """Start running the scraping threads"""

        self.start_jobs()

    def make_jobs(self):
        """Takes the configuration and returns a list of jobs"""

        job = self.config["job"]
        letters = job["letters"]
        threads_per_letter = max(self.config.get("threads_per_letter", int((self.config["threads"] - 1)/len(letters) + 1)), 1)

        for l in letters:
            job_letter = ScrapeJob(job)
            job_letter["letters"] = l
            for s in range(0, threads_per_letter):
                temp = ScrapeJob(job_letter)
                temp["subject_start"] = job["subject_start"] + s
                temp["subject_step"] = threads_per_letter
                logging.info(u"Made job: {0}".format(temp))
                self.jobs.put_nowait(temp)

    def run_jobs(self, queue):
        """Initialize a SOLUS session and run the jobs"""

        # Initialize the session
        try:
            session = SolusSession(self.user, self.passwd)
        except EnvironmentError as e:
            logging.critical(e)
            # Can't log in, therefore can't do any jobs
            # As long as at least 1 of the threads can log in,
            # the scraper will still work
            return

        # Run all the jobs in the job queue
        while True:
            try:
                job = queue.get_nowait()
            except Empty as e:
                return

            # Run the job
            if PROFILE:
                import cProfile
                cProfile.runctx("SolusScraper(session, job, self.db).start()", globals(), locals())
            else:
                SolusScraper(session, job, self.db).start()
                

    def start_jobs(self):
        """Start the threads that perform the jobs"""

        threads = []
        for x in range(self.config["threads"]):
            threads.append(Process(target=self.run_jobs, args=(self.jobs,)))
            threads[-1].start()

        for t in threads:
            t.join()

    def parse_courses(self):
        """Course object generator matching interface for BaseParser."""
        # Setup the logger before any logging happens
        _init_logging()

        try:
            session = SolusSession(self.user, self.passwd)
        except EnvironmentError as e:
            logging.critical(e)
            # Can't log in, therefore can't do any jobs
            # As long as at least 1 of the threads can log in,
            # the scraper will still work
            return

        for course in SolusScraper(session, ScrapeJob(), True, self.semesters).start():
            yield course
예제 #50
0
class CueWebServer(object):
    def __init__(self, configHolder):
        self._cueConfig = configHolder
        self._serverMessageQueue = None
        self._serverCommandQueue = None
        self._webInputQueue = Queue(256)
        self._webOutputQueue = Queue(10)
        self._cueWebServerProcess = None
        self._oldBarPos = None
        self._oldBeatPos = None
        self._oldConfigName = ""
        self._oldStreamConfig = []

    def startCueWebServerProcess(self):
        self._serverMessageQueue = Queue(256)
        self._serverCommandQueue = Queue(256)
        host = self._cueConfig.getCueWebServerAddress()
        port = self._cueConfig.getCueWebServerPort()
        self._cueWebServerProcess = Process(
            target=guiWebServerProcess,
            args=(host, port, self._serverMessageQueue,
                  self._serverCommandQueue, self._webInputQueue,
                  self._webOutputQueue))
        self._cueWebServerProcess.name = "cueWebServer"
        self._cueWebServerProcess.start()

    def requestCueWebServerProcessToStop(self):
        if (self._cueWebServerProcess != None):
            print "Stopping cueWebServer"
            self._serverCommandQueue.put("QUIT")

    def hasCueWebServerProcessShutdownNicely(self):
        if (self._cueWebServerProcess == None):
            return True
        else:
            try:
                self._serverMessageQueue.get_nowait()
            except Empty:
                pass
            if (self._cueWebServerProcess.is_alive() == False):
                self._cueWebServerProcess = None
                return True
            return False

    def forceCueWebServerProcessToStop(self):
        if (self._cueWebServerProcess != None):
            if (self._cueWebServerProcess.is_alive()):
                print "Cue server daemon did not respond to quit command. Terminating."
                self._cueWebServerProcess.terminate()
        self._cueWebServerProcess = None

    def updateFromConfig(self, configHolder, activeConfigName):
        self._cueConfig = configHolder
        if (activeConfigName != self._oldConfigName):
            print str(self._oldConfigName) + " != " + str(activeConfigName)
            self._oldConfigName = activeConfigName
            try:
                self._serverCommandQueue.put_nowait(
                    ["taktInfo_configName", activeConfigName])
            except:
                pass

        if (self._cueConfig.getCueStreamList() != self._oldStreamConfig):
            print str(self._cueConfig.getCueStreamList()) + " != " + str(
                self._oldStreamConfig)
            self._oldStreamConfig = self._cueConfig.getCueStreamList()
            try:
                self._serverCommandQueue.put_nowait(
                    self._cueConfig.getCueStreamList())
            except:
                pass

    def updateTimingInfo(self, bar, beat):
        if ((self._oldBarPos != bar) or (self._oldBeatPos != beat)):
            self._oldBarPos = bar
            self._oldBeatPos = beat
            try:
                self._serverCommandQueue.put_nowait(
                    ["taktInfo_timingInfo",
                     str(bar) + ":" + str(beat)])
            except:
                pass

    def processCueWebServerMessages(self):
        try:
            serverMessage = self._serverMessageQueue.get_nowait()
            serverMessageXml = stringToXml(serverMessage)
            if (serverMessageXml != None):
                if (serverMessageXml.tag == "servermessage"):
                    print "CueWebServer Message: " + serverMessageXml.get(
                        "message")
                elif (serverMessageXml.tag == "serverLog"):
                    print "CueWebServerLog: " + serverMessageXml.get(
                        "server"), serverMessageXml.get(
                            "timeStamp"), serverMessageXml.get("message")
        except Empty:
            pass
예제 #51
0
class WorkerProcess(Process):
	def __init__(self, id, config, sequence, hist_obj, results_path, log_id):
		Process.__init__(self)
		self.id = id
		self.config = config
		self.sequence = sequence
		self.hist_obj = hist_obj
		self.agent = Agent(self.id, config, sequence)
		self.results_path = results_path
		self.log_id = log_id
		self.leader_send = None
		self.leader_recv = None
		self.support_send = [None for i in range(0, self.config.num_sup)] if id * self.config.num_sup + 1 < self.config.num_agents else None
		self.support_recv = [None for i in range(0, self.config.num_sup)] if id * self.config.num_sup + 1 < self.config.num_agents else None
		self.root_div_send = None
		self.leader_div_send = None
		self.agent_div_recv = [None for i in range(1, self.config.num_agents)] if self.agent.id_leader == None else None
		self.support_div_recv = [None for i in range(1, self.config.num_sup+1)] if self.agent.id_supporters else None
		self.leader_reset_send = None
		self.leader_reset_recv = None
		self.support_reset_send = [None for i in range(0, self.config.num_sup)] if id * self.config.num_sup + 1 < self.config.num_agents else None
		self.support_reset_recv = [None for i in range(0, self.config.num_sup)] if id * self.config.num_sup + 1 < self.config.num_agents else None
		self.event_restart = Event()
		self.stop_event = Event()
		self.support_stop_event = [None for i in range(0, self.config.num_sup)] if id * self.config.num_sup + 1 < self.config.num_agents else None
		self.energy_number = Queue(1)
		self.support_energy_number = [None for i in range(0, self.config.num_sup)] if id * self.config.num_sup + 1 < self.config.num_agents else None
	
	def select_rand_solution(self, solutions):
		index = 0
		counter_sol = 0
		for sol in solutions:
			if sol != None:
				counter_sol += 1
		counter_sol -= 1 
		index = random.randint(0, counter_sol)
		return index
	
	def fitness_roulette_selection(self, solutions):
		fitness_last = 0
		fitness_total = 0
		fitness_acum = 0
		index = 0
		selection = random.uniform(0, 1)
		for sol in solutions:
			if sol != None:
				fitness_last = sol.energy_value
		for sol in solutions:
			if sol != None:
				fitness_total += fitness_last - sol.energy_value
		if fitness_total != 0:
			for sol in solutions:
				if sol != None:
					fitness_acum += fitness_last - sol.energy_value
					prob = fitness_acum / fitness_total
					if selection <= prob:
						break
					index += 1
		return index
	
	def send_solution(self, solution, queue):
		time_send_start = datetime.datetime.now()
		queue.put(copy.deepcopy(solution))
		self.agent.trx_send += 1
		self.agent.time_send += datetime.datetime.now() - time_send_start
	
	def receive_solution(self, queue, is_leader):
		time_receive_start = datetime.datetime.now()
		solution = queue.get()
		if is_leader:
			self.agent.update(solution)
		else:
			index = 0
			for sol in solution:
				if sol != None:
					self.agent.leader_pockets[index] = copy.deepcopy(sol)
				else:
					break
				index += 1
		self.agent.trx_receive += 1
		self.agent.time_receive += datetime.datetime.now() - time_receive_start
	
	# For the pickle variants, don't forget to add pickle.loads in agent 0 during the reset
	def send_solution_pickle(self, solution, queue):
		time_send_start = datetime.datetime.now()
		buff = pickle.dumps(solution, 2)
		queue.put(buff)
		self.agent.trx_send += 1
		self.agent.time_send += datetime.datetime.now() - time_send_start
	
	def receive_solution_pickle(self, queue, is_leader):
		time_receive_start = datetime.datetime.now()
		buff = queue.get()
		solution = pickle.loads(buff)
		self.agent.trx_receive += 1
		self.agent.time_receive += datetime.datetime.now() - time_receive_start
		if is_leader:
			self.agent.update(solution)
		else:
			index = 0
			for sol in solution:
				if sol != None:
					self.agent.leader_pockets[index] = copy.deepcopy(sol)
				else:
					break
				index += 1
	
	def save_results(self):
		if not os.path.exists(self.results_path):
			try:
				os.makedirs(self.results_path)
			except:
				pass
		
		if self.agent.id_leader == None:
			fout = open('%s/run-summary.txt' % (self.results_path), 'w')
			
			print 'Parameters'
			fout.write('Parameters\n')
			print '--- pockets: %d' % (self.config.num_pockets)
			fout.write('--- pockets: %d\n' % (self.config.num_pockets))
			print '--- agents: %d' % (self.config.num_agents)
			fout.write('--- agents: %d\n' % (self.config.num_agents))
			print '--- supporters per leader: %d' % (self.config.num_sup)
			fout.write('--- supporters per leader: %d\n' % (self.config.num_sup))
			print '--- do reset: %s' % (str(self.config.if_reset))
			fout.write('--- do reset: %s\n' % (str(self.config.if_reset)))
			print '--- prob radius: %f' % (self.hist_obj.prob_radius)
			fout.write('--- prob radius: %f\n' % (self.hist_obj.prob_radius))
			print '--- prob of ls: %f' % (self.config.test_ls_prob)
			fout.write('--- prob of ls: %f\n' % (self.config.test_ls_prob))
			print '--- simulated annealing decrease factor: %f' % (self.config.test_ls_fact)
			fout.write('--- simulated annealing decrease factor: %f\n' % (self.config.test_ls_fact))
			print '--- prob of jump before ls: %f' % (self.config.test_jump_prob)
			fout.write('--- prob of jump before ls: %f\n' % (self.config.test_jump_prob))
			print '--- jump decrease factor: %f' % (self.config.test_jump_fact)
			fout.write('--- jump decrease factor: %f\n' % (self.config.test_jump_fact))
			print '--- initial temperature for simulated annealing: %d' % (self.config.test_temp_init)
			fout.write('--- initial temperature for simulated annealing: %d\n' % (self.config.test_temp_init))
			print '--- initial max jump distance: %f' % (self.config.test_jump_dist)
			fout.write('--- initial max jump distance: %f\n' % (self.config.test_jump_dist))
			print '--- generations without improvements: %d' % (self.config.test_noimprove)
			fout.write('--- generations without improvements: %d\n' % (self.config.test_noimprove))
			print '--- prob of crossover: %f' % (self.config.crossover_prob)
			fout.write('--- prob of crossover: %f\n' % (self.config.crossover_prob))
			
			fout.close()
			
			for i in range(0, self.config.num_pockets):
				if self.agent.pockets[i] != None:
					self.agent.pockets[i].pose.dump_pdb('%s/pocket-%02d.pdb' % (self.results_path, i))
		
		fout = open('%s/log-agent-%02d.txt' % (self.results_path, self.id), 'w')

		print '\n%s' % (self.agent)
		fout.write('%s\n' % (self.agent))
		print 'Total generation of agent_%02d: %d' % (self.id, self.agent.generation)
		fout.write('Total generation of agent_%02d: %d\n' % (self.id, self.agent.generation))
		print 'Total restarts of agent_%02d: %d' % (self.id, self.agent.restarts)
		fout.write('Total restarts of agent_%02d: %d\n' % (self.id, self.agent.restarts))
		print 'Total time LocalSearch of agent_%02d: %s' % (self.id, str(self.agent.time_ls))
		fout.write( 'Total time LocalSearch of agent_%02d: %s\n' % (self.id, str(self.agent.time_ls)))
		print 'Total time Diversity calculations of agent_%02d: %s' % (self.id, str(self.agent.time_div))
		fout.write( 'Total time Diversity calculations of agent_%02d: %s\n' % (self.id, str(self.agent.time_div)))
		print 'Total time SEND of agent_%02d: %s' % (self.id, str(self.agent.time_send))
		fout.write( 'Total time SEND of agent_%02d: %s\n' % (self.id, str(self.agent.time_send)))
		print 'Total transactions SEND of agent_%02d: %s' % (self.id, str(self.agent.trx_send))
		fout.write( 'Total transactions SEND of agent_%02d: %s\n' % (self.id, str(self.agent.trx_send)))
		print 'Total time RECEIVE of agent_%02d: %s' % (self.id, str(self.agent.time_receive))
		fout.write( 'Total time RECEIVE of agent_%02d: %s\n' % (self.id, str(self.agent.time_receive)))
		print 'Total transactions RECEIVE of agent_%02d: %s\n' % (self.id, str(self.agent.trx_receive))
		fout.write( 'Total transactions RECEIVE of agent_%02d: %s\n\n' % (self.id, str(self.agent.trx_receive)))
		
		fout.close()
		self.agent.status_write('%s/log-agent-%02d.txt' % (self.results_path, self.id))
	
	
	def make_server_manager(self, port, authkey):
		queue_send = Queue(1)
		queue_recv = Queue()
		queue_div_recv = Queue(1)
		queue_reset_send = Queue(1)
		queue_reset_recv = Queue(1)
		stop_event = Event()
		energy_number = Queue(1)
		
		class ServerManager(SyncManager):
			pass

		ServerManager.register('get_queue_send', callable=lambda: queue_send)
		ServerManager.register('get_queue_recv', callable=lambda: queue_recv)
		ServerManager.register('get_queue_div_recv', callable=lambda: queue_div_recv)
		ServerManager.register('get_queue_reset_send', callable=lambda: queue_reset_send)
		ServerManager.register('get_queue_reset_recv', callable=lambda: queue_reset_recv)
		ServerManager.register('get_stop_event', callable=lambda: stop_event)
		ServerManager.register('get_energy_number', callable=lambda: energy_number)

		manager = ServerManager(address=('', port), authkey=authkey)
		manager.start()
		print 'Agent %d server started at port %d' % (self.id, port)
		return manager
	
	def make_div_server_manager(self, port, authkey):
		queue_div_recv = Queue()
		class ServerManager(SyncManager):
			pass
		ServerManager.register('get_queue_div_recv', callable=lambda: queue_div_recv)
		manager = ServerManager(address=('', port), authkey=authkey)
		manager.start()
		print 'Agent %d div server started at port %d' % (self.id, port)
		return manager
	
	
	def make_client_manager(self, host, port, authkey):
		class ClientManager(SyncManager):
			pass

		ClientManager.register('get_queue_send')
		ClientManager.register('get_queue_recv')
		ClientManager.register('get_queue_div_recv')
		ClientManager.register('get_queue_reset_send')
		ClientManager.register('get_queue_reset_recv')
		ClientManager.register('get_stop_event')
		ClientManager.register('get_energy_number')

		manager = ClientManager(address=(host, port), authkey=authkey)
		manager.connect()
		print 'Agent %d client connected to %s at port %d ' % (self.id, host, port)
		return manager
	
	def make_div_client_manager(self, host, port, authkey):
		class ClientManager(SyncManager):
			pass
		ClientManager.register('get_queue_div_recv')
		manager = ClientManager(address=(host, port), authkey=authkey)
		manager.connect()
		print 'Agent %d div client connected to %s at port %d ' % (self.id, host, port)
		return manager
	
	def run_servers(self):
		servers = [None for i in range(0, self.config.num_sup)]
		div_servers = [None for i in range(1, self.config.num_agents)]
		
		if self.agent.id_leader == None:
			for i in range(1, self.config.num_agents):
				port = self.config.root_hosts[i][1]
				div_servers[i-1] = self.make_div_server_manager(port, '')
				self.agent_div_recv[i-1] = div_servers[i-1].get_queue_div_recv()
			servers += div_servers
		
		for i in range(0, self.config.num_sup):
			host = self.config.hosts[self.agent.id_supporters[i]][0]
			port = self.config.hosts[self.agent.id_supporters[i]][1]
			path = self.config.hosts[self.agent.id_supporters[i]][2]
			servers[i] = self.make_server_manager(port, '')
			self.support_send[i] = servers[i].get_queue_send()
			self.support_recv[i] = servers[i].get_queue_recv()
			self.support_div_recv[i] = servers[i].get_queue_div_recv()
			self.support_reset_send[i] = servers[i].get_queue_reset_send()
			self.support_reset_recv[i] = servers[i].get_queue_reset_recv()
			self.support_stop_event[i] = servers[i].get_stop_event()
			self.support_energy_number[i] = servers[i].get_energy_number()
			
			if not os.path.exists(self.config.logs_path):
				os.makedirs(self.config.logs_path)
			
			argv = (str(self.config.protein) + ' ' + str(self.config.num_levels) + ' ' + str(self.config.num_sup) + ' ' + str(self.config.max_agents) + ' ' +
					str(self.config.num_pockets) + ' ' + str(self.config.if_reset) + ' ' + str(self.config.test_noimprove) + ' ' + str(self.config.score_weight) + ' ' +
					str(self.config.sasa_weight) + ' ' + str(self.config.energy_limit) + ' ' + str(self.agent.id_supporters[i]))
			cmd = 'python memetic_parallel.py %s %d > %s/memetic_parallel_%03d_agent-%02d.log 2>&1' % (argv, self.log_id, self.config.logs_path, self.log_id, self.agent.id_supporters[i])
			subprocess.Popen(['ssh', host, 'cd ' + path + ' && ' + cmd], stdin = None, stdout = None, stderr = None)
		
		return servers
	
	def run_client(self):
		host = self.config.hosts[self.agent.id_leader][0]
		port = self.config.hosts[self.id][1]
		root_host = self.config.root_hosts[0][0]
		root_port = self.config.root_hosts[self.id][1]
		client = self.make_client_manager(host, port, '')
		root_client = self.make_div_client_manager(root_host, root_port, '')
		self.leader_send = client.get_queue_recv()
		self.leader_recv = client.get_queue_send()
		self.root_div_send = root_client.get_queue_div_recv()
		self.leader_div_send = client.get_queue_div_recv()
		self.leader_reset_send = client.get_queue_reset_recv()
		self.leader_reset_recv = client.get_queue_reset_send()
		self.stop_event = client.get_stop_event()
		self.energy_number = client.get_energy_number()
	
	def run(self):
		if self.agent.id_leader != None:
			self.run_client()
		
		if self.agent.id_supporters:
			servers = self.run_servers()
		
		jump_radius_aux = self.config.test_jump_dist
		self.agent.current.init_solution(self.hist_obj)
		self.agent.update()
		
		print 'WorkerProcess %d: \n%s' % (self.id, self.agent)
		
		start_process_time = datetime.datetime.now()
		self.agent.generation = 1
		
		best_energy = self.agent.pockets[0].energy_value
		gens_without_improve = 0
		gens_convergence = self.config.test_noimprove
		gens_start = 0
		restart_successed = True
		restarts_failed = 0
		energy_calls = self.agent.current.energy_calls
		support_energy_calls = [0 for i in range(0, self.config.num_sup)]
		self.agent.status_log_append(datetime.datetime.now() - start_process_time, energy_calls)
		
		
		while not self.stop_event.is_set():
			
			# Crossover is not allowed on agent 0 (the root agent)
			if self.agent.id_leader != None:
				if self.agent.leader_pockets[0] != None:
					index_pocket_leader_agent = self.fitness_roulette_selection(self.agent.leader_pockets)
					index_pocket_self_agent = self.select_rand_solution(self.agent.pockets)
					self.agent.crossover(self.agent.leader_pockets[index_pocket_leader_agent], self.agent.pockets[index_pocket_self_agent], self.config.crossover_prob)
			else:
				index_pocket_self_agent = self.select_rand_solution(self.agent.pockets)
				self.agent.current = copy.deepcopy(self.agent.pockets[index_pocket_self_agent])
			
			# Local search
			time_ls_start = datetime.datetime.now()
			self.agent.simulated_annealing(self.config.ls_prob_ss, self.config.test_ls_fact, self.config.test_jump_prob, jump_radius_aux, self.config.test_temp_init, self.hist_obj)
			self.agent.time_ls += datetime.datetime.now() - time_ls_start
			jump_radius_aux = jump_radius_aux * self.config.test_jump_fact
			
			updated = self.agent.update()
			
			# Update pockets with supporter data
			if self.agent.id_supporters:
				for i in range(0, self.config.num_sup):
					while not self.support_recv[i].empty():
						self.receive_solution_pickle(self.support_recv[i], True)
						print '>> WorkerProcess %d receive a pocket from supporter %d, pocket list: %s' % (self.id, self.agent.id_supporters[i], self.agent.pockets)
			
			# Update pocket_leader with leader data
			if self.agent.id_leader != None:
				if not self.leader_recv.empty():
					self.receive_solution_pickle(self.leader_recv, False)
					print '>> WorkerProcess %d receive a list of pockets from leader %d' % (self.id, self.agent.id_leader)
					
			if updated or self.agent.update():
				# Send the pocket list to the supporters
				if self.agent.id_supporters:
					for i in range(0, self.config.num_sup):
						if not self.support_send[i].full():
							print '> WorkerProcess %d send a list of pockets to supporter %d' % (self.id, self.agent.id_supporters[i])
							self.send_solution_pickle(self.agent.pockets, self.support_send[i])

				# Send the best pocket to the leader
				if self.agent.id_leader != None:
					if self.agent.pockets[0].energy_value < best_energy:
						if not self.leader_send.full():
							print '> WorkerProcess %d send a pocket to leader %d with energy: %d' % (self.id, self.agent.id_leader, self.agent.pockets[0].energy_value)
							self.send_solution_pickle(self.agent.pockets[0], self.leader_send)
			
			if self.config.calculate_div_density:
				# Diversity density calculations
				time_div_start = datetime.datetime.now()

				if self.agent.id_leader == None:
					for i in range(0, self.config.num_agents-1):
						if not self.agent_div_recv[i].empty():
							buff = self.agent_div_recv[i].get()
							agent_pockets = pickle.loads(buff)
							j = 0
							for p in agent_pockets:
								if p != None:
									self.agent.population_pockets[i][j] = copy.deepcopy(p)
								else:
									break
								j += 1

					if self.agent.id_supporters:
						for i in range(0, self.config.num_sup):
							if not self.support_div_recv[i].empty():
								buff = self.support_div_recv[i].get()
								supporter_pockets = pickle.loads(buff)
								j = 0
								for p in supporter_pockets:
									if p != None:
										self.agent.supporter_pockets[i][j] = copy.deepcopy(p)
									else:
										break
									j += 1
				else:
					if not self.root_div_send.full():
						buff = pickle.dumps(self.agent.pockets, 2)
						self.root_div_send.put(buff)

					if not self.leader_div_send.full():
						buff = pickle.dumps(self.agent.pockets, 2)
						self.leader_div_send.put(buff)

					if self.agent.id_supporters:
						for i in range(0, self.config.num_sup):
							if not self.support_div_recv[i].empty():
								buff = self.support_div_recv[i].get()
								supporter_pockets = pickle.loads(buff)
								j = 0
								for p in supporter_pockets:
									if p != None:
										self.agent.supporter_pockets[i][j] = copy.deepcopy(p)
									else:
										break
									j += 1

				self.agent.calculate_densities()
				self.agent.time_div += datetime.datetime.now() - time_div_start

			self.agent.generation += 1
			
			# Reset control
			if self.config.if_reset:
				if self.agent.id_leader == None:
					if self.agent.pockets[0].energy_value == best_energy:
						gens_without_improve += 1
					else:
						gens_without_improve = 0

					if gens_without_improve == gens_convergence:
						if self.agent.id_supporters:
							for i in range(0, self.config.num_sup):
								self.support_reset_send[i].put(0)
							for i in range(0, self.config.num_sup):
								last_solution = pickle.loads(self.support_reset_recv[i].get())
								if last_solution.energy_value < best_energy:
									restarts_failed += 1
									restart_successed = False
									self.agent.update(last_solution)
									best_energy = self.agent.pockets[0].energy_value
									gens_without_improve = 0
						if restart_successed:
							print '\n***Restart succeeded***\n'
							self.event_restart.set()
							if self.agent.id_supporters:
								for i in range(0, self.config.num_sup):
									self.support_reset_send[i].put(True)
						else:
							print '\n***Restart failed: %d***\n' % restarts_failed
							if self.agent.id_supporters:
								for i in range(0, self.config.num_sup):
									self.support_reset_send[i].put(False)
						restart_successed = True
				else:
					if not self.leader_reset_recv.empty():
						self.leader_reset_recv.get()
						if self.agent.id_supporters:
							for i in range(0, self.config.num_sup):
								self.support_reset_send[i].put(0)
							for i in range(0, self.config.num_sup):
								self.receive_solution_pickle(self.support_reset_recv[i], True)
						self.send_solution_pickle(self.agent.pockets[0], self.leader_reset_send)
						restart_successed = self.leader_reset_recv.get()
						if restart_successed:
							self.event_restart.set()
							if self.agent.id_supporters:
								for i in range(0, self.config.num_sup):
									self.support_reset_send[i].put(True)
						else:
							if self.agent.id_supporters:
								for i in range(0, self.config.num_sup):
									self.support_reset_send[i].put(False)
						restart_successed = True
				
				# Is event restart set?
				if self.event_restart.is_set():
					if self.agent.id_leader == None:
						# Only the root leader can keep the best solution
						self.agent.pockets = [self.agent.pockets[0]] + [None for i in range(1, self.config.num_pockets)]
						self.agent.population_pockets = [[None for i in range(0, self.config.num_pockets)] for i in range(1, self.config.num_agents)]
						for i in range(0, self.config.num_agents-1):
							if not self.agent_div_recv[i].empty():
								self.agent_div_recv[i].get()
					else:
						self.agent.pockets = [None for i in range(0, self.config.num_pockets)]
						self.agent.leader_pockets = [None for i in range(0, self.config.num_pockets)]
						if not self.leader_recv.empty():
							self.leader_recv.get()
					
					if self.agent.id_supporters:
						for i in range(0, self.config.num_sup):
							while not self.support_recv[i].empty():
								self.support_recv[i].get()
						
						self.agent.supporter_pockets = [[None for i in range(0, self.config.num_pockets)] for i in range(1, self.config.num_sup+1)]
						for i in range(0, self.config.num_sup):
							if not self.support_div_recv[i].empty():
								self.support_div_recv[i].get()
						
					self.agent.restarts += 1

					print 'RESTARTING %3d - WorkerProcess %2d - %s' % (self.agent.restarts, self.id, self.agent)
					self.agent.current.init_solution(self.hist_obj)
					self.agent.update()
					jump_radius_aux = self.config.test_jump_dist
					gens_convergence = self.config.test_noimprove + self.agent.generation - gens_convergence - gens_start
					gens_start = self.agent.generation
					gens_without_improve = 0
					self.event_restart.clear()
					print 'RESTARTED %3d - WorkerProcess %2d - %s' % (self.agent.restarts, self.id, self.agent)
			
			
			energy_calls = self.agent.current.energy_calls
			
			if self.agent.id_supporters:
				for i in range(0, self.config.num_sup):
					if not self.support_energy_number[i].empty():
						support_energy_calls[i] = self.support_energy_number[i].get()
					energy_calls += support_energy_calls[i]
			
			if not self.energy_number.full():
				self.energy_number.put_nowait(energy_calls)
			
			self.agent.status_log_append(datetime.datetime.now() - start_process_time, energy_calls)
			
			if self.agent.id_leader == None:
				if energy_calls > self.config.energy_limit:
					self.stop_event.set()
			
			best_energy = self.agent.pockets[0].energy_value
		
		if self.agent.id_supporters:
			for i in range(0, self.config.num_sup):
				self.support_stop_event[i].set()
		
		self.save_results()
		
		if self.agent.id_supporters:
			for i in range(0, self.config.num_sup):
				self.support_reset_recv[i].get()
				servers[i].shutdown()
		
		if self.agent.id_leader != None:
			self.leader_reset_send.put(0)
		
		if self.agent.id_leader == None:
			for i in range(self.config.num_sup, (self.config.num_agents + self.config.num_sup - 1)):
				servers[i].shutdown()
		
		print '\n************ WorkerProcess %d done ************\n' % (self.id)
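
The make_server_manager()/make_client_manager() methods above follow the standard SyncManager recipe for exposing queues to remote processes. A minimal, self-contained sketch of just that pattern (hypothetical names and port, localhost only, and assuming the Unix 'fork' start method) might look like this:

# Sketch of the SyncManager queue-sharing pattern used above (illustrative
# only; lambda-based registries require the 'fork' start method, so Unix).
from queue import Queue
from multiprocessing.managers import SyncManager

def make_server(port, authkey):
    job_queue = Queue()

    class ServerManager(SyncManager):
        pass

    # The server registers a callable returning the real queue ...
    ServerManager.register('get_job_queue', callable=lambda: job_queue)
    manager = ServerManager(address=('', port), authkey=authkey)
    manager.start()
    return manager

def make_client(host, port, authkey):
    class ClientManager(SyncManager):
        pass

    # ... the client only registers the name and receives a proxy on connect.
    ClientManager.register('get_job_queue')
    manager = ClientManager(address=(host, port), authkey=authkey)
    manager.connect()
    return manager

if __name__ == '__main__':
    server = make_server(50010, b'secret')
    client = make_client('127.0.0.1', 50010, b'secret')
    client.get_job_queue().put('hello')
    print(server.get_job_queue().get())   # -> 'hello'
    server.shutdown()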
예제 #52
0
class ActionPool:
    def __init__(
        self,
        settings=None,
        *,
        processes=None,
        max_active_or_pending=None,
        allow_abort_after=None,
    ):

        # Removed _settings because it is not required now
        # self.settings = _pickable_settings(settings)
        self.processes = processes or N_PROCESSES
        self.max_active_or_pending = max_active_or_pending or MAX_ACTIVE_OR_PENDING
        self.allow_abort_after = allow_abort_after or ALLOW_ABORT_AFTER

        self._pending_lock = Lock()
        self._pending = {}
        self._num_active_or_pending = 0
        self._next_result_id = count()

        # Created in self.start()
        self.pool = None
        self.qIn = None
        self.qOut = None

        # Statistic
        self._counter_ok = []
        self._counter_failed = []
        self._counter_aborted = []
        self._counter_skipped = []

        self._state = ActionPoolState.INIT

    # Define __enter__ and __exit__ for with-statement
    def __enter__(self):
        self.start()
        return self

    def __exit__(self, type, value, traceback):
        # self.wait(timeout=2.0, terminate_workers=True)  # moved into stop()
        self.stop()

    def _close_queues(self):
        # Calling this after pool.close() should guarantee that the
        # while-loops below are finite.
        #
        # If some action was not started by the pool, qIn
        # contains data.
        try:
            while True:
                d = self.qIn.get_nowait()
                mp_logger.debug(
                    "Non consumed data in queue 'qIn': {}".format(d))
        except Empty:
            pass
        finally:
            self.qIn.close()

        try:
            while True:
                d = self.qOut.get_nowait()
                mp_logger.debug(
                    "Non consumed data in queue 'qOut': {}".format(d))
        except Empty:
            pass
        finally:
            self.qOut.close()

    def start(self):
        # set_start_method("spawn")  # Too late here....

        if self._state not in [ActionPoolState.INIT, ActionPoolState.STOPED]:
            logger.error("Pool can not be started twice.")
            return

        self.qIn = Queue()  # Sends args to workers
        self.qOut = Queue()  # Get results from workers

        # https://stackoverflow.com/a/35134329
        #... but
        # 'This solution is not portable as it works only on Unix.
        #  Moreover, it would not work if the user sets the maxtasksperchild
        #  Pool parameter. The newly created processes would inherit
        #  the standard SIGINT handler again. The pebble library disables
        #  SIGINT by default for the user as soon as the new process is created.'
        if True:
            original_sigint_handler = signal.signal(signal.SIGINT,
                                                    signal.SIG_IGN)
            self.pool = Pool(
                processes=self.processes,
                initializer=_ap_handler_init,
                initargs=[self.qIn, self.qOut],
                #initargs=[self.qIn, self.qOut, self.settings],
                # Important to re-fill pool after
                # terminating some child processes!
                # Maybe not needed in 'spawn'-context
                maxtasksperchild=1)
            signal.signal(signal.SIGINT, original_sigint_handler)
        else:
            # Hm, this hangs sometimes :-(
            self.pool = get_context("spawn").\
                    Pool(processes=self.processes,
                         initializer=_ap_handler_init,
                         initargs=[self.qIn, self.qOut],
                         # initargs=[self.qIn, self.qOut, self.settings],
                         # Important to re-fill pool after
                         # terminating some child processes!
                         # Maybe not needed in 'spawn'-context
                         maxtasksperchild=1
                        )
        self._state = ActionPoolState.STARTED

        logger.info("ActionPool started")

    def stop(self):
        if self._state not in [ActionPoolState.STARTED]:
            logger.error("Pool is not started.")
            return

        self.pool.close()
        self.fetch_started_ids()  # clears qOut
        self._state = ActionPoolState.STOPED

        # Without this, child processes of workers
        # will survive deconstruction of this object.
        self.wait(timeout=1.0, terminate_workers=True)

        # Docs: 'joining process that uses queues needs to
        #        read all data before .join() is called.
        #        Otherwise .join() will block'
        #
        # NOTE: Well, reading all data is necessary, but not a
        #       sufficient condition.
        #       Thus we need still pool.terminate() before pool.join()!
        #       The call of _close_queues() is just for generating
        #       log messages for debugging.
        self._close_queues()

        # This terminating resolves hanging pool.join()
        self.pool.terminate()
        self.pool.join()
        logger.info("ActionPool stopped")

    def push_action(self, f, args=()):
        #self.fetch_started_ids()

        # Gen id for this action
        rid = next(self._next_result_id)

        # NOTE: _num_active_or_pending <= len(self.pool._cache),
        # and not equal if we killed a process and didn't handle its
        # results / remove it from _cache by hand (?!)

        # Kill old open jobs if no space for more is left
        if self._num_active_or_pending >= self.max_active_or_pending:
            logger.debug("Start cleanup for {} pending "
                         "actions".format(self._num_active_or_pending))
            self.kill_stale_actions()

        # Re-check if space is available
        if self._num_active_or_pending >= self.max_active_or_pending:
            logger.debug("Cannot start new action. Still {} "
                         "pending".format(self._num_active_or_pending))
            self._counter_skipped.append(rid)
            return False

        # Put input arguments of action into Queue.
        logger.debug("Put rid={} in qIn".format(rid))
        self.qIn.put_nowait((rid, f, args))

        def action_succeeded(t):
            mp_logger.debug("Action handler finished.")
            exitcode, rid, err, ret = t

            # The following leads to 'RuntimeError: dictionary changed size ...'
            # if not locked, because this function is called from another thread.
            self._pending_lock.acquire()
            self._pending.pop(rid, None)
            self._num_active_or_pending -= 1
            self._pending_lock.release()

            if exitcode == 0:
                self._counter_ok.append(rid)
            else:
                self._counter_failed.append(rid)
                mp_logger.error("Action handler error: {}".format(err))

            # Take (at least) this entry from qOut.
            self.fetch_started_ids()

        def action_failed(t):
            # Due to the try/except construction in _ap_handler this will not
            # be reached if the action (=f) itself failed.
            # Nevertheless it will be called if the process is
            # killed by c.terminate().
            mp_logger.debug("_ap_handler failed. Reason: {}".format(t))
            return

        # Add action into pool (as argument for '_ap_handler')
        # NOTE: We cannot give 'rid' as an argument to _ap_handler
        #       because the reading order of the queue can be scrambled.
        #       Thus we need to read this value from qIn.
        result = self.pool.apply_async(_ap_handler,
                                       args=(),
                                       callback=action_succeeded,
                                       error_callback=action_failed)

        self._pending_lock.acquire()
        self._pending[rid] = Pending(None, result, None)
        self._num_active_or_pending += 1
        self._pending_lock.release()
        # Values for None-fields will be put in queue
        # if action-processes starts. Currently, they are unknown.

        return True

    def running_time_exceeded(self, start_time):
        return (time() - start_time > self.allow_abort_after)

    def fetch_started_ids(self):
        # Check which processes have already started
        # and filled the queue

        while False:  # disabled debug drain of qIn
            try:
                (rid, f, args) = self.qIn.get_nowait()
                logger.debug("\t\t\tHey, qIn not empty: {} {}".format(rid, f))
            except Empty:
                break

        while True:
            try:
                [rid, pid, time] = self.qOut.get_nowait()
                # [rid, pid, time] = self.qOut.get(block=True, timeout=0.1)
            except Empty:
                break

            pend = self._pending.get(rid)
            if pend:
                self._pending_lock.acquire()
                self._pending[rid] = pend._replace(pid=pid, time=time)
                self._pending_lock.release()
            else:
                # Do not update entry because this action was
                # already finished and action_succeeded() had removed
                # the entry from _pending
                pass

    def kill_stale_actions(self, number_to_kill=1):
        self.fetch_started_ids()  # Check for new timestamps

        if number_to_kill <= 0:
            return

        to_remove = []
        self._pending_lock.acquire()
        _pending_copy = self._pending.copy()
        self._pending_lock.release()

        for rid, pend in _pending_copy.items():
            if pend.time is None:
                # This action has not started yet => no start time
                # available
                continue

            if self.running_time_exceeded(pend.time):
                # Find process for this pid
                for c in self.pool._pool:
                    # print(pend.pid, c.pid)
                    # Note that only N_PROCESSES different values for c.pid
                    # are possible.
                    # We assume here that only one entry in _pending will match
                    # because earlier processes are already removed from this dict.
                    if c.pid == pend.pid:
                        to_remove.append((rid, c))
                        number_to_kill -= 1
                        break

            if number_to_kill <= 0:
                break

        if to_remove:
            # logger.debug("\t\t\tLen active_children A: {}".\
            #        format(len(self.pool._pool)))
            pass

        for (rid, c) in to_remove:
            # Terminates children created by subprocess.Popen
            kill_children(c.pid, False)

            # Now terminate process
            logger.debug("Send SIGTERM to {}".format(c.pid))
            if c.exitcode is None:
                c.terminate()

            if c.is_alive():
                try:
                    c.join(timeout=1.0)
                except TimeoutError:
                    logger.debug("Joining failed")
                    pass

            if c.exitcode is None:
                logger.debug("Send SIGKILL to {}".format(c.pid))
                c.kill()

            self._pending_lock.acquire()
            pend = self._pending.pop(rid, None)
            if pend:
                self._num_active_or_pending -= 1
                self._counter_aborted.append(rid)
                # Remove result from pool._cache
                try:
                    # Hm, wrong thread?! Sometimes it is already removed from _cache
                    pend.result._set(0, (False, TimeoutError("Stale action")))
                except KeyError:
                    pass

            self._pending_lock.release()

        if to_remove:
            # logger.debug("\t\t\tLen active_children B: {}".\
            #        format(len(self.pool._pool)))
            self.pool._repopulate_pool(
            )  # Hm, does not hold len(_pool) constant
            # logger.debug("\t\t\tLen active_children C: {}".\
            #        format(len(self.pool._pool)))

    def wait_debug(self):
        n = 0
        while len(self.pool._cache) > 0:
            sleep(1.0)
            print(".",
                  self.pool._cache.keys(),
                  "|",
                  self._num_active_or_pending,
                  "P",
                  len(self.pool._pool),
                  end="\n")
            self.kill_stale_actions(self._num_active_or_pending)
            n += 1
            if (n % 5 == 0): print("")

    def wait(self, timeout=None, terminate_workers=False):
        """ Give each pending action the guaranteed running time
            but kill them if they are not fast enough.
            (Thus duration(self.wait()) <= duration(self.pool.join()).)

            Note/TODO: A high ALLOW_ABORT_AFTER could lead to very long
                       blocking.
        """

        if timeout is not None:
            end_time = time() + timeout

        abort_loop = 1000  # Just as fallback

        while self._num_active_or_pending > 0 and abort_loop > 0:
            abort_loop -= 1
            # Waiting on first process. (Return of others ignored here)
            result = next(iter(self.pool._cache.values()))
            wait_time = self.allow_abort_after
            if timeout is not None:
                wait_time = min(wait_time, end_time - time())
            if wait_time <= 0:
                logger.debug("ActionPool.wait() reached timeout")
                break
            try:
                result.get(wait_time)
            except TimeoutError:
                pass

            self.kill_stale_actions(self._num_active_or_pending)

        if timeout is not None and terminate_workers:
            self.kill_workers()
        if abort_loop == 0:
            raise RuntimeError("Some processes still running?!")

    def kill_workers(self):
        for c in self.pool._pool:
            # Terminates children created by subprocess.Popen
            kill_children(c.pid, False)

            if c.exitcode is None:
                c.terminate()
            try:
                c.join(timeout=1.0)
            except TimeoutError:
                pass
            if c.exitcode is None:
                c.kill()

    def statistic(self):
        # Due to the usage of multiprocessing.queues.SimpleQueue
        # and other problems it is hard to count the running
        # tasks. Just assume that all self.processes
        # will be used all the time.
        num_active = min(len(self.pool._cache), self.processes)

        return (" Tasks        ok: {}\n"
                " Tasks   skipped: {}\n"
                " Tasks   aborted: {}\n"
                " Tasks    failed: {}\n"
                "#Tasks    active: {}\n"
                "#Tasks not begun: {}".format(
                    self._counter_ok,
                    self._counter_skipped,
                    self._counter_aborted,
                    self._counter_failed,
                    num_active,
                    self._num_active_or_pending - num_active,
                ))
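
start() above creates the Pool while SIGINT is temporarily ignored, so worker processes do not receive Ctrl-C, and uses maxtasksperchild=1 so that terminated workers can be replaced. A stand-alone sketch of just that idiom (hypothetical work function) could be:

# Sketch: create a Pool whose workers ignore SIGINT, so Ctrl-C only
# interrupts the parent process (assumed idiom, not the project's code).
import signal
import time
from multiprocessing import Pool

def _init_worker():
    # Executed in every worker: drop the default KeyboardInterrupt handler.
    signal.signal(signal.SIGINT, signal.SIG_IGN)

def work(x):
    time.sleep(0.1)
    return x * x

if __name__ == '__main__':
    # Ignore SIGINT while the pool is created, then restore it for the parent.
    original = signal.signal(signal.SIGINT, signal.SIG_IGN)
    pool = Pool(processes=2, initializer=_init_worker, maxtasksperchild=1)
    signal.signal(signal.SIGINT, original)
    try:
        print(pool.map(work, range(5)))
    except KeyboardInterrupt:
        pool.terminate()
    else:
        pool.close()
    pool.join()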
예제 #53
0
dbtweets = db.tweets

count = Queue()
for i in range(dbtweets.find({'hist_list' : {'$exists': False}}).count()):
	count.put(i)

queue = Queue()
total_number = dbtweets.find({'hist_list' : {'$exists': False}}).count()

for i in range(dbtweets.find({'hist_list' : {'$exists': False}}).count()):
	try:
		one_doc = [dbtweets.find({'hist_list' : {'$exists': False}})[i]['tweet_id'], 
					dbtweets.find({'hist_list' : {'$exists': False}})[i]['author_full_name']
					]
		#print "this is No.%d" %i + " Doc: ", one_doc
		queue.put_nowait(one_doc)
		print one_doc
		if (i>4):
			break
	except Exception:
		continue



url = "https://twitter.com/login"
payload = { 'session[username_or_email]': 'irony_research', 
			'session[password]': 'research_irony'
			}

# two more accounts
예제 #54
0
class StatsBasedOnIds(StatsType):
    def __init__(self,
                 samples_id: Collection[str],
                 is_primary_acc_id: bool,
                 virus_db_id: Optional[int] = None,
                 sources: Optional[List[str]] = None):
        super().__init__()
        self._scheduled_samples_acc_id = [str(x) for x in samples_id]
        message = f"\n" \
                  f"STATS MODULE:\n" \
                  f"scheduled import of {len(self._scheduled_samples_acc_id)} samples"
        if virus_db_id is not None:
            self._measure_delta_in_db(virus_db_id, is_primary_acc_id, sources)
        if len(set(samples_id)) == len(samples_id):
            logger.info(message)
        else:
            set_scheduled = set(self._scheduled_samples_acc_id)
            _duplicated_accession_id = list(self._scheduled_samples_acc_id)
            for x in set_scheduled:
                _duplicated_accession_id.remove(x)
            difference = len(_duplicated_accession_id)
            message = f'\n{difference} of the scheduled samples appear to have '
            message += 'primary ' if is_primary_acc_id else 'alternative '
            message += "duplicated accession ids. Ids are\n"
            message += f"{sorted(_duplicated_accession_id)}"
            logger.error(message)
            sys.exit(1)
        self._completed_samples_acc_id = Queue()
        self._removed_samples_acc_id = Queue()

    def completed_sample(self, sample_acc_id: str):
        self._completed_samples_acc_id.put_nowait(sample_acc_id)

    def removed_samples(self, samples_acc_id: List[str]):
        for i in samples_acc_id:
            self._removed_samples_acc_id.put_nowait(i)

    def get_scheduled_not_completed(self) -> set:
        completed_queue_copy = Queue()
        completed_set = set()
        while not self._completed_samples_acc_id.empty():
            try:
                item = self._completed_samples_acc_id.get(False)
                completed_set.add(item)
                completed_queue_copy.put_nowait(item)
            except Empty:
                continue
        # reassign completed queue to its copy
        self._completed_samples_acc_id.close()
        self._completed_samples_acc_id = completed_queue_copy
        return set(self._scheduled_samples_acc_id) - completed_set

    def check_samples_imported(self):
        if self._completed_samples_acc_id is None:
            logger.error(
                'STATS MODULE: check_samples_imported called before add_samples. Stats cannot be produced.'
            )
        else:
            warn = False
            message = f"\n" \
                      f"STATS MODULE:\n"
            _scheduled_samples_acc_id = set(self._scheduled_samples_acc_id)
            message += f"Scheduled import of {len(_scheduled_samples_acc_id)} samples\n"
            completed_samples_acc_id = self._queue_to_set(
                self._completed_samples_acc_id)
            removed_samples_acc_id = self._queue_to_set(
                self._removed_samples_acc_id)
            self._completed_samples_acc_id.close()
            self._removed_samples_acc_id.close()
            message += f"Removed {len(removed_samples_acc_id)} samples\n"
            message += f"Completed {len(completed_samples_acc_id)} samples\n"

            # find errors
            scheduled_not_completed = _scheduled_samples_acc_id - completed_samples_acc_id
            completed_not_scheduled = completed_samples_acc_id - _scheduled_samples_acc_id
            if len(scheduled_not_completed) > 0:
                warn = True
                message += f'Failed (scheduled but not completed) {len(scheduled_not_completed)} samples\n' \
                           f'\tsamples id: {scheduled_not_completed}\n'
            if len(completed_not_scheduled) > 0:
                warn = True
                message += f'Wrongly processed (completed but not scheduled) {len(completed_not_scheduled)} samples\n' \
                           f'\tsamples id: {completed_not_scheduled}\n'

            # check against DB
            if self._virus_db_id is not None:
                if self._is_primary_acc_id:
                    current_sequences_in_db = database.try_py_function(
                        vcm.sequence_primary_accession_ids, self._virus_db_id,
                        self._sources)
                else:
                    current_sequences_in_db = database.try_py_function(
                        vcm.sequence_alternative_accession_ids,
                        self._virus_db_id, self._sources)
                current_sequences_in_db = Counter(current_sequences_in_db)
                inserted_in_db = current_sequences_in_db - self._sequences_in_db_at_start
                removed_from_db = self._sequences_in_db_at_start - current_sequences_in_db
                message += f"Sequences in DB:\n" \
                           f"\tprevious number {sum(self._sequences_in_db_at_start.values())}.\n" \
                           f"\tcurrent number {sum(current_sequences_in_db.values())}\n" \
                           f"\tdifference: {sum(inserted_in_db.values())} new - {sum(removed_from_db.values())} missing from source and deleted  = {sum(inserted_in_db.values()) - sum(removed_from_db.values())}\n"
                # check duplicates
                num_duplicates_at_start = sum(
                    self._sequences_in_db_at_start.values()) - len(
                        set(self._sequences_in_db_at_start))
                num_current_duplicates = sum(
                    current_sequences_in_db.values()) - len(
                        set(current_sequences_in_db))
                if num_duplicates_at_start > 0 or num_current_duplicates > 0:
                    warn = True
                    message += f"Duplicated accession_ids in DB:\n" \
                               f"\tprevious number of duplicates: {num_duplicates_at_start}\n" \
                               f"\tcurrent number of duplicates: {num_current_duplicates}\n" \
                               f"\tdetail of current duplicated accession_ids: {sorted(list(current_sequences_in_db - Counter(set(current_sequences_in_db))))}\n"

                # check errors in insertions
                changed_in_db = Counter(
                    removed_samples_acc_id
                )  # this set cannot be retrieved from the database
                completed_not_inserted = Counter(
                    completed_samples_acc_id) - inserted_in_db - changed_in_db
                inserted_not_completed = inserted_in_db - Counter(
                    completed_samples_acc_id) - changed_in_db
                if sum(completed_not_inserted.values()) > 0:
                    warn = True
                    message += f'Number of samples completed and not imported into the DB: {sum(completed_not_inserted.values())}\n' \
                               f'\taccession ids: {sorted(list(completed_not_inserted.elements()))}\n'
                if sum(inserted_not_completed.values()) > 0:
                    warn = True
                    message += f'Number of samples imported into the DB but incomplete: {sum(inserted_not_completed.values())}\n' \
                               f'\taccession ids: {sorted(list(inserted_not_completed.elements()))}\n'

            # performance
            message += self._performance_message(len(completed_samples_acc_id))

            if not warn:
                logger.info(message)
            else:
                logger.warning(message)

    @staticmethod
    def _queue_to_set(q: Queue):
        s = set()
        while not q.empty():
            try:
                s.add(q.get(False))
            except Empty:
                continue
        return s
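
completed_sample() and _queue_to_set() above let worker processes report finished accession ids through a Queue that the parent later drains into a set. A self-contained sketch of that reporting pattern (made-up ids, simplified worker) might be:

# Sketch: workers report completed ids on a Queue; the parent drains it
# into a set to compare against the scheduled ids (illustrative only).
from multiprocessing import Process, Queue
from queue import Empty

def worker(ids, done_q):
    for i in ids:
        # ... the actual import work would happen here ...
        done_q.put_nowait(i)

def queue_to_set(q):
    s = set()
    while not q.empty():
        try:
            s.add(q.get(False))
        except Empty:
            continue
    return s

if __name__ == '__main__':
    scheduled = {'A1', 'A2', 'A3', 'A4'}
    done_q = Queue()
    p = Process(target=worker, args=(sorted(scheduled - {'A4'}), done_q))
    p.start()
    p.join()
    completed = queue_to_set(done_q)
    print('not completed:', scheduled - completed)   # -> {'A4'}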
예제 #55
0
class Crawler(Process):
    def __init__(self, id, GPR, crawlers_queue):
        Process.__init__(self)
        self.id = id
        self.GPR = GPR
        # Making a queue only contains 1 element.
        self.__queue = Queue(1)
        self.__crawlers_queue = crawlers_queue

    def add_request(self, request):
        '''
		Add a request for this crawler to crawl. This is a non-blocking method.
		Returns True on success, False on failure.
		NOTE: It should always be True, because when the scheduler decides to add
		a request to a crawler, it is the scheduler's responsibility to find out
		whether this crawler is available.
		'''
        try:
            self.__queue.put_nowait(request)
        except Q.Full:
            return False
        else:
            return True

    def run(self):
        while True:
            request = self.__queue.get()

            # Timing
            t = Timing()
            type = None
            end_url = None
            headers = None
            content = None

            with t:
                # Urlopen.
                try:
                    content, end_url, headers, type = self._send_request(request)
                    if len(content) == MAX_PAGE_SIZE:
                        type = share.SIZE_LIMIT_ERROR
                        content = 'Page size exceeds maximum!'
                except urllib2.HTTPError as e:
                    print 'HTTPERROR', e
                    type = e.code
                    content = str(e)
                    headers = e.headers.dict
                except urllib2.URLError as e:
                    print 'URLError', e
                    content = str(e)
                    if content.find('timed out') != -1:
                        type = share.TIMEOUT_ERROR
                    elif content.find('unknown url type') != -1:
                        type = share.URL_PARSE_ERROR
                    elif content.find('getaddrinfo failed') != -1:
                        type = share.DNS_ERROR
                    else:
                        type = share.UNKOWN_ERROR

                except ValueError as e:
                    print 'Value Error', e
                    type = share.URL_PARSE_ERROR
                    content = str(e)
                except:
                    # Catch all errors.
                    print 'Unknown error'
                    raise
                    type = share.UNKOWN_ERROR
                    content = 'Unknown Error'
            print t

            self.GPR.put((type, self.name, t.Timing, request, end_url, headers, content))

            if t.Timing < MAX_REQUEST_TIME:
                # Add itself to crawlers_queue to tell scheduler I'm ready to do the next job.
                self.__crawlers_queue.put(self.id)

    @Retry(MAX_RETRY)
    def _send_request(self, request, retry):
        print '[', self.name, ']', ' opening:', request
        response = urllib2.urlopen(request, timeout=MAX_HTTP_REQUEST_TIME)
        print 'reading...'
        content = response.read(MAX_PAGE_SIZE)
        end_url = response.url
        headers = response.headers.dict
        type = response.getcode()
        print 'complete!'
        return content, end_url, headers, type
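
Each Crawler above owns a Queue(1) for incoming requests and announces that it is idle by putting its id on the shared crawlers_queue. A minimal sketch of that scheduler hand-off (dummy work instead of real HTTP requests) could look like this:

# Sketch of the "ready queue" hand-off: each worker owns a small job queue
# and advertises itself on a shared queue whenever it is idle.
from multiprocessing import Process, Queue

def crawler(worker_id, job_q, ready_q, result_q):
    while True:
        job = job_q.get()
        if job is None:                            # poison pill -> shut down
            break
        result_q.put((worker_id, job, len(job)))   # stand-in for a fetch
        ready_q.put(worker_id)                     # announce: idle again

if __name__ == '__main__':
    urls = ['http://a.example', 'http://bb.example', 'http://ccc.example']
    n_workers = 2
    ready_q, result_q = Queue(), Queue()
    job_qs = [Queue(1) for _ in range(n_workers)]
    workers = [Process(target=crawler, args=(i, job_qs[i], ready_q, result_q))
               for i in range(n_workers)]
    for i, w in enumerate(workers):
        w.start()
        ready_q.put(i)                             # every worker starts out idle
    for url in urls:
        free = ready_q.get()                       # wait for a free worker
        job_qs[free].put(url)
    for _ in urls:
        print(result_q.get())                      # drain results before joining
    for q in job_qs:
        q.put(None)
    for w in workers:
        w.join()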
예제 #56
0
파일: run_game.py 프로젝트: giv194/CSE150
class Game(object):
    def __init__(self, M, N, K, player_classes, timeout=None):
        self.players = Player.create_players(player_classes)
        self.state = State.initial(M, N, K, self.players[0])
        self.timeout = timeout

    def play(self):
        while not self.state.is_terminal():
            self.before_move()
            next_move = self.request_move()
            self.state = self.state.result(next_move)
            self.after_move()
        return self.state.winner_color

    def request_move(self):
        # state = copy.deepcopy(self.state)
        state = self.state
        player = self.state.to_play

        if self.timeout is None:
            # No timeout, just use a single process
            action = self.state.to_play.move(state)
        else:
            # For passing the messages back and forth
            self.result_q = Queue(1)
            self.signal_q = Queue(1)

            # Dynamically augment the player instance
            def is_time_up(self):
                try:
                    self._signal_q.get_nowait()
                    return True
                except Empty:
                    return False

            def do_move(self, state, result_q, signal_q):
                sys.stdin = os.fdopen(self.fileno)
                self._signal_q = signal_q
                result_q.put_nowait(self.move(state))

            player.is_time_up = MethodType(is_time_up, player)
            player.do_move = MethodType(do_move, player)
            player.fileno = sys.stdin.fileno()


            # Boot a process for the player move
            move_process = Process(target=player.do_move, args=(state, self.result_q, self.signal_q))
            move_process.start()

            action = None
            try:
                action = self.result_q.get(True, self.timeout)

            except Empty:
                # Send the "time is up" warning
                self.signal_q.put_nowait(0)

                # Wait one second and get the move
                try:
                    action = self.result_q.get(True, 1)
                except Empty:
                    pass

            # Clear queues
            try:
                self.signal_q.get_nowait()
            except Empty:
                pass

            try:
                self.result_q.get_nowait()
            except Empty:
                pass

            if move_process.is_alive():
                move_process.terminate()
                move_process.join(1)

            if action is None:
                # If a move wasn't placed on the result queue in time, fall back
                # to the first available move
                action = self.state.actions()[0]

        return action
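
request_move() above runs the player's move search in a separate Process, waits on a result queue with a timeout, and falls back to a default move if the search is too slow. A self-contained sketch of that timeout pattern (dummy think function and move values) might be:

# Sketch: compute a result in a child process, but give up after a timeout
# and fall back to a default value (illustrative, not the game engine above).
import time
from multiprocessing import Process, Queue
from queue import Empty

def think(result_q, duration):
    time.sleep(duration)            # stand-in for an expensive move search
    result_q.put_nowait('best_move')

def move_with_timeout(duration, timeout):
    result_q = Queue(1)
    p = Process(target=think, args=(result_q, duration))
    p.start()
    try:
        action = result_q.get(True, timeout)
    except Empty:
        action = 'fallback_move'
    if p.is_alive():
        p.terminate()
    p.join(1)
    return action

if __name__ == '__main__':
    print(move_with_timeout(duration=0.1, timeout=1.0))   # -> best_move
    print(move_with_timeout(duration=2.0, timeout=0.5))   # -> fallback_move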
예제 #57
0
class TaskManager:
    def __init__(self, log_queue, log_level=logging.INFO):
        self.tx_queue = Queue()
        self.rx_queue = Queue()
        self.log_queue = log_queue
        self.log_level = log_level
        self.worker = Worker(rx_queue=self.tx_queue, tx_queue=self.rx_queue)
        self.worker_process = None
        self._receivers = {}
        self.register("pong", self.handle_pong)

    def register(self, name, callback, overwrite=False):
        if not overwrite and name in self._receivers:
            raise RuntimeError(f"{name} is already registered...")
        self._receivers[name] = callback

    def register_task(self, name, callback, threaded=False, on_complete=None):
        self.worker.register(name, callback, threaded, on_complete)

    def register_handler(self, name, callback, overwrite=False):
        self.register(name, callback, overwrite)

    def call(self, name, **kwargs):
        if not kwargs:
            kwargs = None
        msg = Message(name, kwargs)
        self.send_message(msg)

    def start_process(self):
        self.worker_process = Process(target=self.worker.process_tasks,
                                      args=(self.log_queue, self.log_level))
        self.worker_process.start()

    def is_alive(self):
        return self.worker_process.is_alive()

    def quit(self):
        self.send_message(Message("quit"))

    def ping(self):
        self.send_message(Message("ping"))

    def send_message(self, msg: Message):
        self.tx_queue.put_nowait(msg)

    def receive_message(self) -> Message:
        try:
            return self.rx_queue.get_nowait()
        except Empty:
            return None

    def handle_pong(self):
        pass

    def dispatch(self, msg):
        if msg.name != "pong":
            logger.debug("Received Message: %s", msg)

        func = self._receivers.get(msg.name)
        if func is None:
            logger.warning(
                "Received unexpected command (%s) from worker. Ignoring...",
                msg.name)
            return

        kwargs = {}
        if msg.kwargs is not None:
            kwargs.update(msg.kwargs)

        func(**kwargs)
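
TaskManager above pairs a tx/rx queue with a name-to-callback registry, and dispatch() looks handlers up by message name. A minimal free-standing sketch of the same request/response dispatch (plain tuples standing in for the Message class) could be:

# Sketch: a worker process answers named requests over a pair of queues and
# the parent dispatches replies through a handler registry (illustrative only).
from multiprocessing import Process, Queue

def worker(rx_q, tx_q):
    while True:
        name, payload = rx_q.get()
        if name == 'quit':
            break
        if name == 'ping':
            tx_q.put(('pong', payload))

handlers = {'pong': lambda payload: print('got pong', payload)}

def dispatch(msg):
    name, payload = msg
    func = handlers.get(name)
    if func is None:
        print('unexpected message:', name)
        return
    func(payload)

if __name__ == '__main__':
    tx_q, rx_q = Queue(), Queue()
    p = Process(target=worker, args=(tx_q, rx_q))
    p.start()
    tx_q.put(('ping', 1))
    dispatch(rx_q.get())        # -> got pong 1
    tx_q.put(('quit', None))
    p.join()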
예제 #58
0
파일: Server.py 프로젝트: umd-afrl/devices
async def start(queue: Queue, toggle: Queue, ip='192.168.1.7', port=8080):
    global SERVER, runner, site, in_queue, toggle_queue
    SERVER.on_startup.append(on_startup)
    SERVER.on_shutdown.append(on_shutdown)
    SERVER.router.add_get('/', root_handler)
    SERVER.router.add_get('/ws', websocket_handler)
    SERVER.router.add_post('/toggleavmu', toggle_handler)
    SERVER.router.add_static(prefix='/', path=WEB_ROOT)
    in_queue = queue
    toggle_queue = toggle
    logging.basicConfig(level=logging.DEBUG)
    runner = web.AppRunner(SERVER)
    await runner.setup()
    site = web.TCPSite(runner, ip, port)
    await site.start()
    print('Site available at http://' + site.__getattribute__('_host') + ':' +
          str(site.__getattribute__('_port')))


async def end():
    await SERVER.shutdown()


if __name__ == '__main__':
    queue, toggle = Queue(), Queue()
    asyncio.ensure_future(start(queue, toggle, ip='localhost', port=8080))
    loop = asyncio.get_event_loop()

    # Fill the local queue; the global in_queue is only assigned once start()
    # actually runs inside the event loop.
    for i in range(1000):
        queue.put_nowait(i)
    loop.run_forever()
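
start() above hands multiprocessing queues to the aiohttp app so other processes can feed data to it. Reading such a queue from a coroutine must not block the event loop; a minimal sketch of non-blocking polling (stdlib only, no aiohttp) might be:

# Sketch: poll a multiprocessing Queue from asyncio without blocking the
# event loop (illustrative; the real server above pushes items to websockets).
import asyncio
from multiprocessing import Queue
from queue import Empty

async def drain_queue(q, interval=0.05):
    while True:
        try:
            item = q.get_nowait()
        except Empty:
            await asyncio.sleep(interval)   # yield to the loop while idle
            continue
        if item is None:                    # sentinel -> stop
            return
        print('got', item)

async def main():
    q = Queue()
    for i in range(5):
        q.put_nowait(i)
    q.put_nowait(None)
    await drain_queue(q)

if __name__ == '__main__':
    asyncio.run(main())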
예제 #59
0
파일: test_base.py 프로젝트: schooft/hidra
class TestBase(unittest.TestCase):
    """The Base class from which all data fetchers should inherit from.
    """

    loglevel = "error"

    def setUp(self):

        self.log_queue = False
        self.listener = None
        self.log = None
        self.context = None
        self.base_dir = _environment.BASE_DIR

        main_pid = os.getpid()
        self.con_ip = m_socket.getfqdn()
        self.ext_ip = m_socket.gethostbyaddr(self.con_ip)[2][0]
        ipc_dir = os.path.join(tempfile.gettempdir(), "hidra")

        ports = {
            "com": 50000,
            "request": 50001,
            "request_fw": 50002,
            "router": 50004,
            "eventdet_port": 50003,
            "control_pub": 50005,
            "control_sub": 50006,
            "cleaner": 50051,
            "cleaner_trigger": 50052,
            "confirmation": 50053,
        }

        self.ipc_addresses = utils.set_ipc_addresses(ipc_dir=ipc_dir,
                                                     main_pid=main_pid)

        confirm_ips = [self.ext_ip, self.con_ip]

        endpoints = utils.set_endpoints(ext_ip=self.ext_ip,
                                        con_ip=self.con_ip,
                                        ports=ports,
                                        confirm_ips=confirm_ips,
                                        ipc_addresses=self.ipc_addresses)

        self.config = {
            "ports": ports,
            "ipc_dir": ipc_dir,
            "main_pid": main_pid,
            "endpoints": endpoints,
        }

        MockLogging.loglevel = self.loglevel
        self._init_logging(loglevel=self.loglevel)

#        self.log.debug("%s pid %s", self.__class__.__name__, main_pid)

    def __iter__(self):
        for attr, value in self.__dict__.items():
            yield attr, value

    def _init_logging(self, loglevel=LOGLEVEL):
        """Initialize log listener and log queue.

        Args:
            loglevel: The log level with of StreamHandler to be started.
        """

        loglevel = loglevel.lower()

        # Create handler
        handler = utils.get_stream_log_handler(loglevel=loglevel)

        # Start queue listener using the stream handler above
        self.log_queue = Queue(-1)
        self.listener = utils.CustomQueueListener(self.log_queue, handler)
        self.listener.start()

        # Create log and set handler to queue handle
        root = logging.getLogger()
        qhandler = QueueHandler(self.log_queue)
        root.addHandler(qhandler)

        #        self.log = utils.get_logger("test_datafetcher", self.log_queue)
        self.log = MockLogging()

    def set_up_recv_socket(self, port):
        """Create pull socket and connect to port.

        Args:
            port: Port to connect to.
        """

        endpoint = "tcp://{}:{}".format(self.ext_ip, port)

        return self.start_socket(name="receiving_socket",
                                 sock_type=zmq.PULL,
                                 sock_con="bind",
                                 endpoint=endpoint)

    def start_socket(self, name, sock_type, sock_con, endpoint):
        """Wrapper of utils.start_socket
        """

        socket, _ = utils.start_socket(name=name,
                                       sock_type=sock_type,
                                       sock_con=sock_con,
                                       endpoint=endpoint,
                                       context=self.context,
                                       log=self.log)

        return socket

    def stop_socket(self, name, socket=None):
        """Wrapper for utils.stop_socket.
        """
        # use the class attribute
        if socket is None:
            socket = getattr(self, name)
            use_class_attribute = True
        else:
            use_class_attribute = False

        return_socket = utils.stop_socket(name=name,
                                          socket=socket,
                                          log=self.log)

        # class attributes are set directly
        if use_class_attribute:
            setattr(self, name, return_socket)
        else:
            return return_socket

    def tearDown(self):

        for _, endpoint in self.ipc_addresses._asdict().items():
            try:
                os.remove(endpoint)
                self.log.debug("Removed ipc socket: %s", endpoint)
            except OSError:
                pass
#                self.log.debug("Could not remove ipc socket: %s", endpoint)
            except Exception:
                self.log.warning("Could not remove ipc socket: %s",
                                 endpoint,
                                 exc_info=True)

        if self.listener is not None:
            self.log_queue.put_nowait(None)
            self.listener.stop()
            self.listener = None
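
_init_logging() above routes log records from all processes through a Queue to a single listener. A self-contained sketch of that standard QueueHandler/QueueListener pattern (stdlib logging only, simplified formatter) could be:

# Sketch: multiprocess-safe logging via QueueHandler/QueueListener
# (stdlib recipe; the test class above wraps the same idea in helpers).
import logging
from logging.handlers import QueueHandler, QueueListener
from multiprocessing import Process, Queue

def worker(log_queue):
    root = logging.getLogger()
    root.addHandler(QueueHandler(log_queue))   # every record goes to the queue
    root.setLevel(logging.DEBUG)
    root.debug("hello from the worker")

if __name__ == '__main__':
    log_queue = Queue(-1)
    handler = logging.StreamHandler()
    handler.setFormatter(logging.Formatter("%(processName)s %(levelname)s %(message)s"))
    listener = QueueListener(log_queue, handler)
    listener.start()

    p = Process(target=worker, args=(log_queue,))
    p.start()
    p.join()

    listener.stop()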
예제 #60
0
class Producer(object):
    """
    Abstract base class for all production activities

    Manages a process and its inbound/outbound queues.

    Child classes should implement:
        * handle_message: receive messages from the host application
        * production_step: do the next production step for this process
    """
    __metaclass__ = ABCMeta

    def __init__(self, buffer_size=None):
        """
        Args:
            * buffer_size: how many outbound productions to cache.
                If buffer_size is None, will continue producing for all time
                If buffer_size is an integer, it will fill the outbound queue with
                    exactly that many items. It will only produce again when the 
                    queue drops under the buffer size
        """
        self.process = None
        self.inbound = Queue()
        if buffer_size is None:
            self.outbound = Queue()
        else:
            self.outbound = Queue(maxsize=buffer_size)
        self._did_start = False
        self._exit = Event()

    def _shutdown(self):
        self.inbound.close()
        self.outbound.close()
        self._exit.set()

    @abstractmethod
    def handle_message(self, msg):
        """Handle an inbound message from the host application"""
        pass

    @abstractmethod
    def production_step(self):
        """Produce the next step in the output sequence"""
        pass

    def run(self, inbound, outbound):
        """
        The "run step" for this process. Handles
        inbound messages, and generating production steps

        Args:
            * inbound: the inbound message queue, which can send commands
                to the process. If a STOP_MSG item is sent,
                the process terminates
            * outbound: the outbound production queue- the output

        NB: I tried having these as `self` accesses, and not parameters,
        but it seems like the queues wouldn't get populated.
        """
        while not self._exit.is_set():
            while not inbound.empty():
                msg = inbound.get_nowait()
                try:
                    self.handle_message(msg)
                except Exception as e:
                    outbound.put(MessageHandlingError(e))
            if not outbound.full():
                try:
                    outbound.put(self.production_step())
                except Exception as e:
                    outbound.put(ProductionStepError(e))

    def start(self):
        """
        Start the child production process
        """
        if self._did_start:
            raise AlreadyStartedError()
        self.process = Process(target=self.run,
                               args=(self.inbound, self.outbound))
        self._did_start = True
        self.process.start()

    def stop(self):
        """
        Send a stop message to end the child process. The child process
        will take this to shutdown gracefully.
        """
        if self._did_start:
            self._shutdown()
            self.process.join(0.01)
            self.process.terminate()

    def send(self, msg):
        """
        Send a message to the child process

        Args:
            msg: whatever arbitrary data the child process
                wishes to handle
        """
        self.inbound.put_nowait(msg)

    def get(self, timeout=0.01):
        """
        Return the next message in the outbound queue.

        If that message contains an exception, raises the
        exception instead.

        If the process hasn't been started, starts the process
        instead.
        """
        if not self._did_start:
            raise NotStartedException()
        res = self.outbound.get(timeout=timeout)
        if isinstance(res, ProductionError):
            raise res
        return res
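
Producer above caps its outbound queue with buffer_size so the child only produces while the consumer leaves room. A minimal free-standing sketch of the same bounded-producer idea (no abstract base class, made-up item values) might be:

# Sketch of a bounded producer process: the child pre-computes items into a
# Queue with a maxsize and pauses while the buffer is full (illustrative only).
import time
from multiprocessing import Event, Process, Queue

def produce(outbound, stop):
    n = 0
    while not stop.is_set():
        if outbound.full():
            time.sleep(0.01)        # buffer full: wait for the consumer
            continue
        outbound.put(n)
        n += 1

if __name__ == '__main__':
    outbound = Queue(maxsize=3)     # cache at most three productions
    stop = Event()
    p = Process(target=produce, args=(outbound, stop))
    p.start()

    for _ in range(10):
        print(outbound.get())       # consuming makes room, producer refills

    stop.set()
    p.join()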