Example 1
def start_vision_process(manager: SyncManager) -> \
        Tuple[Namespace, Event, Event]:
    ns = manager.Namespace()
    # initialize the shared namespace with the default config mode
    setattr(ns, 'active_config', ConfigMode.GEARS)

    evt = manager.Event()
    sh_evt = manager.Event()
    proc = mp.Process(target=vision_starter, args=(ns, evt, sh_evt))
    proc.start()
    return ns, evt, sh_evt
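
The snippet above only shows the parent side: it builds manager proxies (a Namespace and two Events) and hands them to a child process. The worker, vision_starter, is not part of the excerpt; a minimal sketch of what such a worker could look like, treating sh_evt as a shutdown flag (an assumption, since the original implementation is not shown):

def vision_starter(ns, evt, sh_evt):
    # ns, evt and sh_evt are manager proxies: reads and writes go through
    # the manager process and are therefore visible to the parent as well.
    while not sh_evt.is_set():            # assumed meaning: shutdown requested
        if evt.wait(timeout=0.5):         # wait until the parent signals work
            evt.clear()
            mode = ns.active_config       # read the currently requested mode
            # ... run one vision pass for `mode` here ...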
Example 2
 def __init__(self):
     mgr = SyncManager()
     mgr.start(signal.signal, (signal.SIGINT, signal.SIG_IGN))
     self.ns_default = mgr.Namespace()
     self.ns_default.error = None
     self.ns_stats = mgr.Namespace()
     self.input_queue = mgr.Queue(maxsize=100)
     self.error_occurred = mgr.Event()
     self.error_processed = mgr.Event()
     self.batch_done = mgr.Event()
     self.mgr = mgr
     self.stats_lock = mgr.Lock()
     self.main_lock = mgr.Lock()
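
The call mgr.start(signal.signal, (signal.SIGINT, signal.SIG_IGN)) passes signal.signal as the manager's initializer, so the manager's server process installs SIG_IGN for SIGINT and survives a Ctrl-C aimed at the parent. A small self-contained equivalent of that idiom (not taken from the original project):

import signal
from multiprocessing.managers import SyncManager

def _ignore_sigint():
    # Runs inside the manager's server process: ignore Ctrl-C there so the
    # parent can catch KeyboardInterrupt and shut everything down itself.
    signal.signal(signal.SIGINT, signal.SIG_IGN)

if __name__ == '__main__':
    mgr = SyncManager()
    mgr.start(_ignore_sigint)  # same effect as start(signal.signal, (SIGINT, SIG_IGN))
    # ... create mgr.Queue(), mgr.Event(), mgr.Lock() proxies as above ...
    mgr.shutdown()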
Example 3
 def __init__(self, mp_manager: SyncManager, stop_event: threading.Event,
              closing_event: threading.Event, connect_q: queue.Queue,
              result_q: queue.Queue):
     self.connect_q = connect_q
     self.result_q = result_q
     self._stop_event = stop_event
     self._closing_event = closing_event
     self.all_connections_created = mp_manager.Event()
     self.all_connections_closed = mp_manager.Event()
     self.last_all_closed = time.time()
     self.connections: Dict[int, Connection] = {}
     self.sock_filenos: List[int] = []  # cache this, select() takes a list
     self.connected_targets: Dict[Tuple[int, str, int], Connection] = {}
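
Note the mix of event types in this constructor: stop_event and closing_event are plain threading.Event objects (visible only to threads of the creating process), while the two mp_manager.Event() proxies can be set and checked from other processes. A small, self-contained illustration of that difference (independent of the class above):

import multiprocessing
import threading
from multiprocessing.managers import SyncManager

def worker(evt):
    evt.set()                         # the set() travels through the manager

if __name__ == '__main__':
    mgr = SyncManager()
    mgr.start()
    local_evt = threading.Event()     # usable only by threads in this process
    shared_evt = mgr.Event()          # proxy: usable from child processes too
    p = multiprocessing.Process(target=worker, args=(shared_evt,))
    p.start(); p.join()
    assert shared_evt.is_set()        # the child's set() is visible here
    mgr.shutdown()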
Example 4
 def __init__(self, conf_path='config/pn_conf.yaml'):
     # input param
     self.conf_path = conf_path
     self.conf = getConf(self.conf_path, root_key='audio')
     if self.conf['target'] == 'pi':
         self.t2s = Text2SpeechBaidu(self.conf_path)  # sync
     else:
         self.t2s = Text2SpeechXunFei(self.conf_path)  # sync
     self.executor_t2s = concurrent.futures.ProcessPoolExecutor(2)  # async
     if self.conf['use_custom_manager']:
         # create proxy manager
         mgr = SyncManager(
             (get_lan_ip(), self.conf['custom_manager_port']),
             self.conf['custom_manager_authkey'].encode('utf8'))
         sleep(0.5)  # wait for manager to start
         mgr.connect()
     else:
         mgr = multiprocessing.Manager()
     self.q_audio = mgr.Queue()
     #-#        debug('audio data queue created. %s', self.q_audio)
     self.event_exit = mgr.Event()
     multiprocessing.current_process(
     ).authkey = self.conf['custom_manager_authkey'].encode(
         'utf8')  # https://bugs.python.org/issue7503
     self.proc_play = multiprocessing.Process(target=self.playAudioFromQ,
                                              args=(self.q_audio,
                                                    self.event_exit))
     self.proc_play.start()
     #-#        debug('play background proc start. %s', self.proc_play)
     # Pre-create the process-pool worker processes; creating them early seems to use less memory
     self.executor_t2s.map(noop_func, (None, None))
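
When use_custom_manager is set, this code connects to a SyncManager server already running elsewhere on the LAN; the server itself is not shown in the excerpt. A minimal sketch of how such a server could be started, reusing the config keys from above (an assumption about the original project, not its actual code):

from multiprocessing.managers import SyncManager

def run_manager_server(host, port, authkey):
    # Objects requested over the wire (mgr.Queue(), mgr.Event(), ...) live
    # inside this server process; clients only hold proxies to them.
    mgr = SyncManager(address=(host, port), authkey=authkey)
    server = mgr.get_server()
    server.serve_forever()   # blocks; run in a dedicated process

# e.g. run_manager_server(get_lan_ip(),
#                         conf['custom_manager_port'],
#                         conf['custom_manager_authkey'].encode('utf8'))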
Example 5
    def __init__(self, conf_path='config/pn_conf.yaml'):
        self.conf_path = os.path.abspath(conf_path)
        self.conf = getConf(self.conf_path, root_key='itchat')

        self.thread_id = None

        self.gid = None  # record the UserName of our group chat
        if self.conf['use_custom_manager']:
            # create proxy manager
            mgr = SyncManager(
                (get_lan_ip(), self.conf['custom_manager_port']),
                self.conf['custom_manager_authkey'].encode('utf8'))
            #-#            sleep(0.5)  # wait for manager to start
            mgr.connect()
        else:
            mgr = multiprocessing.Manager()
        self.q_send = mgr.Queue()
        self.event_exit = mgr.Event()
        multiprocessing.current_process(
        ).authkey = self.conf['custom_manager_authkey'].encode(
            'utf8')  # https://bugs.python.org/issue7503
        self.proc_wx = multiprocessing.Process(target=self.run,
                                               args=(self.event_exit,
                                                     self.q_send))
        self.proc_wx.start()
Example 6
class Executor(Eventful):
    '''
    The executor guides the execution of a single state, handles state forking
    and selection, maintains run statistics and handles all exceptional
    conditions (system calls, memory faults, concretization, etc.)
    '''

    _published_events = {'enqueue_state', 'generate_testcase', 'fork_state', 'load_state', 'terminate_state'}

    def __init__(self, initial=None, store=None, policy='random', context=None, **kwargs):
        super().__init__(**kwargs)

        # Signal/callback handlers may be invoked in different worker
        # processes. State provides a local context to save data.

        self.subscribe('did_load_state', self._register_state_callbacks)

        # This is the global manager that will handle all shared memory access among workers
        self.manager = SyncManager()
        self.manager.start(lambda: signal.signal(signal.SIGINT, signal.SIG_IGN))

        # The main executor lock. Acquire this for accessing shared objects
        self._lock = self.manager.Condition()

        # Shutdown Event
        self._shutdown = self.manager.Event()

        # States on storage. Shared list of queued state ids
        self._states = self.manager.list()

        # Number of currently running workers. Initially no running workers
        self._running = self.manager.Value('i', 0)

        self._workspace = Workspace(self._lock, store)

        # Executor wide shared context
        if context is None:
            context = {}
        self._shared_context = self.manager.dict(context)

        # scheduling priority policy (wip)
        # Set policy
        policies = {'random': Random,
                    'uncovered': Uncovered,
                    'branchlimited': BranchLimited,
                    }
        self._policy = policies[policy](self)
        assert isinstance(self._policy, Policy)

        if self.load_workspace():
            if initial is not None:
                logger.error("Ignoring initial state")
        else:
            if initial is not None:
                self.add(initial)

    def __del__(self):
        self.manager.shutdown()

    @contextmanager
    def locked_context(self, key=None, default=dict):
        ''' Executor context is a shared memory object. All workers share this.
            It needs a lock. It's used like this:

            with executor.context() as context:
                visited = context['visited']
                visited.append(state.cpu.PC)
                context['visited'] = visited
        '''
        assert default in (list, dict, set)
        with self._lock:
            if key is None:
                yield self._shared_context
            else:
                sub_context = self._shared_context.get(key, None)
                if sub_context is None:
                    sub_context = default()
                yield sub_context
                self._shared_context[key] = sub_context

    def _register_state_callbacks(self, state, state_id):
        '''
            Install forwarding callbacks in state so the events can go up.
            Going up, we prepend state in the arguments.
        '''
        # Forward all state signals
        self.forward_events_from(state, True)

    def enqueue(self, state):
        '''
            Enqueue a state.
            Save the state to storage, assign it an id, then add it to the
            priority queue.
        '''
        # save the state to secondary storage
        state_id = self._workspace.save_state(state)
        self.put(state_id)
        self._publish('did_enqueue_state', state_id, state)
        return state_id

    def load_workspace(self):
        # Browse and load states from the workspace in case we are
        # continuing a paused run
        loaded_state_ids = self._workspace.try_loading_workspace()
        if not loaded_state_ids:
            return False

        for state_id in loaded_state_ids:
            self._states.append(state_id)

        return True

    ###############################################
    # Synchronization helpers
    @sync
    def _notify_start_run(self):
        # notify siblings we are about to start a run()
        self._running.value += 1

    @sync
    def _notify_stop_run(self):
        # notify siblings we are about to stop this run()
        self._running.value -= 1
        if self._running is None or self._running.value < 0:
            raise SystemExit
        self._lock.notify_all()

    ################################################
    # Public API
    @property
    def running(self):
        ''' Report an estimate of how many workers are currently running '''
        return self._running.value

    def shutdown(self):
        ''' This will stop all workers '''
        self._shutdown.set()

    def is_shutdown(self):
        ''' Returns True if shutdown was requested '''
        return self._shutdown.is_set()

    ###############################################
    # Priority queue
    @sync
    def put(self, state_id):
        ''' Enqueue it for processing '''
        self._states.append(state_id)
        self._lock.notify_all()
        return state_id

    @sync
    def get(self):
        ''' Dequeue a state with the max priority '''

        # A shutdown has been requested
        if self.is_shutdown():
            return None

        # if no more states in the queue, let's wait for some forks
        while len(self._states) == 0:
            # if no worker is running, bail out
            if self.running == 0:
                return None
            # if a shutdown has been requested, bail out
            if self.is_shutdown():
                return None
            # if there are actually some workers running, wait for state forks
            logger.debug("Waiting for available states")
            self._lock.wait()

        state_id = self._policy.choice(list(self._states))
        if state_id is None:
            return None
        del self._states[self._states.index(state_id)]
        return state_id

    def list(self):
        ''' Returns the list of states ids currently queued '''
        return list(self._states)

    def generate_testcase(self, state, message='Testcase generated'):
        '''
        Simply announce that we're going to generate a testcase. Actual generation
        should be handled by the driver class (such as :class:`~manticore.Manticore`)

        :param state: The state to generate information about
        :param message: Accompanying message
        '''

        # broadcast test generation. This is the time for other modules
        # to output whatever helps to understand this testcase
        self._publish('will_generate_testcase', state, 'test', message)

    def fork(self, state, expression, policy='ALL', setstate=None):
        '''
        Fork the state on concretizations of expression.
        Using policy, build a list of solutions for expression.
        For each solution, create a child state and apply setstate() to it.

        For example if expression is a Bool it may have 2 solutions. True or False.

                                 Parent
                            (expression = ??)

                   Child1                         Child2
            (expression = True)             (expression = False)
               setstate(True)                   setstate(False)

        The optional setstate() function is supposed to set the concrete value
        in the child state.

        '''
        assert isinstance(expression, Expression)

        if setstate is None:
            setstate = lambda x, y: None

        # Find a set of solutions for expression
        solutions = state.concretize(expression, policy)

        if not solutions:
            raise ExecutorError("Forking on unfeasible constraint set")

        if len(solutions) == 1:
            setstate(state, solutions[0])
            return state

        logger.info("Forking. Policy: %s. Values: %s",
                    policy,
                    ', '.join(f'0x{sol:x}' for sol in solutions))

        self._publish('will_fork_state', state, expression, solutions, policy)

        # Build and enqueue a state for each solution
        children = []
        for new_value in solutions:
            with state as new_state:
                new_state.constrain(expression == new_value)

                # and set the PC of the new state to the concrete pc-dest
                # (or other register or memory address to a concrete value)
                setstate(new_state, new_value)

                self._publish('did_fork_state', new_state, expression, new_value, policy)

                # enqueue new_state
                state_id = self.enqueue(new_state)
                # maintain a list of children for logging purpose
                children.append(state_id)

        logger.info("Forking current state into states %r", children)
        return None

    def run(self):
        '''
        Entry point of the Executor; called by workers to start analysis.
        '''
        # policy_order=self.policy_order
        # policy=self.policy
        current_state = None
        current_state_id = None

        with WithKeyboardInterruptAs(self.shutdown):
            # notify siblings we are about to start a run
            self._notify_start_run()

            logger.debug("Starting Manticore Symbolic Emulator Worker (pid %d).", os.getpid())
            solver = Z3Solver()
            while not self.is_shutdown():
                try:  # handle fatal errors: exceptions in Manticore
                    try:  # handle external (e.g. solver) errors, and executor control exceptions
                        # select a suitable state to analyze
                        if current_state is None:
                            with self._lock:
                                # notify siblings we are about to stop this run
                                self._notify_stop_run()
                                try:
                                    # Select a single state_id
                                    current_state_id = self.get()
                                    # load selected state from secondary storage
                                    if current_state_id is not None:
                                        self._publish('will_load_state', current_state_id)
                                        current_state = self._workspace.load_state(current_state_id)
                                        self.forward_events_from(current_state, True)
                                        self._publish('did_load_state', current_state, current_state_id)
                                        logger.info("load state %r", current_state_id)
                                    # notify siblings we have a state to play with
                                finally:
                                    self._notify_start_run()

                        # If current_state is still None, we are done.
                        if current_state is None:
                            logger.debug("No more states in the queue, byte bye!")
                            break

                        assert current_state is not None
                        assert current_state.constraints is current_state.platform.constraints

                        # Allow terminating the manticore worker on user request
                        while not self.is_shutdown():
                            if not current_state.execute():
                                break
                        else:
                            # Notify this worker is done
                            self._publish('will_terminate_state', current_state, current_state_id, TerminateState('Shutdown'))
                            current_state = None

                    # Handling Forking and terminating exceptions
                    except Concretize as e:
                        # expression
                        # policy
                        # setstate()
                        logger.debug("Generic state fork on condition")
                        current_state = self.fork(current_state, e.expression, e.policy, e.setstate)

                    except TerminateState as e:
                        # Notify this worker is done
                        self._publish('will_terminate_state', current_state, current_state_id, e)

                        logger.debug("Generic terminate state")
                        if e.testcase:
                            self.generate_testcase(current_state, str(e))
                        current_state = None

                    except SolverException as e:
                        # raise
                        import traceback
                        trace = traceback.format_exc()
                        logger.error("Exception: %s\n%s", str(e), trace)

                        # Notify this state is done
                        self._publish('will_terminate_state', current_state, current_state_id, e)

                        if solver.check(current_state.constraints):
                            self.generate_testcase(current_state, "Solver failed" + str(e))
                        current_state = None

                except (Exception, AssertionError) as e:
                    # raise
                    import traceback
                    trace = traceback.format_exc()
                    logger.error("Exception: %s\n%s", str(e), trace)
                    # Notify this worker is done
                    self._publish('will_terminate_state', current_state, current_state_id, e)
                    current_state = None
                    logger.setState(None)

            assert current_state is None or self.is_shutdown()

            # notify siblings we are about to stop this run
            self._notify_stop_run()
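
The Executor above relies on a @sync decorator that is not part of this excerpt. A plausible minimal implementation, assuming it simply serializes the decorated methods through the manager Condition stored in self._lock (an inference from how the methods use self._lock, not the project's actual code):

import functools

def sync(func):
    """Acquire self._lock (a manager Condition proxy) around the wrapped method."""
    @functools.wraps(func)
    def wrapper(self, *args, **kwargs):
        with self._lock:
            return func(self, *args, **kwargs)
    return wrapper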
Example 7
def run_server():
    from connectn.tournament import run_tournament_process
    from multiprocessing.managers import SyncManager

    cu.configure_logging(cu.SERVER_PROCESS_LOG)
    logger = logging.getLogger(__name__)

    cu.start_stunnel(True)

    manager = SyncManager()
    manager.start(_process_init)
    sq = mp.Queue()
    rq = manager.Queue()
    shutdown = manager.Event()
    rg = mp.Process(
        target=_process_init,
        args=(run_tournament_process, sq, rq, shutdown, cu.PLAY_ALL),
        name="RunGames",
    )
    rg.start()

    logger.info("Started run_games process")
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as ls:
        try:
            ls.settimeout(5.0)
            ls.bind(("localhost", cu.LISTEN_PORT))
            ls.listen(5)
            logger.info("Started server listening socket")
        except Exception:
            logger.exception("Failure when binding to the listening port.")
        else:
            updated_agent_archives = []
            running = True
            while running:
                try:
                    (cs, addr) = ls.accept()
                    logger.info("Accepted connection.")
                    handle_client(cs, updated_agent_archives)
                except socket.timeout:
                    if len(updated_agent_archives):
                        logger.info(
                            f"Server sending {len(updated_agent_archives)} new agents for game-play."
                        )
                        logger.info(f"{updated_agent_archives}")
                        sq.put(updated_agent_archives)
                        updated_agent_archives = []
                except cu.InactiveSocket:
                    logger.exception("Connection failed")
                except KeyboardInterrupt:
                    inp = input("Shutdown? y/[n] ").lower()
                    while inp not in ("", "y", "n"):
                        inp = input("Shutdown? y/[n] ").lower()
                    if inp == "y":
                        logger.info("KeyboardInterrupt: Shutting down")
                        running = False
                    else:
                        inp = input("Play all games? y/[n] ").lower()
                        while inp not in ("", "y", "n"):
                            inp = input("Play all games? y/[n] ").lower()
                        if inp == "y":
                            sq.put("PLAY_ALL")
                except Exception:
                    logger.exception("Unexpected error, will try to keep running.")

                store_results_local(rq)
        finally:
            """
            If the port is orphaned use:
            fuser -kn tcp <port>
            """
            ls.shutdown(socket.SHUT_RDWR)
            logger.info("Closed server socket")

    logger.info("Telling run_games process to shutdown.")
    shutdown.set()
    rg.join()
    logger.info("Finished server shutdown.")
Example 8
        vowel_point[0].set_ydata(vowel_point[0].get_ydata() + ydelta)
        return (vowel_point, )

    ani = animation.FuncAnimation(fig,
                                  vowel_update,
                                  blit=False,
                                  repeat=True,
                                  interval=10)
    plt.show()


if __name__ == '__main__':
    # some multiprocessing setup
    thread_manager = SyncManager()
    thread_manager.start(mgr_init)
    exit_event = thread_manager.Event()
    audio_queue = thread_manager.Queue()

    # start audio input thread
    audio_input = multiprocessing.Process(target=get_audio,
                                          args=(audio_queue, exit_event))
    audio_input.daemon = True
    audio_input.start()

    gui_process = multiprocessing.Process(target=do_gui, args=(audio_queue, ))
    gui_process.daemon = True
    gui_process.start()

    while gui_process.exitcode is None:
        time.sleep(0.1)
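
The final loop polls exitcode every 100 ms; since the parent only needs to outlive the GUI process, Process.join() expresses the same wait without polling. A hedged alternative (the exit_event handling afterwards is an assumption, as the original teardown is not shown):

gui_process.join()             # block until the GUI process exits
exit_event.set()               # assumption: ask get_audio() to stop cleanly
audio_input.join(timeout=1.0)  # the audio process is a daemon, so this is best-effort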
Example 9
class SubprocessBase(object):
    '''Base class for subprocess routines that use socket connection'''
    __metaclass__ = abc.ABCMeta
    
    _error_msg = ('This executable should only be called from '
                  'inside the main program.')
                 
    def __init__(self):
        self._pid  = os.getpid()
        #abort event
        self._mgr  = SyncManager()
        self._mgr.start(ignore_interrupt)
        self._abort_event = self._mgr.Event()
        #stdout/err
        self._err  = StreamEncoder(sys.stderr)
        self._out  = StreamEncoder(sys.stdout)
        #connection information
        self._port = None
        self._con  = None
        self()
    #end def
    
    def __del__(self):
        if self._con is not None:
            self.terminate()
            self._con.close()
        self._mgr.shutdown()
    #end def
    

    def _check_tty(self):
        if sys.stdin.isatty():
            self._err.write(self._error_msg+'\n')
            return True
        return False
    #end def
    
    def _sig_handler(self, signal, frame):
        if self._pid != os.getpid(): return
        self._out.write('%d aborting...\n'%os.getpid())
        self._abort_event.set(); sleep(0.1)
        tmpStorage.clean_tmp_files()
    #end def
        
    def _set_sig_handlers(self):
        signal.signal(signal.SIGINT,  self._sig_handler)
        signal.signal(signal.SIGTERM, self._sig_handler)
        signal.signal(signal.SIGQUIT, self._sig_handler)
    #end def
    
    
    def _parse_args(self):
        parser = argparse.ArgumentParser(self._error_msg)
        conf_group = parser.add_argument_group('Preset configuration')
        conf_group.add_argument('port', metavar='number', 
                                type=int, nargs=1,
                                help='Port number to connect to.')
        args = parser.parse_args()
        self._port = args.port[0]
    #end def
    
    def _get_auth_key(self):
        try: self._auth = sys.stdin.readline().strip('\n')
        except: self._auth = None
    #end def
    
    def _connect(self):
        if self._port is None: return False
        try: self._con = mpc.Client(('localhost', self._port), 
                                    authkey=self._auth)
        except mpc.AuthenticationError as e:
            self._err.write('Cannot connect to the port %d\n%s\n' % (self._port,str(e)))
            return False
        except: 
Example 10
def main():
    # global server
    global ticker_condition, platforms
    global get_dict, get_condition

    # Only works with 3.6+, checking version
    if sys.version_info < (3, 6):
        print('Invalid version of Python. Requires Python 3.6 or newer')
        return

    read_config()

    manager = SyncManager()
    manager.start()

    running = manager.Event()
    running.set()

    platforms = manager.dict()
    ticker_condition = manager.Condition()

    r_queue = manager.Queue()
    r_condition = manager.Condition()

    # used for HTTP GET requests and their responses
    get_dict = manager.dict()
    get_condition = manager.Condition()

    # used for HTTP PUT and POST to issue their responses
    reply_queue = manager.Queue()
    reply_condition = manager.Condition()

    # Use individual Processes rather than a Pool (a Pool would get closed).
    reply_list = []
    reply_args = (running, reply_queue, reply_condition)
    for i in range(config['threads']['return_threads']):
        r = Process(target=outbound, args=reply_args, name='Reply_%s' % i)
        r.daemon = True
        reply_list.append(r)
        r.start()

    # Use individual Processes rather than a Pool (a Pool would get closed).
    r_list = []
    r_args = (running, api_key, platforms, r_queue, r_condition, get_dict,
              get_condition, reply_queue, reply_condition, ticker_condition,
              platform_lock)
    for i in range(config['threads']['api_threads']):
        r = Process(target=retriever, args=r_args, name='Retriever_%s' % i)
        r.daemon = True
        r_list.append(r)
        r.start()

    ticking_args = (running, platforms, ticker_condition, r_queue, r_condition)
    ticking = Process(target=ticker, args=ticking_args, name='Ticker')
    ticking.daemon = True
    ticking.start()

    poking_args = (running, platforms, ticker_condition)
    poking = Process(target=poker, args=poking_args, name='Poker')
    poking.daemon = True
    poking.start()

    handler = CreateSyncHTTPHandler(manager)
    #server = http.server.HTTPServer( (config['server']['host'], config['server']['port']), MyHTTPHandler)
    server = http.server.HTTPServer(
        (config['server']['host'], config['server']['port']), handler)
    # server.set_manager(manager) # Need another way to pass this in, since HTTPServer doesn't have it...
    try:
        server.serve_forever()
    except KeyboardInterrupt:
        print('\nStopping server...')
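
CreateSyncHTTPHandler is not shown in the excerpt; the commented-out lines suggest it exists precisely because http.server.HTTPServer offers no way to pass the manager to its handler class. A minimal sketch of that factory pattern (the handler body is purely illustrative):

import http.server

def CreateSyncHTTPHandler(manager):
    # HTTPServer instantiates the handler class itself, so the manager is
    # bound in through a class attribute instead of a constructor argument.
    class SyncHTTPHandler(http.server.BaseHTTPRequestHandler):
        sync_manager = manager

        def do_GET(self):
            # ... consult self.sync_manager (e.g. get_dict / get_condition) ...
            self.send_response(200)
            self.end_headers()

    return SyncHTTPHandler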