Code example #1
File: audio_lib.py Project: liveonnet/prom_notify
 def __init__(self, conf_path='config/pn_conf.yaml'):
     # input param
     self.conf_path = conf_path
     self.conf = getConf(self.conf_path, root_key='audio')
     if self.conf['target'] == 'pi':
         self.t2s = Text2SpeechBaidu(self.conf_path)  # sync
     else:
         self.t2s = Text2SpeechXunFei(self.conf_path)  # sync
     self.executor_t2s = concurrent.futures.ProcessPoolExecutor(2)  # async
     if self.conf['use_custom_manager']:
         # create proxy manager
         mgr = SyncManager(
             (get_lan_ip(), self.conf['custom_manager_port']),
             self.conf['custom_manager_authkey'].encode('utf8'))
         sleep(0.5)  # wait for manager to start
         mgr.connect()
     else:
         mgr = multiprocessing.Manager()
     self.q_audio = mgr.Queue()
     #-#        debug('audio data queue created. %s', self.q_audio)
     self.event_exit = mgr.Event()
     # workaround for https://bugs.python.org/issue7503
     multiprocessing.current_process().authkey = \
         self.conf['custom_manager_authkey'].encode('utf8')
     self.proc_play = multiprocessing.Process(target=self.playAudioFromQ,
                                              args=(self.q_audio,
                                                    self.event_exit))
     self.proc_play.start()
     #-#        debug('play background proc start. %s', self.proc_play)
     # trigger creation of the process-pool worker processes; creating them early seems to use less memory
     self.executor_t2s.map(noop_func, (None, None))
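
Note that with use_custom_manager the class above only connects to a SyncManager server that is already running elsewhere (see the server_manager helper in code example #2, from the same project). A minimal sketch of that client/server pairing, with a hypothetical address and authkey:

from multiprocessing.managers import SyncManager

ADDRESS = ('192.168.1.10', 50000)   # hypothetical LAN host and port
AUTHKEY = b'example-authkey'        # hypothetical shared authkey

def run_server():
    # Server side: owns the shared objects and serves requests forever.
    mgr = SyncManager(ADDRESS, AUTHKEY)
    mgr.get_server().serve_forever()

def run_client():
    # Client side: connect to the running server, then create shared
    # objects (Queue, Event, ...) that actually live in the server process.
    mgr = SyncManager(ADDRESS, AUTHKEY)
    mgr.connect()
    queue = mgr.Queue()
    queue.put('hello from a client')
    return queue
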
Code example #2
def server_manager(address, authkey):
    mgr = SyncManager(address, authkey)
    setproctitle('process_mgr')
    debug('manager server started.')
    server = mgr.get_server()
    server.serve_forever()
    debug('manager server stopped.')
Code example #3
def main():

    config = load_config(extra_args_func=gw_args)
    init_logger(config)

    log.info(f'Backend.AI Gateway {__version__}')
    log.info(f'runtime: {env_info()}')

    log_config = logging.getLogger('ai.backend.gateway.config')
    log_config.debug('debug mode enabled.')

    if config.debug:
        aiohttp.log.server_logger.setLevel('DEBUG')
        aiohttp.log.access_logger.setLevel('DEBUG')
    else:
        aiohttp.log.server_logger.setLevel('WARNING')
        aiohttp.log.access_logger.setLevel('WARNING')

    asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())

    num_workers = os.cpu_count()
    manager = SyncManager()
    manager.start(lambda: signal.signal(signal.SIGINT, signal.SIG_IGN))
    shared_states = manager.Namespace()
    shared_states.lock = manager.Lock()
    shared_states.barrier = manager.Barrier(num_workers)
    shared_states.agent_last_seen = manager.dict()

    try:
        aiotools.start_server(server_main, num_workers=num_workers,
                              extra_procs=[event_router],
                              args=(config, shared_states))
    finally:
        manager.shutdown()
        log.info('terminated.')
Code example #4
File: kubernetes_scheduler.py Project: kalebinn/dbnd
    def __init__(
        self, kube_config, task_queue, result_queue, kube_client, worker_uuid, kube_dbnd
    ):
        super(DbndKubernetesScheduler, self).__init__(
            kube_config, task_queue, result_queue, kube_client, worker_uuid
        )
        self.kube_dbnd = kube_dbnd

        # PATCH: watcher communication manager
        # we want to wait for a stop instead of exiting in place, so we can collect all not-yet-received messages
        from multiprocessing.managers import SyncManager

        # TODO: why can't we use original SyncManager?
        # Scheduler <-> (via _manager) KubeWatcher
        # if _manager dies in place, we will not get any "info" from KubeWatcher until shutdown
        self._manager = SyncManager()
        self._manager.start(mgr_init)

        self.watcher_queue = self._manager.Queue()
        self.current_resource_version = 0
        self.kube_watcher = self._make_kube_watcher_dbnd()

        # pod to airflow key (dag_id, task_id, execution_date)
        self.submitted_pods = {}  # type: Dict[str,SubmittedPodState]

        # sending data to databand tracker
        self.metrics_logger = KubernetesMetricsLogger()

        # disappeared pods mechanism
        self.last_disappeared_pods = {}
        self.current_iteration = 1
        # add `k8s-scheduler:` prefix to all log messages
        self._log = PrefixLoggerAdapter("k8s-scheduler", self.log)
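
mgr_init is not shown in this snippet. Judging from the other examples on this page that pass a SIGINT-ignoring initializer to SyncManager.start() (e.g. code examples #8, #16 and #26), it is presumably something along these lines (a sketch, not the project's actual code):

import signal

def mgr_init():
    # Runs inside the newly spawned manager process: ignore SIGINT so that
    # a Ctrl-C delivered to the parent does not also kill the manager (and
    # with it every proxy object the scheduler still depends on).
    signal.signal(signal.SIGINT, signal.SIG_IGN)
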
Code example #5
def main():
    config = load_config()
    cred = config["drive4data.influx"]
    dry_run = bool(config.get("dry_run", False))

    os.makedirs("out", exist_ok=True)
    with ExitStack() as stack:
        executor = concurrent.futures.ProcessPoolExecutor(max_workers=8)
        stack.enter_context(executor)

        manager = SyncManager()
        manager.start(mgr_init)
        stack.enter_context(manager)

        client = InfluxDBClient(batched=False, async_executor=True, time_epoch=TIME_EPOCH, **cred)
        stack.enter_context(closing(client))

        try:
            if not dry_run:
                client.drop_measurement("trips")
            preprocess_trips(client, executor, manager, dry_run=dry_run)
            if not dry_run:
                client.drop_measurement("charge_cycles")
            preprocess_cycles(client, executor, manager, dry_run=dry_run)
        except:
            executor.shutdown(wait=False)
            raise
Code example #6
File: wx_lib.py Project: wliustc/prom_notify
    def __init__(self, conf_path='config/pn_conf.yaml'):
        self.conf_path = os.path.abspath(conf_path)
        self.conf = getConf(self.conf_path, root_key='itchat')

        self.thread_id = None

        self.gid = None  # record the UserName of our group chat
        if self.conf['use_custom_manager']:
            # create proxy manager
            mgr = SyncManager(
                (get_lan_ip(), self.conf['custom_manager_port']),
                self.conf['custom_manager_authkey'].encode('utf8'))
            #-#            sleep(0.5)  # wait for manager to start
            mgr.connect()
        else:
            mgr = multiprocessing.Manager()
        self.q_send = mgr.Queue()
        self.event_exit = mgr.Event()
        # workaround for https://bugs.python.org/issue7503
        multiprocessing.current_process().authkey = \
            self.conf['custom_manager_authkey'].encode('utf8')
        self.proc_wx = multiprocessing.Process(target=self.run,
                                               args=(self.event_exit,
                                                     self.q_send))
        self.proc_wx.start()
Code example #7
    def __init__(
        self, kube_config, task_queue, result_queue, kube_client, worker_uuid, kube_dbnd
    ):
        super(DbndKubernetesScheduler, self).__init__(
            kube_config, task_queue, result_queue, kube_client, worker_uuid
        )
        self.kube_dbnd = kube_dbnd

        # PATCH: watcher communication manager
        # we want to wait for a stop instead of exiting in place, so we can collect all not-yet-received messages
        from multiprocessing.managers import SyncManager

        # Scheduler <-> (via _manager) KubeWatcher
        # if _manager dies in place, we will not get any "info" from KubeWatcher until shutdown
        self._manager = SyncManager()
        self._manager.start(mgr_init)

        self.watcher_queue = self._manager.Queue()
        self.current_resource_version = 0
        self.kube_watcher = self._make_kube_watcher_dbnd()
        # will be used for low-level pod interactions
        self.failed_pods_to_ignore = []
        self.running_pods = {}
        self.pod_to_task = {}
        self.metrics_logger = KubernetesMetricsLogger()
Code example #8
File: manticore.py Project: zumb08/manticore
    def _manticore_multiprocessing(self):
        def raise_signal():
            signal.signal(signal.SIGINT, signal.SIG_IGN)

        self._worker_type = WorkerProcess
        # This is the global manager that will handle all shared memory access
        # See. https://docs.python.org/3/library/multiprocessing.html#multiprocessing.managers.SyncManager
        self._manager = SyncManager()
        self._manager.start(raise_signal)
        # The main manticore lock. Acquire this for accessing shared objects
        # THINKME: we use the same lock to access states lists and shared contexts
        self._lock = self._manager.Condition()
        self._killed = self._manager.Value(bool, False)
        self._running = self._manager.Value(bool, False)
        # List of state ids of States on storage
        self._ready_states = self._manager.list()
        self._terminated_states = self._manager.list()
        self._busy_states = self._manager.list()
        self._killed_states = self._manager.list()
        # The multiprocessing queue is much slower than the deque when it gets full, so we
        # triple the size in order to prevent that from happening.
        self._log_queue = self._manager.Queue(15000)
        self._shared_context = self._manager.dict()
        self._context_value_types = {
            list: self._manager.list,
            dict: self._manager.dict
        }
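
Worker code would then take the manager-backed Condition before touching any of the shared lists, roughly like this (a hypothetical sketch reusing the attribute names above, not Manticore's actual worker loop):

    def _publish_ready_state(self, state_id):
        # Hypothetical helper: move a state id onto the shared ready list
        # while holding the global lock, then wake any waiting workers.
        with self._lock:
            self._ready_states.append(state_id)
            self._lock.notify_all()

    def _take_ready_state(self):
        # Hypothetical helper: block until a state id is available or the
        # run has been killed.
        with self._lock:
            while not self._ready_states and not self._killed.value:
                self._lock.wait()
            return self._ready_states.pop() if self._ready_states else None
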
Code example #9
def prepare_experiment(env, args):
    # Manager to share PER between a learner and explorers
    SyncManager.register('PrioritizedReplayBuffer', PrioritizedReplayBuffer)
    manager = SyncManager()
    manager.start()

    kwargs = get_default_rb_dict(args.replay_buffer_size, env)
    kwargs["check_for_update"] = True
    global_rb = manager.PrioritizedReplayBuffer(**kwargs)

    # queues to share network parameters between a learner and explorers
    n_queue = 1 if args.n_env > 1 else args.n_explorer
    n_queue += 1  # for evaluation
    queues = [manager.Queue() for _ in range(n_queue)]

    # Event object to share training status. If the event is set, all explorers stop sampling transitions
    is_training_done = Event()

    # Lock
    lock = manager.Lock()

    # Shared memory objects to count number of samples and applied gradients
    trained_steps = Value('i', 0)

    return global_rb, queues, is_training_done, lock, trained_steps
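
An explorer process would then receive these shared objects and use them roughly as follows (a sketch; explorer, collect_transition and update_policy are hypothetical names, not part of the original code):

def explorer(global_rb, queue, is_training_done, lock, env, policy):
    # Hypothetical explorer loop: sample transitions until the learner
    # signals that training is done.
    while not is_training_done.is_set():
        transition = collect_transition(env, policy)   # hypothetical helper
        with lock:
            global_rb.add(**transition)                # proxied replay buffer
        if not queue.empty():
            update_policy(policy, queue.get())         # hypothetical helper
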
Code example #10
File: yiff_scrape.py Project: ImportTaste/xA-Scraper
def run_local():
    from multiprocessing.managers import SyncManager

    manager = SyncManager()
    manager.start()
    flags.namespace = manager.Namespace()
    flags.namespace.run = True

    signal.signal(signal.SIGINT, signal_handler)

    print(sys.argv)
    ins = GetYp()
    # ins.getCookie()
    print(ins)
    print("Instance: ", ins)

    update_nl = True
    if "no_namelist" in sys.argv:
        update_nl = False
    if "drain" in sys.argv:
        update_nl = False
    if not update_nl:
        print("Not fetching new names from site!")

    # ins.go(ctrlNamespace=flags.namespace, update_namelist=True)
    ins.go(ctrlNamespace=flags.namespace,
           update_namelist=update_nl,
           local=True)
Code example #11
File: manager.py Project: springernature/mpire
def start_manager_server() -> SyncManager:
    """
    Start a SyncManager

    :return: SyncManager
    """
    for port_nr in reversed(range(8080, 8100)):
        try:
            # If a port is already occupied, the SyncManager process will spit out EOFError and OSError messages. The
            # former can be caught, but the latter will still show up. So we first check manually whether the port is
            # available
            s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            s.bind(('', port_nr))
            s.close()

            # Create manager
            sm = SyncManager(address=('', port_nr), authkey=b'mpire_dashboard')
            sm.register('get_dashboard_tqdm_dict', get_dashboard_tqdm_dict)
            sm.register('get_dashboard_tqdm_details_dict', get_dashboard_tqdm_details_dict)
            sm.register('get_dashboard_tqdm_lock', get_dashboard_tqdm_lock)
            sm.start()

            # Set host and port number so other processes know where to connect to
            DASHBOARD_MANAGER_HOST.value = b''
            DASHBOARD_MANAGER_PORT.value = port_nr

            return sm

        except OSError:
            # Port is occupied, ignore it and try another
            pass

    raise OSError("All ports (8080-8099) are in use")
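
On the consumer side, a process that wants to talk to this dashboard manager would register the same typeids (without callables) on a manager class and connect to the advertised host and port, e.g. (a sketch, not mpire's actual client code):

def connect_to_dashboard_manager(host: str, port: int) -> SyncManager:
    # Sketch of the client side: register the same typeids so the proxy
    # accessors exist locally, then connect instead of starting a server.
    class DashboardClientManager(SyncManager):
        pass

    DashboardClientManager.register('get_dashboard_tqdm_dict')
    DashboardClientManager.register('get_dashboard_tqdm_details_dict')
    DashboardClientManager.register('get_dashboard_tqdm_lock')

    sm = DashboardClientManager(address=(host, port), authkey=b'mpire_dashboard')
    sm.connect()
    # sm.get_dashboard_tqdm_dict() now returns a proxy to the server-side dict
    return sm
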
Code example #12
def joinable_returnable_process_manager(n, ):
    '''
    This decorator runs n processes to complete a list of tasks given to the decorated function. It waits until all computations have completed, and returns a list of results.
    :param n: Number of processes to run in parallel
    :return: List of results
    '''

    manager = SyncManager()
    manager.start()
    return_object = manager.list()

    def faux_decorator(f):
        def wrapper(list_of_parameters):
            q = manager.Queue()
            lock = manager.Lock()
            [q.put(item) for item in list_of_parameters]
            processes = [[
                q.put(None),
                queued_start_process(f, q, lock, return_object)
            ] for i in range(n)]

            [p[1].join() for p in processes]
            return return_object

        return wrapper

    return faux_decorator
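
Usage would presumably look like this (a sketch; square is a hypothetical worker function, and the exact calling convention depends on the undefined queued_start_process helper):

@joinable_returnable_process_manager(4)
def square(x):
    return x * x

results = square([1, 2, 3, 4, 5])   # processed by 4 worker processes
print(list(results))                # proxied list holding the results
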
Code example #13
def function_handler(payload):
    job = SimpleNamespace(**payload)

    manager = SyncManager()
    manager.start()
    job_queue = manager.Queue()
    job_runners = []

    processes = min(job.worker_processes, len(job.call_ids))
    logger.info("Starting {} processes".format(processes))

    for runner_id in range(processes):
        p = mp.Process(target=process_runner, args=(runner_id, job_queue))
        job_runners.append(p)
        p.start()

    for call_id in job.call_ids:
        data_byte_range = job.data_byte_ranges.pop(0)
        logger.info('Going to execute job {}-{}'.format(job.job_key, call_id))
        job_queue.put((job, call_id, data_byte_range))

    for i in range(processes):
        job_queue.put(ShutdownSentinel())

    for runner in job_runners:
        runner.join()

    manager.shutdown()
Code example #14
 def __init__(self, args):
     # Init print management
     Print.init(log=args.log, debug=args.debug, all=args.all, cmd=args.prog)
     # Print command-line
     Print.command()
     self._process_color_arg(args)
     # Get project and related configuration
     self.project = args.project
     self.config_dir = args.i
     self.processes = args.max_processes if args.max_processes <= cpu_count() else cpu_count()
     self.use_pool = (self.processes != 1)
     self.lock = Lock()
     self.nbfiles = 0
     self.nbskip = 0
     self.nberrors = 0
     self.file_filter = []
     if args.include_file:
         self.file_filter.extend([(f, True) for f in args.include_file])
     else:
         # Default includes netCDF only
         self.file_filter.append((r'^.*\.nc$', True))
     if args.exclude_file:
         # Default exclude hidden files
         self.file_filter.extend([(f, False) for f in args.exclude_file])
     else:
         self.file_filter.append((r'^\..*$', False))
     self.dir_filter = args.ignore_dir
     # Init process manager
     if self.use_pool:
         manager = SyncManager()
         manager.start()
         Print.BUFFER = manager.Value(c_char_p, '')
         self.progress = manager.Value('i', 0)
     else:
         self.progress = Value('i', 0)
     self.tunits_default = None
     if self.project in DEFAULT_TIME_UNITS.keys():
         self.tunits_default = DEFAULT_TIME_UNITS[self.project]
     # Change frequency increment
     if args.set_inc:
         for table, frequency, increment, units in args.set_inc:
             if table not in set(zip(*FREQ_INC.keys())[0]):
                 raise InvalidTable(table)
             if frequency not in set(zip(*FREQ_INC.keys())[1]):
                 raise InvalidFrequency(frequency)
             keys = [(table, frequency)]
             if table == 'all':
                 keys = [k for k in FREQ_INC.keys() if k[1] == frequency]
             if frequency == 'all':
                 keys = [k for k in FREQ_INC.keys() if k[0] == table]
             for key in keys:
                 FREQ_INC[key] = [float(increment), str(units)]
     # Get reference time properties if submitted
     # Default is to deduce them from first file scanned
     self.ref_calendar = args.calendar
     self.ref_units = args.units
     # Init collector
     self.sources = None
Code example #15
def init_shared_manager(items):
    """Initialize and start shared manager."""
    for cls in items:
        proxy = create_proxy(cls)
        SyncManager.register(cls.__name__, cls, proxy)
    manager = SyncManager()
    manager.start()
    return manager
Code example #16
 def setUp(self):
     if not hasattr(self, 'manager'):
         self.manager = SyncManager()
         self.manager.start(
             lambda: signal.signal(signal.SIGINT, signal.SIG_IGN))
     l = linux.Linux('/bin/ls')
     self.state = State(ConstraintSet(), l)
     self.lock = self.manager.Condition()
Code example #17
File: handler.py Project: lithops-cloud/lithops
def function_handler(payload):
    job = SimpleNamespace(**payload)
    setup_lithops_logger(job.log_level)

    processes = min(job.worker_processes, len(job.call_ids))
    logger.info('Tasks received: {} - Concurrent processes: {}'.format(
        len(job.call_ids), processes))

    env = job.extra_env
    env['LITHOPS_WORKER'] = 'True'
    env['PYTHONUNBUFFERED'] = 'True'
    os.environ.update(env)

    storage_config = extract_storage_config(job.config)
    internal_storage = InternalStorage(storage_config)
    job.func = get_function_and_modules(job, internal_storage)
    job_data = get_function_data(job, internal_storage)

    if processes == 1:
        job_queue = queue.Queue()
        for call_id in job.call_ids:
            data = job_data.pop(0)
            job_queue.put((job, call_id, data))
        job_queue.put(ShutdownSentinel())
        process_runner(job_queue)
    else:
        manager = SyncManager()
        manager.start()
        job_queue = manager.Queue()
        job_runners = []

        for call_id in job.call_ids:
            data = job_data.pop(0)
            job_queue.put((job, call_id, data))

        for i in range(processes):
            job_queue.put(ShutdownSentinel())

        for runner_id in range(processes):
            p = mp.Process(target=process_runner, args=(job_queue, ))
            job_runners.append(p)
            p.start()
            logger.info('Worker process {} started'.format(runner_id))

        for runner in job_runners:
            runner.join()

        manager.shutdown()

    # Delete modules path from syspath
    module_path = os.path.join(MODULES_DIR, job.job_key)
    if module_path in sys.path:
        sys.path.remove(module_path)

    # Unset specific job env vars
    for key in job.extra_env:
        os.environ.pop(key, None)
    os.environ.pop('__LITHOPS_TOTAL_EXECUTORS', None)
Code example #18
 def __init__(self, args):
     super(MultiprocessingContext, self).__init__(args)
     # Configuration directory (i.e., INI files folder)
     self.config_dir = args.i
     # Command line action
     if hasattr(args, 'action'):
         self.action = args.action
     # Input
     self.directory = args.directory
     if hasattr(args, 'dataset_list'):
         self.dataset_list = args.dataset_list
     if hasattr(args, 'dataset_id'):
         self.dataset_id = args.dataset_id
     if hasattr(args, 'incoming'):
         self.incoming = args.incoming
     # Multiprocessing configuration
     self.processes = args.max_processes if args.max_processes <= cpu_count() else cpu_count()
     self.use_pool = (self.processes != 1)
     # Scan counters
     self.scan_errors = 0
     self.scan_data = 0
     self.nbsources = 0
     # Process manager
     if self.use_pool:
         self.manager = SyncManager()
         self.manager.start()
         self.progress = self.manager.Value('i', 0)
         Print.BUFFER = self.manager.Value(c_char_p, '')
     else:
         self.progress = Value('i', 0)
     # Stdout lock
     self.lock = Lock()
     # Directory filter (esgmapfile + esgcheckvocab)
     if hasattr(args, 'ignore_dir'):
         self.dir_filter = args.ignore_dir
     # File filters (esgmapfile + esgcheckvocab)
     self.file_filter = []
     if hasattr(args, 'include_files'):
         if args.include_file:
             self.file_filter.extend([(f, True) for f in args.include_file])
         else:
             # Default includes netCDF only
             self.file_filter.append((r'^.*\.nc$', True))
     if hasattr(args, 'exclude_file'):
         if args.exclude_file:
             self.file_filter.extend([(f, False)
                                      for f in args.exclude_file])
         else:
             # Default exclude hidden files
             self.file_filter.append((r'^\..*$', False))
     # Facet declaration (esgcheckvocab + esgdrs)
     self.set_values = {}
     if hasattr(args, 'set_value') and args.set_value:
         self.set_values = dict(args.set_value)
     self.set_keys = {}
     if hasattr(args, 'set_key') and args.set_key:
         self.set_keys = dict(args.set_key)
Code example #19
 def setUp(self):
     if not hasattr(self, 'manager'):
         self.manager = SyncManager()
         self.manager.start(
             lambda: signal.signal(signal.SIGINT, signal.SIG_IGN))
     dirname = os.path.dirname(__file__)
     l = linux.Linux(os.path.join(dirname, 'binaries', 'basic_linux_amd64'))
     self.state = State(ConstraintSet(), l)
     self.lock = self.manager.Condition()
Code example #20
File: __init__.py Project: webiumsk/WOT-0.9.17-CT
def Manager():
    """
    Returns a manager associated with a running server process
    
    The manager's methods such as `Lock()`, `Condition()` and `Queue()`
    can be used to create shared objects.
    """
    from multiprocessing.managers import SyncManager
    m = SyncManager()
    m.start()
    return m
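
A short usage sketch of such a manager, sharing a queue and a lock with a child process:

import multiprocessing

def worker(queue, lock):
    with lock:
        queue.put('done')

if __name__ == '__main__':
    m = Manager()                      # the helper defined above
    queue, lock = m.Queue(), m.Lock()  # proxies living in the manager process
    p = multiprocessing.Process(target=worker, args=(queue, lock))
    p.start()
    p.join()
    print(queue.get())                 # -> 'done'
    m.shutdown()
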
Code example #21
    def __init__(self, kube_dbnd=None):
        # type: (DbndKubernetesExecutor, DbndKubernetesClient) -> None
        super(DbndKubernetesExecutor, self).__init__()

        from multiprocessing.managers import SyncManager

        self._manager = SyncManager()

        self.kube_dbnd = kube_dbnd
        _update_airflow_kube_config(airflow_kube_config=self.kube_config,
                                    engine_config=kube_dbnd.engine_config)
Code example #22
File: workspace.py Project: evanpjensen/manticore
def manager():
    """ Get multiprocessing manager

    Transparently creates a :obj:`multiprocessing.managers.SyncManager` the first time it's invoked. Used for sharing values across multiple concurrently executing Manticore instances.

    """
    global _manager
    if _manager is None:
        _manager = SyncManager()
        _manager.start(lambda: signal.signal(signal.SIGINT, signal.SIG_IGN))
    return _manager
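
Because _manager is a lazily created module-level singleton, every call site shares the same manager and therefore the same proxies; a small sketch of that behaviour:

shared = manager().dict()        # first call starts the SyncManager
shared['visited_states'] = 0     # illustrative key; lives in the manager process

assert manager() is manager()    # later calls reuse the same instance
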
Code example #23
def main(args=None):
    manager = SyncManager()
    manager.start(interupt_manager)
    try:
        server = create_server()
        register()
        server.serve()
    finally:
        unregister()
        print('finally FacePi shutting down')
        manager.shutdown()
Code example #24
def main(args=None):
    manager = SyncManager()
    manager.start(interupt_manager)
    try:
        server = create_server()
        register()
        server.serve()

    finally:
        unregister()
        logging.debug('finally AgendaPi shutting down')
        manager.shutdown()
Code example #25
    def __init__(self, number_of_workers, number_of_queued_connections):
        SyncManager.register('LogFileManager', LogFileManager)
        manager = SyncManager()
        manager.start()
        logs = manager.LogFileManager()

        self.writer = WriterDbHandler(logs, number_of_workers,
                                      number_of_queued_connections, HOST,
                                      WRITER_PORT)
        self.reader = ReaderDbHandler(logs, number_of_workers,
                                      number_of_queued_connections, HOST,
                                      READER_PORT)
Code example #26
    def __init__(self,
                 initial=None,
                 store=None,
                 policy='random',
                 context=None,
                 **kwargs):
        super().__init__(**kwargs)

        # Signal / callback handlers may be invoked in different
        # worker processes. State provides a local context to save data.

        self.subscribe('did_load_state', self._register_state_callbacks)

        # This is the global manager that will handle all shared memory access among workers
        self.manager = SyncManager()
        self.manager.start(
            lambda: signal.signal(signal.SIGINT, signal.SIG_IGN))

        # The main executor lock. Acquire this for accessing shared objects
        self._lock = self.manager.Condition()

        # Shutdown Event
        self._shutdown = self.manager.Event()

        # States on storage. Shared dict state name ->  state stats
        self._states = self.manager.list()

        # Number of currently running workers. Initially no running workers
        self._running = self.manager.Value('i', 0)

        self._workspace = Workspace(self._lock, store)

        # Executor wide shared context
        if context is None:
            context = {}
        self._shared_context = self.manager.dict(context)

        # scheduling priority policy (wip)
        # Set policy
        policies = {
            'random': Random,
            'uncovered': Uncovered,
            'branchlimited': BranchLimited,
        }
        self._policy = policies[policy](self)
        assert isinstance(self._policy, Policy)

        if self.load_workspace():
            if initial is not None:
                logger.error("Ignoring initial state")
        else:
            if initial is not None:
                self.add(initial)
Code example #27
    def func_server(self):
        p = current_process()
        print('{} id: {}'.format(p.name, id(p)))
        host = '127.0.0.1'
        port = 12345
        authkey = 'authkey'

        # SyncManager.register('get_list', list, proxytype=ListProxy)
        mgr = SyncManager(address=(host, port),
                          authkey=authkey.encode('utf-8'))
        server = mgr.get_server()
        server.serve_forever()
Code example #28
 def __init__(self):
     self._pid  = os.getpid()
     #abort event
     self._mgr  = SyncManager()
     self._mgr.start(ignore_interrupt)
     self._abort_event = self._mgr.Event()
     #stdout/err
     self._err  = StreamEncoder(sys.stderr)
     self._out  = StreamEncoder(sys.stdout)
     #connection information
     self._port = None
     self._con  = None
     self()
Code example #29
File: process_helpers.py Project: dmiwell/mp_manager
 def __init__(self):
     mgr = SyncManager()
     mgr.start(signal.signal, (signal.SIGINT, signal.SIG_IGN))
     self.ns_default = mgr.Namespace()
     self.ns_default.error = None
     self.ns_stats = mgr.Namespace()
     self.input_queue = mgr.Queue(maxsize=100)
     self.error_occurred = mgr.Event()
     self.error_processed = mgr.Event()
     self.batch_done = mgr.Event()
     self.mgr = mgr
     self.stats_lock = mgr.Lock()
     self.main_lock = mgr.Lock()
Code example #30
def _manager_process(addr):
    logger = get_logger()
    spawn(idle_watcher)
    try:
        listener: Listener = Listener(addr, "AF_UNIX")

        with listener:
            manager = SyncManager()
            manager.start(manager_init)
            try:

                def process_queue(q: Queue, idx: int):
                    for val_idx in range(0, QUEUE_DEPTH):
                        put_string = f"Request #{idx}, Value #{val_idx}"
                        logger.info(f"**** Sending {put_string} on {q._id}")
                        q.put(put_string)
                        logger.info(f"**** Sent {put_string} on {q._id}")
                        sleep(0.05)

                    logger.info(
                        f"**** Putting None in queue request #{idx} to empty on {q._id}"
                    )
                    q.put(None)
                    logger.info(
                        f"**** Waiting for queue request #{idx} to empty on {q._id}"
                    )
                    q.join()
                    logger.info(
                        f"**** All done with request #{idx} on {q._id}")

                def process_conn(conn: Connection, idx: int):
                    with conn:
                        logger.info(f"**** Accepted request #{idx}")
                        q: Queue = manager.Queue(QUEUE_SIZE)
                        logger.info(
                            f"**** Passing request #{idx} queue {q._id}")
                        conn.send(q)
                        logger.info(
                            f"**** Passed request #{idx} queue {q._id}")

                    spawn(process_queue, q, idx)

                for i in range(0, REQUEST_COUNT):
                    spawn(process_conn, listener.accept(), i)

                wait(timeout=300)
                # logger.warning("\n".join(format_run_info()))
            finally:
                manager.shutdown()
    finally:
        get_hub().destroy()