Example #1
class ManticoreMultiprocessing(ManticoreBase):
    _worker_type = WorkerProcess

    def __init__(self, *args, **kwargs):
        # This is the global manager that will handle all shared memory access
        # See https://docs.python.org/3/library/multiprocessing.html#multiprocessing.managers.SyncManager
        self._manager = SyncManager()
        self._manager.start(raise_signal)
        # The main manticore lock. Acquire this for accessing shared objects
        # THINKME: we use the same lock to access states lists and shared contexts
        self._lock = self._manager.Condition()
        self._killed = self._manager.Value(bool, False)
        self._running = self._manager.Value(bool, False)

        # List of state ids of States on storage
        self._ready_states = self._manager.list()
        self._terminated_states = self._manager.list()
        self._busy_states = self._manager.list()
        self._killed_states = self._manager.list()
        self._shared_context = self._manager.dict()
        self._context_value_types = {
            list: self._manager.list,
            dict: self._manager.dict
        }

        super().__init__(*args, **kwargs)
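`raise_signal` is not defined in this snippet; Example #6 below defines it inline as an initializer that runs in the manager's server process and ignores SIGINT, presumably so a Ctrl-C in the parent does not tear down the shared state. A minimal sketch:

import signal

def raise_signal():
    # runs inside the freshly started manager process
    signal.signal(signal.SIGINT, signal.SIG_IGN)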
Example #2
 def _finalize_manager(process, address, authkey, state, _Client):
     try:
         SyncManager._finalize_manager(process, address, authkey, state,
                                       _Client)
     except WindowsError:
         # http://stackoverflow.com/questions/17076679/windowserror-access-is-denied-on-calling-process-terminate
         pass
Example #3
   def dconnect(self):
      """
      Attempts to connect to the DFS on the host/port for which the Node was initialized for.
      If no connection can be made, Node will keep attempting to connect until a connection
      can be established. Once a connection can be established the remove methods requested
      will be registered.
      """

      # remove connection from cache:
      # BaseProxy class has thread-local storage which caches the connection,
      # and it is reused for future connections, causing "broken pipe" errors
      # when creating a new manager.
      if self.dfs in BaseProxy._address_to_local:
         if hasattr(BaseProxy._address_to_local[self.dfs][0], 'connection'):
            del BaseProxy._address_to_local[self.dfs][0].connection

      # register handlers
      SyncManager.register("get_nodes")

      print "connecting to dfs", self.dfs
      while self.alive:

         try:
            self.impd= SyncManager(address= self.dfs, authkey= self.dauthkey)
            self.impd.connect()
            print "connected to dfs", self.dfs
            break
         except (EOFError, IOError, SocketError) as e:
            print "could not connect ...trying again", str(e)
            sleep(1)
Example #4
def main_proc():

    pid = os.getpid()
    # initialize manager
    mgr = SyncManager()
    mgr.start(mgr_init)
    
    try:
        # Create share object between processes
        shared_queue = mgr.Queue()

        # Create subprocesses
        put_proc = Process(target=put_data_proc, args=(shared_queue,))
        put_proc_1 = Process(target=put_data_proc_1, args=(shared_queue,))
        get_proc = Process(target=get_data_proc, args=(shared_queue,))

        # Start the processes
        put_proc.start()
        put_proc_1.start()
        get_proc.start()

        # Wait for the processes to finish
        put_proc.join()
        put_proc_1.join()
        get_proc.join()

    except KeyboardInterrupt:
        print "Main process (pid=%s) was interruptted" % pid
    finally:
        mgr.shutdown()
Example #5
def server_manager(address, authkey):
    mgr = SyncManager(address, authkey)
    setproctitle('process_mgr')
    debug('manager server started.')
    server = mgr.get_server()
    server.serve_forever()
    debug('manager server stopped.')
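`serve_forever()` blocks, so `server_manager` is meant to run in its own process; other processes attach with `connect()`. A hedged sketch of the client side (endpoint and key are illustrative):

from multiprocessing.managers import SyncManager

def client_manager(address=('127.0.0.1', 50000), authkey=b'secret'):
    mgr = SyncManager(address=address, authkey=authkey)
    mgr.connect()  # attach to the already-running server instead of starting one
    return mgr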
Example #6
    def _manticore_multiprocessing(self):
        def raise_signal():
            signal.signal(signal.SIGINT, signal.SIG_IGN)

        self._worker_type = WorkerProcess
        # This is the global manager that will handle all shared memory access
        # See https://docs.python.org/3/library/multiprocessing.html#multiprocessing.managers.SyncManager
        self._manager = SyncManager()
        self._manager.start(raise_signal)
        # The main manticore lock. Acquire this for accessing shared objects
        # THINKME: we use the same lock to access states lists and shared contexts
        self._lock = self._manager.Condition()
        self._killed = self._manager.Value(bool, False)
        self._running = self._manager.Value(bool, False)
        # List of state ids of States on storage
        self._ready_states = self._manager.list()
        self._terminated_states = self._manager.list()
        self._busy_states = self._manager.list()
        self._killed_states = self._manager.list()
        # The multiprocessing queue is much slower than the deque when it gets full, so we
        # triple the size in order to prevent that from happening.
        self._log_queue = self._manager.Queue(15000)
        self._shared_context = self._manager.dict()
        self._context_value_types = {
            list: self._manager.list,
            dict: self._manager.dict
        }
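The `_context_value_types` mapping suggests that plain lists and dicts placed into the shared context get swapped for their manager-backed equivalents, so mutations cross process boundaries. A hedged sketch of that lookup (the helper name is hypothetical):

def make_shared(manager, value):
    # mirror of the _context_value_types lookup above: lists and dicts become
    # manager proxies, everything else passes through unchanged
    context_value_types = {list: manager.list, dict: manager.dict}
    factory = context_value_types.get(type(value))
    return factory(value) if factory is not None else value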
Example #7
class CoordinatorFactory:
    def __init__(self, num_processes: int, host: str, port: int,
                 max_simultaneous_connections: int, as_remote_client: bool):
        """
        :param num_processes: number of parallel executions to be conducted
        :param host: host name of main node that holds the test queue, result queue, etc
        :param port: port of main node to connect to
        :param max_simultaneous_connections: maximum allowed connections at one time to main node; throttled to
            prevent overloading the *multiprocessing* module and causing deadlock
        :param as_remote_client: if True, the Coordinator is created through a SyncManager proxy rather than locally
        """
        self._host = host
        self._port = port
        self._num_processes = num_processes
        self._max_simultaneous_connections = max_simultaneous_connections
        self._is_local = not as_remote_client

    def launch(self) -> "Coordinator":
        if not self._is_local:
            self._sm = SyncManager(authkey=AUTHKEY)
            self._sm.start()
            coordinator = self._sm.CoordinatorProxy(
                self._num_processes, self._host, self._port,
                self._max_simultaneous_connections, self._is_local)
        else:
            coordinator = Coordinator(self._num_processes, self._host,
                                      self._port,
                                      self._max_simultaneous_connections,
                                      self._is_local)
        client = Orchestrator.Manager(addr=(self._host, self._port))
        client.register_client(coordinator, self._num_processes)
        return coordinator
Example #8
def main():
    config = load_config()
    cred = config["drive4data.influx"]
    dry_run = bool(config.get("dry_run", False))

    os.makedirs("out", exist_ok=True)
    with ExitStack() as stack:
        executor = concurrent.futures.ProcessPoolExecutor(max_workers=8)
        stack.enter_context(executor)

        manager = SyncManager()
        manager.start(mgr_init)
        stack.enter_context(manager)

        client = InfluxDBClient(batched=False, async_executor=True, time_epoch=TIME_EPOCH, **cred)
        stack.enter_context(closing(client))

        try:
            if not dry_run:
                client.drop_measurement("trips")
            preprocess_trips(client, executor, manager, dry_run=dry_run)
            if not dry_run:
                client.drop_measurement("charge_cycles")
            preprocess_cycles(client, executor, manager, dry_run=dry_run)
        except:
            executor.shutdown(wait=False)
            raise
Example #9
def start_manager_server() -> SyncManager:
    """
    Start a SyncManager

    :return: SyncManager
    """
    for port_nr in reversed(range(8080, 8100)):
        try:
            # If a port is already occupied the SyncManager process will spit out EOFError and OSError messages. The
            # former can be caught, but the latter will still show up. So we first check manually whether the port is
            # available
            s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            s.bind(('', port_nr))
            s.close()

            # Create manager
            sm = SyncManager(address=('', port_nr), authkey=b'mpire_dashboard')
            sm.register('get_dashboard_tqdm_dict', get_dashboard_tqdm_dict)
            sm.register('get_dashboard_tqdm_details_dict', get_dashboard_tqdm_details_dict)
            sm.register('get_dashboard_tqdm_lock', get_dashboard_tqdm_lock)
            sm.start()

            # Set host and port number so other processes know where to connect to
            DASHBOARD_MANAGER_HOST.value = b''
            DASHBOARD_MANAGER_PORT.value = port_nr

            return sm

        except OSError:
            # Port is occupied, ignore it and try another
            pass

    raise OSError("All ports (8080-8099) are in use")
Example #10
    def __init__(
        self, kube_config, task_queue, result_queue, kube_client, worker_uuid, kube_dbnd
    ):
        super(DbndKubernetesScheduler, self).__init__(
            kube_config, task_queue, result_queue, kube_client, worker_uuid
        )
        self.kube_dbnd = kube_dbnd

        # PATCH watcher communication manager
        # we want to wait for a stop instead of exiting in place, so we can collect all not-yet-received messages
        from multiprocessing.managers import SyncManager

        # TODO: why can't we use original SyncManager?
        # Scheduler <-> (via _manager) KubeWatcher
        # if _manager dies in place, we will not get any "info" from KubeWatcher until shutdown
        self._manager = SyncManager()
        self._manager.start(mgr_init)

        self.watcher_queue = self._manager.Queue()
        self.current_resource_version = 0
        self.kube_watcher = self._make_kube_watcher_dbnd()

        # pod to airflow key (dag_id, task_id, execution_date)
        self.submitted_pods = {}  # type: Dict[str,SubmittedPodState]

        # sending data to databand tracker
        self.metrics_logger = KubernetesMetricsLogger()

        # disappeared pods mechanism
        self.last_disappeared_pods = {}
        self.current_iteration = 1
        # add `k8s-scheduler:` prefix to all log messages
        self._log = PrefixLoggerAdapter("k8s-scheduler", self.log)
Example #11
    def __init__(
        self, kube_config, task_queue, result_queue, kube_client, worker_uuid, kube_dbnd
    ):
        super(DbndKubernetesScheduler, self).__init__(
            kube_config, task_queue, result_queue, kube_client, worker_uuid
        )
        self.kube_dbnd = kube_dbnd

        # PATCH watcher communication manager
        # we want to wait for a stop instead of exiting in place, so we can collect all not-yet-received messages
        from multiprocessing.managers import SyncManager

        # Scheduler <-> (via _manager) KubeWatcher
        # if _manager dies in place, we will not get any "info" from KubeWatcher until shutdown
        self._manager = SyncManager()
        self._manager.start(mgr_init)

        self.watcher_queue = self._manager.Queue()
        self.current_resource_version = 0
        self.kube_watcher = self._make_kube_watcher_dbnd()
        # will be used for low-level pod interactions
        self.failed_pods_to_ignore = []
        self.running_pods = {}
        self.pod_to_task = {}
        self.metrics_logger = KubernetesMetricsLogger()
Example #12
def joinable_returnable_process_manager(n):
    '''
    This decorator runs n processes to complete a list of tasks given to the decorated function.
    It waits until all computations have completed, and returns a list of results.
    :param n: Number of processes to run in parallel
    :return: List of results
    '''

    manager = SyncManager()
    manager.start()
    return_object = manager.list()

    def faux_decorator(f):
        def wrapper(list_of_parameters):
            q = manager.Queue()
            lock = manager.Lock()
            [q.put(item) for item in list_of_parameters]
            processes = [[
                q.put(None),
                queued_start_process(f, q, lock, return_object)
            ] for i in range(n)]

            [p[1].join() for p in processes]
            return return_object

        return wrapper

    return faux_decorator
Example #13
def run_local():
    from multiprocessing.managers import SyncManager

    manager = SyncManager()
    manager.start()
    flags.namespace = manager.Namespace()
    flags.namespace.run = True

    signal.signal(signal.SIGINT, signal_handler)

    print(sys.argv)
    ins = GetYp()
    # ins.getCookie()
    print(ins)
    print("Instance: ", ins)

    update_nl = True
    if "no_namelist" in sys.argv:
        update_nl = False
    if "drain" in sys.argv:
        update_nl = False
    if not update_nl:
        print("Not fetching new names from site!")

    # ins.go(ctrlNamespace=flags.namespace, update_namelist=True)
    ins.go(ctrlNamespace=flags.namespace,
           update_namelist=update_nl,
           local=True)
Example #14
def main():

    config = load_config(extra_args_func=gw_args)
    init_logger(config)

    log.info(f'Backend.AI Gateway {__version__}')
    log.info(f'runtime: {env_info()}')

    log_config = logging.getLogger('ai.backend.gateway.config')
    log_config.debug('debug mode enabled.')

    if config.debug:
        aiohttp.log.server_logger.setLevel('DEBUG')
        aiohttp.log.access_logger.setLevel('DEBUG')
    else:
        aiohttp.log.server_logger.setLevel('WARNING')
        aiohttp.log.access_logger.setLevel('WARNING')

    asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())

    num_workers = os.cpu_count()
    manager = SyncManager()
    manager.start(lambda: signal.signal(signal.SIGINT, signal.SIG_IGN))
    shared_states = manager.Namespace()
    shared_states.lock = manager.Lock()
    shared_states.barrier = manager.Barrier(num_workers)
    shared_states.agent_last_seen = manager.dict()

    try:
        aiotools.start_server(server_main, num_workers=num_workers,
                              extra_procs=[event_router],
                              args=(config, shared_states))
    finally:
        manager.shutdown()
        log.info('terminated.')
Example #15
class DbndKubernetesExecutor(KubernetesExecutor):
    def __init__(self, kube_dbnd=None):
        # type: (DbndKubernetesExecutor, DbndKubernetesClient) -> None
        super(DbndKubernetesExecutor, self).__init__()

        from multiprocessing.managers import SyncManager

        self._manager = SyncManager()

        self.kube_dbnd = kube_dbnd
        _update_airflow_kube_config(airflow_kube_config=self.kube_config,
                                    engine_config=kube_dbnd.engine_config)

    def start(self):
        logger.info("Starting Kubernetes executor..")
        self._manager.start(mgr_init)

        dbnd_run = try_get_databand_run()
        if dbnd_run:
            self.worker_uuid = str(dbnd_run.run_uid)
        else:
            self.worker_uuid = (
                KubeWorkerIdentifier.get_or_create_current_kube_worker_uuid())
        self.log.debug("Start with worker_uuid: %s", self.worker_uuid)

        # always need to reset resource version since we don't know
        # when we last started, note for behavior below
        # https://github.com/kubernetes-client/python/blob/master/kubernetes/docs
        # /CoreV1Api.md#list_namespaced_pod
        # KubeResourceVersion.reset_resource_version()
        self.task_queue = self._manager.Queue()
        self.result_queue = self._manager.Queue()

        self.kube_client = self.kube_dbnd.kube_client
        self.kube_scheduler = DbndKubernetesScheduler(
            self.kube_config,
            self.task_queue,
            self.result_queue,
            self.kube_client,
            self.worker_uuid,
            kube_dbnd=self.kube_dbnd,
        )

        if self.kube_dbnd.engine_config.debug:
            self.log.setLevel(logging.DEBUG)
            self.kube_scheduler.log.setLevel(logging.DEBUG)

        self._inject_secrets()
        self.clear_not_launched_queued_tasks()
        self._flush_result_queue()

    # override - by default the UpdateQuery does not work, failing with
    # sqlalchemy.exc.CompileError: Unconsumed column names: state
    # due to the model override
    # + we don't want to change task statuses - maybe they are managed by other executors
    @provide_session
    def clear_not_launched_queued_tasks(self, *args, **kwargs):
        # we don't clear kubernetes tasks from previous run
        pass
Example #16
 def setUp(self):
     if not hasattr(self, 'manager'):
         self.manager = SyncManager()
         self.manager.start(
             lambda: signal.signal(signal.SIGINT, signal.SIG_IGN))
     l = linux.Linux('/bin/ls')
     self.state = State(ConstraintSet(), l)
     self.lock = self.manager.Condition()
Example #17
 def _run_server(cls, *args):
     # make sure that the server ignores SIGINT (KeyboardInterrupt)
     signal.signal(signal.SIGINT, signal.SIG_IGN)
     # prevent connection errors from triggering exceptions
     try:
         SyncManager._run_server(*args)
     except socket.error:
         pass
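An override like this presumably lives in a SyncManager subclass, so the customized classmethod replaces the stock server loop for managers of that subclass. A minimal sketch of the wrapper (class name hypothetical):

import signal
import socket
from multiprocessing.managers import SyncManager

class QuietSyncManager(SyncManager):
    @classmethod
    def _run_server(cls, *args):
        # ignore SIGINT in the server and swallow socket errors, as above
        signal.signal(signal.SIGINT, signal.SIG_IGN)
        try:
            SyncManager._run_server(*args)
        except socket.error:
            pass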
Example #19
 def __init__(self, args):
     super(MultiprocessingContext, self).__init__(args)
     # Configuration directory (i.e., INI files folder)
     self.config_dir = args.i
     # Command line action
     if hasattr(args, 'action'):
         self.action = args.action
     # Input
     self.directory = args.directory
     if hasattr(args, 'dataset_list'):
         self.dataset_list = args.dataset_list
     if hasattr(args, 'dataset_id'):
         self.dataset_id = args.dataset_id
     if hasattr(args, 'incoming'):
         self.incoming = args.incoming
     # Multiprocessing configuration
      self.processes = args.max_processes if args.max_processes <= cpu_count() else cpu_count()
     self.use_pool = (self.processes != 1)
     # Scan counters
     self.scan_errors = 0
     self.scan_data = 0
     self.nbsources = 0
     # Process manager
     if self.use_pool:
         self.manager = SyncManager()
         self.manager.start()
         self.progress = self.manager.Value('i', 0)
         Print.BUFFER = self.manager.Value(c_char_p, '')
     else:
         self.progress = Value('i', 0)
     # Stdout lock
     self.lock = Lock()
     # Directory filter (esgmapfile + esgcheckvocab)
     if hasattr(args, 'ignore_dir'):
         self.dir_filter = args.ignore_dir
     # File filters (esgmapfile + esgcheckvocab)
     self.file_filter = []
      if hasattr(args, 'include_file'):
         if args.include_file:
             self.file_filter.extend([(f, True) for f in args.include_file])
         else:
             # Default includes netCDF only
              self.file_filter.append((r'^.*\.nc$', True))
     if hasattr(args, 'exclude_file'):
         if args.exclude_file:
             self.file_filter.extend([(f, False)
                                      for f in args.exclude_file])
         else:
              # Default excludes hidden files
              self.file_filter.append((r'^\..*$', False))
     # Facet declaration (esgcheckvocab + esgdrs)
     self.set_values = {}
     if hasattr(args, 'set_value') and args.set_value:
         self.set_values = dict(args.set_value)
     self.set_keys = {}
     if hasattr(args, 'set_key') and args.set_key:
         self.set_keys = dict(args.set_key)
Example #20
class StateTest(unittest.TestCase):
    _multiprocess_can_split_ = True

    def setUp(self):
        if not hasattr(self, 'manager'):
            self.manager = SyncManager()
            self.manager.start(
                lambda: signal.signal(signal.SIGINT, signal.SIG_IGN))
        dirname = os.path.dirname(__file__)
        l = linux.Linux(os.path.join(dirname, 'binaries', 'basic_linux_amd64'))
        self.state = State(ConstraintSet(), l)
        self.lock = self.manager.Condition()

    def test_workspace_save_load(self):
        self.state.constraints.add(True)
        workspace = Workspace(self.lock, 'mem:')
        id_ = workspace.save_state(self.state)
        state = workspace.load_state(id_)

        # Make sure our memory maps come back through serialization
        for left, right in zip(sorted(self.state.mem._maps),
                               sorted(state.mem._maps)):
            self.assertEqual(left.start, right.start)
            self.assertEqual(left.end, right.end)
            self.assertEqual(left.name, right.name)

        # Check constraints
        self.assertEqual(str(state.constraints), str(self.state.constraints))

    def test_workspace_id_start_with_zero(self):
        workspace = Workspace(self.lock, 'mem:')
        id_ = workspace.save_state(self.state)
        self.assertEqual(id_, 0)

    def test_output(self):
        out = ManticoreOutput('mem:')
        name = 'mytest'
        message = 'custom message'
        out.save_testcase(self.state, name, message)
        workspace = out._store._data

        # Make sure names are constructed correctly
        for entry, data in workspace.items():
            self.assertTrue(entry.startswith(name))
            if 'messages' in entry:
                self.assertTrue(message in data)

        keys = [x.split('.')[1] for x in workspace.keys()]

        for key in self.state.platform.generate_workspace_files():
            self.assertIn(key, keys)

        # Make sure we log everything we should be logging
        self.assertIn('smt', keys)
        self.assertIn('trace', keys)
        self.assertIn('messages', keys)
        self.assertIn('input', keys)
        self.assertIn('pkl', keys)
Example #21
 def __init__(self, args):
     # Init print management
     Print.init(log=args.log, debug=args.debug, all=args.all, cmd=args.prog)
     # Print command-line
     Print.command()
     self._process_color_arg(args)
     # Get project and related configuration
     self.project = args.project
     self.config_dir = args.i
     self.processes = args.max_processes if args.max_processes <= cpu_count() else cpu_count()
     self.use_pool = (self.processes != 1)
     self.lock = Lock()
     self.nbfiles = 0
     self.nbskip = 0
     self.nberrors = 0
     self.file_filter = []
     if args.include_file:
         self.file_filter.extend([(f, True) for f in args.include_file])
     else:
         # Default includes netCDF only
          self.file_filter.append((r'^.*\.nc$', True))
      if args.exclude_file:
          self.file_filter.extend([(f, False) for f in args.exclude_file])
      else:
          # Default excludes hidden files
          self.file_filter.append((r'^\..*$', False))
     self.dir_filter = args.ignore_dir
     # Init process manager
     if self.use_pool:
         manager = SyncManager()
         manager.start()
         Print.BUFFER = manager.Value(c_char_p, '')
         self.progress = manager.Value('i', 0)
     else:
         self.progress = Value('i', 0)
     self.tunits_default = None
     if self.project in DEFAULT_TIME_UNITS.keys():
         self.tunits_default = DEFAULT_TIME_UNITS[self.project]
     # Change frequency increment
     if args.set_inc:
         for table, frequency, increment, units in args.set_inc:
             if table not in set(zip(*FREQ_INC.keys())[0]):
                 raise InvalidTable(table)
             if frequency not in set(zip(*FREQ_INC.keys())[1]):
                 raise InvalidFrequency(frequency)
             keys = [(table, frequency)]
             if table == 'all':
                 keys = [k for k in FREQ_INC.keys() if k[1] == frequency]
             if frequency == 'all':
                 keys = [k for k in FREQ_INC.keys() if k[0] == table]
             for key in keys:
                 FREQ_INC[key] = [float(increment), str(units)]
     # Get reference time properties if submitted
     # Default is to deduce them from first file scanned
     self.ref_calendar = args.calendar
     self.ref_units = args.units
     # Init collector
     self.sources = None
Example #22
 def setUp(self):
     if not hasattr(self, 'manager'):
         self.manager = SyncManager()
         self.manager.start(
             lambda: signal.signal(signal.SIGINT, signal.SIG_IGN))
     dirname = os.path.dirname(__file__)
     l = linux.Linux(os.path.join(dirname, 'binaries', 'basic_linux_amd64'))
     self.state = State(ConstraintSet(), l)
     self.lock = self.manager.Condition()
Example #23
    def get_instance(shared_dictionary: dict = None) -> Configurator:
        # print('PID:' + str(os.getpid()) + ' Config generated. __config Exist?' + str(ConfigurationFactory.__config is not None))
        if ConfigurationFactory.__config is None:
            SyncManager.register('ManagedObjectFactory', ManagedObjectFactory,
                                 ManagedObjectFactoryProxy)
        if ConfigurationFactory.__config is None or shared_dictionary is not None:
            ConfigurationFactory.__config = _BaseConfigurator(
                shared_dictionary=shared_dictionary)

        return ConfigurationFactory.__config
Example #24
 def __init__(self, *args, **kwargs):
     # init ingestor process, create tweet queue
     manager = SyncManager()
     manager.start(mgr_init)
     self.tweet_queue = manager.Queue()
     self.ingestion_process = multiprocessing.Process(target=do_ingestion, args=(self.tweet_queue,))
     self.ingestion_process.start()
     
     # call superclass init
     tweepy.StreamListener.__init__(self, *args, **kwargs)
Example #25
    def __init__(self, queue_size=None):
        #print "instantiating process manager"
        SyncManager.__init__(self)

        self.start()

        self.ff = None
        self.input_queue = self.Queue(queue_size)
        self.output_queue = self.Queue(queue_size)
        self.worker = None
Example #27
   def connect(self):
      """
      Attempts to connect to the Queue on the host/port for which the DFS was initialized for.
      If no connection can be made, DFS will keep attempting to connect until a connection
      can be established.  One connection is established the remove methods requested will be
      registered.
      """

      # remove connection from cache:
      # BaseProxy class has thread-local storage which caches the connection,
      # and it is reused for future connections, causing "broken pipe" errors
      # when creating a new manager.
      if self.queue in BaseProxy._address_to_local:
         del BaseProxy._address_to_local[self.queue][0].connection

      # register handlers
      SyncManager.register("get_streams")
      SyncManager.register("get_queue")
      SyncManager.register("get_store")
      SyncManager.register("get_properties")

      print "connecting to queue", self.queue
      while self.alive:

         try:
            self.impq= SyncManager(address= self.queue, authkey= self.qauthkey)
            self.impq.connect()
            break
         except (EOFError, IOError, SocketError) as e:
            print "could not connect ...trying again", str(e)
            sleep(1)
Example #28
def prepare_experiment(env, args):
    # Manager to share PER between a learner and explorers
    SyncManager.register('PrioritizedReplayBuffer', PrioritizedReplayBuffer)
    manager = SyncManager()
    manager.start()

    kwargs = get_default_rb_dict(args.replay_buffer_size, env)
    kwargs["check_for_update"] = True
    global_rb = manager.PrioritizedReplayBuffer(**kwargs)

    # queues to share network parameters between a learner and explorers
    n_queue = 1 if args.n_env > 1 else args.n_explorer
    n_queue += 1  # for evaluation
    queues = [manager.Queue() for _ in range(n_queue)]

    # Event object to share training status. If the event is set, all explorers stop sampling transitions
    is_training_done = Event()

    # Lock
    lock = manager.Lock()

    # Shared memory objects to count number of samples and applied gradients
    trained_steps = Value('i', 0)

    return global_rb, queues, is_training_done, lock, trained_steps
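The key move above is SyncManager.register('PrioritizedReplayBuffer', PrioritizedReplayBuffer) before manager.start(): any class registered this way becomes constructible through the manager, living in the server process and accessed via a proxy. A self-contained illustration with a hypothetical Counter class:

from multiprocessing.managers import SyncManager

class Counter:
    def __init__(self):
        self._n = 0

    def increment(self):
        self._n += 1

    def value(self):
        return self._n

SyncManager.register('Counter', Counter)

if __name__ == '__main__':
    manager = SyncManager()
    manager.start()
    counter = manager.Counter()  # proxy to a Counter inside the manager process
    counter.increment()
    print(counter.value())       # 1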
Example #29
def Manager():
    '''
    Returns a manager associated with a running server process

    The manager's methods such as `Lock()`, `Condition()` and `Queue()`
    can be used to create shared objects.
    '''
    from multiprocessing.managers import SyncManager
    m = SyncManager()
    m.start()
    return m
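This mirrors the stdlib's own multiprocessing.Manager() helper. Usage sketch:

m = Manager()
d = m.dict()   # shared dict proxy
q = m.Queue()  # shared queue proxy
m.shutdown()   # stop the server process when done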
Example #30
    def __init__(self, kube_dbnd=None):
        # type: (DbndKubernetesExecutor, DbndKubernetesClient) -> None
        super(DbndKubernetesExecutor, self).__init__()

        from multiprocessing.managers import SyncManager

        self._manager = SyncManager()

        self.kube_dbnd = kube_dbnd
        _update_airflow_kube_config(airflow_kube_config=self.kube_config,
                                    engine_config=kube_dbnd.engine_config)
Example #31
    def _finalize_manager(process, *args, **kwargs):
        """Shutdown the manager process."""
        def _join(functor, *args, **kwargs):
            timeout = kwargs.get('timeout')
            if timeout is not None and timeout < 1:
                kwargs['timeout'] = 1

            functor(*args, **kwargs)

        process.join = functools.partial(_join, process.join)
        SyncManager._finalize_manager(process, *args, **kwargs)
Example #32
def start_vision_process(manager: SyncManager) -> \
        Tuple[Namespace, Event, Event]:
    ns = manager.Namespace()
    # init ns
    setattr(ns, 'active_config', ConfigMode.GEARS)

    evt = manager.Event()
    sh_evt = manager.Event()
    proc = mp.Process(target=vision_starter, args=(ns, evt, sh_evt))
    proc.start()
    return ns, evt, sh_evt
Example #33
def Manager():
    """
    Returns a manager associated with a running server process
    
    The manager's methods such as `Lock()`, `Condition()` and `Queue()`
    can be used to create shared objects.
    """
    from multiprocessing.managers import SyncManager
    m = SyncManager()
    m.start()
    return m
Example #34
def manager():
    """ Get multiprocessing manager

    Transparently creates a :obj:`multiprocessing.manager.SyncManager` the first time it's invoked. Used for sharing values across multipe concurrently executing Manticore instances.

    """
    global _manager
    if _manager is None:
        _manager = SyncManager()
        _manager.start(lambda: signal.signal(signal.SIGINT, signal.SIG_IGN))
    return _manager
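Because the module-level _manager is created lazily and then cached, every caller shares one manager process, so proxies created through it are visible to all of the concurrently executing instances mentioned in the docstring:

m1 = manager()
m2 = manager()
assert m1 is m2     # same cached SyncManager
shared = m1.dict()  # proxy usable from any worker that receives it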
Example #35
    def func_server(self):
        p = current_process()
        print('{} id: {}'.format(p.name, id(p)))
        host = '127.0.0.1'
        port = 12345
        authkey = 'authkey'

        # SyncManager.register('get_list', list, proxytype=ListProxy)
        mgr = SyncManager(address=(host, port),
                          authkey=authkey.encode('utf-8'))
        server = mgr.get_server()
        server.serve_forever()
Example #36
    def __init__(self, servo_id):
        self.servo_id = servo_id
        self.angle = Value('f', 0.0)
        self.stop_signal = Value('b', False)

        # http://jtushman.github.io/blog/2014/01/14/python-|-multiprocessing-and-interrupts/
        manager = SyncManager() # instead of regular Manager because we want to ignore keyboard interrupts
        manager.start(Servo.init_mgr) # start the manager explicitly
        self.command_queue = manager.list([])
        self.current_command = manager.dict()

        self.finished = Value('b', False)
Example #37
    def __init__(self,
                 initial=None,
                 store=None,
                 policy='random',
                 context=None,
                 **kwargs):
        super().__init__(**kwargs)

        # Signal / callback handlers will potentially be invoked in different
        # worker processes. State provides a local context to save data.

        self.subscribe('did_load_state', self._register_state_callbacks)

        # This is the global manager that will handle all shared memory access among workers
        self.manager = SyncManager()
        self.manager.start(
            lambda: signal.signal(signal.SIGINT, signal.SIG_IGN))

        # The main executor lock. Acquire this for accessing shared objects
        self._lock = self.manager.Condition()

        # Shutdown Event
        self._shutdown = self.manager.Event()

        # States on storage. Shared dict state name ->  state stats
        self._states = self.manager.list()

        # Number of currently running workers. Initially no running workers
        self._running = self.manager.Value('i', 0)

        self._workspace = Workspace(self._lock, store)

        # Executor wide shared context
        if context is None:
            context = {}
        self._shared_context = self.manager.dict(context)

        # scheduling priority policy (wip)
        # Set policy
        policies = {
            'random': Random,
            'uncovered': Uncovered,
            'branchlimited': BranchLimited,
        }
        self._policy = policies[policy](self)
        assert isinstance(self._policy, Policy)

        if self.load_workspace():
            if initial is not None:
                logger.error("Ignoring initial state")
        else:
            if initial is not None:
                self.add(initial)
Example #38
class DataSender:

    def __init__(self,phantfile):
        try:
            self.phant = json.load(open(phantfile, 'r'))
        except IOError:
            raise ValueError("Invalid phantfile location")
        self.running = True
        self._manager = SyncManager()


    def start(self):
        self._manager.start(self._mgr_init)
        self._que = self._manager.Queue()
        self._process = Process(target=self.up, args=(self._que,))
        self._process.start()

    def _mgr_init(self):
        signal.signal(signal.SIGINT, signal.SIG_IGN)
        print("initialized manager")

    def up(self,que):
        
        def stop(val,val2):
            print "process SIGINT stopping"
            self.running = False

        signal.signal(signal.SIGINT, stop)
        print('datauploader started')
        while self.running or not que.empty():
            item = json.loads(que.get(True))
            print("handling item={0}".format(item))
            self.httpsend(item)
            que.task_done()
            time.sleep(2)
        print("datauploader process terminating...")

    def send(self, data):
        self._que.put(data)

    def httpsend(self, data):
        postdata = urllib.urlencode(data)
        headers = {'Phant-Private-Key': self.phant['privateKey'] }
        req = urllib2.Request(self.phant['inputUrl'], postdata, headers)
        res = urllib2.urlopen(req)
        content = res.read()
        print("response: {0}".format(content))
    
    def stop(self):
        print("shutting down sender")
        self.running = False
        self._que.join()
        self._process.terminate()
Example #39
  def _finalize_manager(process, *args, **kwargs):
    """Shutdown the manager process."""

    def _join(functor, *args, **kwargs):
      timeout = kwargs.get('timeout')
      if timeout is not None and timeout < 1:
        kwargs['timeout'] = 1

      functor(*args, **kwargs)

    process.join = functools.partial(_join, process.join)
    SyncManager._finalize_manager(process, *args, **kwargs)
Example #40
 def test_run_keyboard_interrupt(sync_manager: SyncManager) -> None:
     """Function: run should stop by keyboard interupt."""
     queue_process_id = sync_manager.Queue()
     replier = Replier(sync_manager.dict(), queue_process_id)
     loop = asyncio.new_event_loop()
     with ProcessPoolExecutor() as executor:
         future = cast(
             "Future[Any]",
             loop.run_in_executor(executor, run, replier, None,
                                  keyboard_interrupt))
         queue_process_id.get()
     assert not future.get_loop().is_running()
     assert not future.done()
Example #41
 def __init__(self, mp_manager: SyncManager, stop_event: threading.Event,
              closing_event: threading.Event, connect_q: queue.Queue,
              result_q: queue.Queue):
     self.connect_q = connect_q
     self.result_q = result_q
     self._stop_event = stop_event
     self._closing_event = closing_event
     self.all_connections_created = mp_manager.Event()
     self.all_connections_closed = mp_manager.Event()
     self.last_all_closed = time.time()
     self.connections: Dict[int, Connection] = {}
     self.sock_filenos: List[int] = []  # cache this, select() takes a list
     self.connected_targets: Dict[Tuple[int, str, int], Connection] = {}
Example #42
 def __init__(self):
     self._pid  = os.getpid()
     #abort event
     self._mgr  = SyncManager()
     self._mgr.start(ignore_interrupt)
     self._abort_event = self._mgr.Event()
     #stdout/err
     self._err  = StreamEncoder(sys.stderr)
     self._out  = StreamEncoder(sys.stdout)
     #connection information
     self._port = None
     self._con  = None
     self()
Example #43
def get_binary_matrix_from_service(q):
    global matrix_service
    if matrix_service is None:
        matrices = dict()
        matrix_service = SyncManager(address=("localhost", 50000), authkey="")
        SyncManager.register("get_matrix", lambda q: get_matrix(q, matrices))
        Process(target=lambda: matrix_service.get_server().serve_forever()).start()
    SyncManager.register("get_matrix")
    matrix_service.connect()
    return matrix_service.get_matrix(q)
Example #44
   def connect(self):

      # register with Queue
      SyncManager.register('getPipeline')
      SyncManager.register('getStore')
      self.qInstance= self.opts.qInstance
      (self.qHost, self.qPort, self.qKey)= self.opts.queue.split(':')
      queue= SyncManager(address= (self.qHost, int(self.qPort)), authkey= self.qKey)
      queue.connect()
      self.pipeline= queue.getPipeline()
      self.store= queue.getStore()
Example #45
def init_good_sync_manager():
    from multiprocessing.managers import SyncManager
    #handle SIGINT from SyncManager object
    def mgr_sig_handler(signal, frame):
        print 'not closing the mgr'

    #initializer for SyncManager
    def mgr_init():
        import signal
        signal.signal(signal.SIGINT, mgr_sig_handler)
        print 'initialized manager'

    #using syncmanager directly instead of letting Manager() do it for me
    manager = SyncManager()
    manager.start(mgr_init)
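As written, the started manager is discarded when the function returns; presumably the surrounding code in the original project kept a reference. A hedged variant of the same pattern that hands the manager back (helper name hypothetical):

import signal
from multiprocessing.managers import SyncManager

def make_interrupt_tolerant_manager():
    manager = SyncManager()
    # the initializer runs in the manager process and ignores SIGINT there
    manager.start(lambda: signal.signal(signal.SIGINT, signal.SIG_IGN))
    return manager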
Example #46
def get_server_queue():
    #FIXME: some OSX users were getting "Can't assign requested address" errors
    # if we use socket.gethostname() for the address. Changing it to
    # 'localhost' seems to fix the issue, but I don't know why. We had to
    # use socket.gethostname() in order to get our benchmark tests to run
    # using qsub on a linux cluster, so with this 'fix', testflo benchmark tests
    # will likely not work on a cluster of OSX machines.
    if sys.platform == 'darwin':
        addr = 'localhost'
    else:
        addr = socket.gethostname()

    manager = SyncManager(address=(addr, 0), authkey=_testflo_authkey)
    manager.start()
    return manager, manager.Queue()
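Binding to port 0 lets the OS pick a free port; the actual endpoint is available afterwards as manager.address, and manager proxies (such as the returned Queue) can be pickled and shipped to worker processes. A hedged sketch of a worker attaching to the same manager:

from multiprocessing.managers import SyncManager

def connect_to_server_manager(address, authkey):
    # assumes the server's manager.address and authkey were passed along
    client = SyncManager(address=address, authkey=authkey)
    client.connect()
    return client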
Example #47
class Downloader(object):
    def __init__(self, timeout=30, retries=100, wait=1):
        self.timeout = timeout
        self.retries = retries
        self.wait = wait
        
        self.manager = SyncManager()
        self.manager.start()
        
    def retry_fetch_data(self, url):
        market_data = self.fetch_data(url)
        
        retries = 1
        while not market_data and retries < self.retries:
            print "Retry #%s..." % str(retries)
            market_data = self.fetch_data(url)
            if market_data:
                print "Fetched: " + str(len(market_data))
            else:
                print "Fetched nothing!"
            retries += 1
        
        return market_data
    
    def fetch_data(self, url):
        limit = 60
        msg = "Downloading " + url[0: min(limit, len(url))] 
        if len(url) > limit:
            msg += "(+" + str(len(url) - limit) + ")"
        print msg
            
        return_dict = self.manager.dict()
        self.job = Process(target=get_page_data, args=(url, return_dict))
        self.job.start()
        
        self.job.join(self.timeout)
        if self.job.is_alive():
            self.job.terminate()
        self.job = None
        
        market_data = None
        if 'page' in return_dict:
            market_data = return_dict['page']
        
        if self.wait > 0:
            time.sleep(self.wait)
        
        return market_data
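get_page_data is not shown in this snippet; given that the parent reads return_dict['page'], it is presumably a small worker along these lines (sketched with the Python 3 stdlib, whereas the original is Python 2):

import urllib.request

def get_page_data(url, return_dict):
    # fetch the page and stash the body where the parent expects it
    with urllib.request.urlopen(url) as resp:
        return_dict['page'] = resp.read()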
Example #48
   def connect(self):
      (qHost, qPort, qKey)= self.opts.queue.split(':')
      self.queue= SyncManager(address= (qHost, int(qPort)), authkey= qKey)
      self.queue.connect()
      self.pipeline= self.queue.getPipeline()
      self.store= self.queue.getStore()

      # register with DFS
      self.dfs = None
      self.instances= dict()
      if self.opts.dfs != None:
         SyncManager.register('getInstances')
         (dHost, dPort, dKey)= self.opts.dfs.split(':')
         self.dfs= SyncManager(address= (dHost, int(dPort)), authkey= dKey)
         self.dfs.connect()
         self.instances= self.dfs.getInstances()
Example #49
 def __init__(self,phantfile):
     try:
         self.phant = json.load(open(phantfile, 'r'))
     except IOError:
         raise ValueError("Invalid phantfile location")
     self.running = True
     self._manager = SyncManager()
Example #50
    def __init__(self, name='default', address=None, authkey=None):
        """
        This is the default constructor for the class.

        :param name:    The manager name
        :param address: The address of the server
        :param authkey: The auth key
        :return:
        """

        # Set internals
        self.__name     = name

        # Override the manager
        SyncManager.__init__(self, address=address, authkey=authkey)
        return
Example #51
   def __init__(self, address, authkey, taskdir= "tasks", id= None, **properties):
      """Creates a stream and retrieves the streams priority queue and data-store."""

      self.id= id if id else str(uuid1())
      self.ipaddress= getipaddress()

      self.address= address
      self.taskdir= path.join(taskdir, self.id)
      self.properties= properties

      self.impq= SyncManager(address= self.address, authkey= authkey)
      self.impq.register("get_streams")
      self.impq.register("create_stream")
      self.impq.register("delete_stream")
      self.impq.register("get_store")
      self.impq.register("get_queue")
      self.impq.connect()

      self.jobs= []
      self.impq.create_stream(id= self.id, ipaddress= self.ipaddress, **properties)
      self.store= self.impq.get_store(id= self.id)
      self.queue= self.impq.get_queue(id= self.id)
      self.alive= True
      self._current_thread= None
      self._lock= Lock()
      self.threads= []
      self.errors= {}
      self.ready= {}
      self._progress= {}


      try:
         makedirs(self.taskdir)
      except:
         pass
Example #52
 def __init__(self, timeout=30, retries=100, wait=1):
     self.timeout = timeout
     self.retries = retries
     self.wait = wait
     
     self.manager = SyncManager()
     self.manager.start()
Example #53
	def __init__(self, cookie_file, url_queue_size, pg_queue_size, nr_downloadprocess, nr_parserprocess):
		super(SpiderEngine, self).__init__()

		self.logger = logging.getLogger(self.__class__.__name__)

		self.multiprocess_manager = SyncManager()#SyncManager(('',58585))
		self.multiprocess_manager.start()

		self.lck4urlq=self.multiprocess_manager.Lock()
		self.lck4pageq=self.multiprocess_manager.Lock()
		# event for subprocesses to initiate exit.
		self.shutdown=self.multiprocess_manager.Event()

		self.url_queue=Queue(url_queue_size)
		self.page_queue=Queue(pg_queue_size)
		self.url_hist=self.multiprocess_manager.dict()
		self.urls= UrlScheduler(self.url_queue, self.url_hist, self.lck4urlq)

		# init multiprocess log
		self.mlog=get_logger()
		mhandler=logging.StreamHandler()
		mhandler.setFormatter(logging.Formatter('%(processName)s %(funcName)s() | %(message)s', '%H:%M:%S'))
		self.mlog.addHandler(mhandler)
		self.mlog.setLevel(logging.INFO)

		self.pages= PageScheduler(self.urls, self.page_queue, self.lck4pageq)
		self.downloader= PageDownloader(cookie_file, self.urls, self.pages, self.shutdown, self.multiprocess_manager, nr_downloadprocess, self.mlog)
		self.parser=PageParser(self.urls, self.pages, self.shutdown, self.multiprocess_manager, nr_parserprocess, self.mlog)
Example #54
    def __init__(self, address=MANAGER_ADDRESS, authkey=MANAGER_PASSWORD):
        """
        This is the default constructor for the
        class that registers the valid methods that
        can be called in the manager context.

        :param address:         The address as a tuple (address, port)
        :param authkey:         The authentication key for the server
        :return:
        """

        self.register("get_MqttRxBuffer")
        self.register("get_MqttTxBuffer")
        self.register("get_RawRxBuffer")
        self.register("get_RawTxBuffer")

        # Override the base class
        SyncManager.__init__(self, address=address, authkey=authkey)
        return
Example #55
    def __init__(self, address=MANAGER_ADDRESS, pwd=MANAGER_PASSWORD):
        """
        This is the constructor for the class object that takes
        both the manager's address and password.

        :param address:     The address of the manager server
        :param pwd:         The manager's password
        :return:
        """

        # Override the base class
        SyncManager.__init__(self, address=address, authkey=pwd)
        Singleton.__init__(self)

        # Register the classes needed
        self.register("get_MqttRxBuffer", callable=lambda: self.__MqttRxBuffer)
        self.register("get_MqttTxBuffer", callable=lambda: self.__MqttTxBuffer)
        self.register("get_RawRxBuffer", callable=lambda: self.__RawTxBuffer)
        self.register("get_RawTxBuffer", callable=lambda: self.__RawRxBuffer)
        return
Example #56
    def __init__(self, name="TaskList", mix_ins=None,
                        prefix='ask_', *args, **kwds):
        SyncManager.__init__(self)

        self._name = name
        self._mix_ins = mix_ins
        self._prefix = prefix
        self._args = args
        self._kwds = kwds
        
        x = []
        if mix_ins:
            for m in mix_ins:
                for attr in dir(m):
                    if attr.startswith(prefix):
                        x.append(attr)
        self.register(
            'TaskList',
            callable=TaskList,
            exposed=tuple(x))
Example #57
    def create(self):
        """Methods creates queue server

        Args:
           none

        Returns:
           void

        Raises:
           error: ValueError

        """

        if self.__type != queue.QUEUE_TYPE_SERVER:
            raise ValueError(
                'This operation cannot be done on this queue type')

        q = Queue()
        SyncManager.register('get_queue', callable=lambda: q)
        self.__manager = SyncManager(self.__address, self.__authkey)
        self.__manager.start()
Example #58
    def connect(self):
        """Methods connects to queue

        Args:
           none

        Returns:
           void

        Raises:
           error: ValueError

        """

        if self.__type != queue.QUEUE_TYPE_CLIENT:
            raise ValueError(
                'This operation cannot be done on this queue type')

        q = Queue()
        SyncManager.register('get_queue', callable=lambda: q)
        self.__manager = SyncManager(self.__address, self.__authkey)
        self.__manager.connect()
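Note that the locally created Queue on the client is never used: once connect() succeeds, the registered get_queue() returns a proxy to the queue living in the server process. A hedged sketch (endpoint illustrative):

from multiprocessing.managers import SyncManager

SyncManager.register('get_queue')  # no callable needed on the client side
client = SyncManager(address=('127.0.0.1', 50000), authkey=b'key')
client.connect()
remote_q = client.get_queue()  # proxy to the server-side Queue
remote_q.put('hello')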
Example #59
   def __init__(self, address, authkey, logdir= curdir, piddir= curdir):

      self.streams= {}
      self.address= address
      self.manager= SyncManager(address= self.address, authkey= authkey)
      self.manager.register("create_stream", callable= self.create_stream)
      self.manager.register("delete_stream", callable= self.delete_stream)
      self.manager.register("get_streams", callable= lambda: self.streams, proxytype= DictProxy)
      self.manager.register("get_store", callable= lambda id: self.streams[id].store, proxytype= DictProxy)
      self.manager.register("get_queue", callable= lambda id: self.streams[id].queue, proxytype= PriorityQueue)
      self.manager.register("get_properties", callable= lambda id: self.streams[id].properties, proxytype= DictProxy)

      super(Queue, self).__init__(
         pidfile= path.join(piddir, self.__class__.__name__ + ".pid"),
         stdout= path.join(logdir, self.__class__.__name__ + ".out"),
         stderr= path.join(logdir, self.__class__.__name__ + ".err"),
         stdin= path.join(logdir, self.__class__.__name__ + ".in")
      )
Example #60
   def __init__(self, address, authkey, queue, qauthkey, mnon, mpps, ec2= None, bootstrap= None, deploykey= None, logdir= curdir, piddir= curdir):
      """
      Initializes the available remote methods 
      and I/O streams to be used for the method.  
      Establishes connection to the Queue. 
      """
      super(DFS, self).__init__(
         pidfile= path.join(piddir, self.__class__.__name__ + ".pid"),
         stdout= path.join(logdir, self.__class__.__name__ + ".out"),
         stderr= path.join(logdir, self.__class__.__name__ + ".err"),
         stdin= path.join(logdir, self.__class__.__name__ + ".in")
      )

      self.id= getipaddress()

      self.address= address
      self.authkey= authkey

      self.queue= queue
      self.qauthkey= qauthkey

      self.mnon= mnon
      self.mpps= mpps
      self.bootstrap= bootstrap
      self.deploykey= deploykey

      self.nodes= {}

      self.ec2= ec2
      if self.ec2 != None:
         (self.access_key, self.security_key, self.ami_id, self.security_group, self.key_name, self.instance_type)= self.ec2.split(',')
         self.ec2= EC2Connection(self.access_key, self.security_key)
         print "Connected to EC2", self.ec2

      self.alive= True

      self.manager= SyncManager(address= self.address, authkey= self.authkey)
      self.manager.register("get_nodes", callable= lambda: self.nodes, proxytype= DictProxy)

      self.connect()