Example #1
    def __init__(self, channel, prefetch_count=0):
        self.channel = channel
        self.prefetch_count = prefetch_count or 0

        self._delivered = OrderedDict()
        self._delivered.restored = False
        self._dirty = set()
        self._quick_ack = self._dirty.add
        self._quick_append = self._delivered.__setitem__
        self._on_collect = Finalize(self, self.restore_unacked_once, exitpriority=1)
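For orientation before the remaining examples: a minimal sketch (not taken from any project above) of how multiprocessing.util.Finalize behaves. The finalizer object is itself callable and runs its callback at most once; registering an exitpriority additionally schedules it during interpreter shutdown, where higher priorities run first.

from multiprocessing.util import Finalize

def close_resource(name):
    print('closed %s' % name)

class Resource(object):
    pass

res = Resource()

# Run close_resource('db') when res is garbage collected or, because
# exitpriority is set, at interpreter exit (higher priorities first).
finalizer = Finalize(res, close_resource, args=('db',), exitpriority=10)

finalizer()   # invoke explicitly: prints "closed db"
finalizer()   # a finalizer only ever runs once; this is a no-op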
Example #2
        def __init__(self, address, backlog=None):
            self._address = address
            self._handle_queue = [self._new_handle(first=True)]

            self._last_accepted = None
            sub_debug('listener created with address=%r', self._address)
            self.close = Finalize(self,
                                  PipeListener._finalize_pipe_listener,
                                  args=(self._handle_queue, self._address),
                                  exitpriority=0)
Example #3
    def __init__(self, address, backlog=None):
        self._address = address
        handle = win32.CreateNamedPipe(
            address, win32.PIPE_ACCESS_DUPLEX,
            win32.PIPE_TYPE_MESSAGE | win32.PIPE_READMODE_MESSAGE |
            win32.PIPE_WAIT,
            win32.PIPE_UNLIMITED_INSTANCES, BUFSIZE, BUFSIZE,
            win32.NMPWAIT_WAIT_FOREVER, win32.NULL)
        self._handle_queue = [handle]
        self._last_accepted = None
        sub_debug('listener created with address=%r', self._address)
        self.close = Finalize(self, PipeListener._finalize_pipe_listener,
                              args=(self._handle_queue, self._address),
                              exitpriority=0)
Example #4
def build_pool(nproc, log_file_name=None):
    """
    Make a multiprocessing pool object and a shared log (either a file or 
    stdout) object. 

    nproc - number of processes
    log_file_name - name/None

    returns pool, log
    """
    print('Building pool with', nproc, 'processors')
    lock = mp.Lock()  # a lock to prevent write collisions
    proc_counter = mp.Value('i', 0)

    if log_file_name is None:
        buf = sys.stdout
    else:
        if os.path.exists(log_file_name):
            os.remove(log_file_name)
        buf = open(log_file_name, 'a')

    # Acquire a lock before writing a line
    buf = LockedLineIO([buf], lock)
    log = PrefixedIO(buf, ' ')
    log = VerboseTimingLog(bufs=[log], also_stdout=False, insert_timings=True)

    # make as many pipes as processors (passed all-to-all, used dynamically)
    connections = [mp.Pipe() for i in range(nproc)]
    slave_connections = [c[1] for c in connections]
    my_connections = [c[0] for c in connections]

    t = time()

    pool = mp.Pool(nproc,
                   initializer=init_worker,
                   initargs=(lock, proc_counter, t, log_file_name,
                             slave_connections))
    for i in range(nproc):
        obj = my_connections[0].recv()
        print(obj, file=log)
    print('Built worker pool with {:,} processors'.format(proc_counter.value),
          file=log)

    global _myproc
    _myproc = MyProc(log, lock, t, my_connections)
    global _free_connections
    _free_connections = list(range(nproc))

    # prevent exceptions on teardown by cleaning up logs
    def cleanup_log():
        """ Cleanup the logs """
        get_log().close()

    Finalize(None, cleanup_log, exitpriority=16)
    return pool, log
Example #5
    def __init__(self, processes=None, initializer=None, initargs=(),
                 maxtasksperchild=None):
        self._setup_queues()
        self._taskqueue = Queue.Queue()
        self._cache = {}
        self._state = RUN
        self._maxtasksperchild = maxtasksperchild
        self._initializer = initializer
        self._initargs = initargs

        if processes is None:
            try:
                processes = cpu_count()
            except NotImplementedError:
                processes = 1

        if initializer is not None and not hasattr(initializer, '__call__'):
            raise TypeError('initializer must be a callable')

        self._processes = processes
        self._pool = []
        self._repopulate_pool()

        self._worker_handler = threading.Thread(
            target=Pool._handle_workers,
            args=(self, )
            )
        self._worker_handler.daemon = True
        self._worker_handler._state = RUN
        self._worker_handler.start()


        self._task_handler = threading.Thread(
            target=Pool._handle_tasks,
            args=(self._taskqueue, self._quick_put, self._outqueue, self._pool)
            )
        self._task_handler.daemon = True
        self._task_handler._state = RUN
        self._task_handler.start()

        self._result_handler = threading.Thread(
            target=Pool._handle_results,
            args=(self._outqueue, self._quick_get, self._cache)
            )
        self._result_handler.daemon = True
        self._result_handler._state = RUN
        self._result_handler.start()

        self._terminate = Finalize(
            self, self._terminate_pool,
            args=(self._taskqueue, self._inqueue, self._outqueue, self._pool,
                  self._worker_handler, self._task_handler,
                  self._result_handler, self._cache),
            exitpriority=15
            )
Example #6
    def __init__(self, processes=None, initializer=None, initargs=()):
        self._setup_queues()
        self._taskqueue = Queue.Queue()
        self._cache = {}
        self._state = RUN
        self._initializer = initializer
        self._initargs = initargs

        if processes is None:
            try:
                processes = cpu_count()
            except NotImplementedError:
                processes = 1
        self._size = processes

        if initializer is not None and not hasattr(initializer, '__call__'):
            raise TypeError('initializer must be a callable')

        self._pool = []
        for i in range(processes):
            self._create_worker_process()

        self._task_handler = threading.Thread(
            target=Pool._handle_tasks,
            args=(self._taskqueue, self._quick_put, self._outqueue,
                  self._pool))
        self._task_handler.daemon = True
        self._task_handler._state = RUN
        self._task_handler.start()

        # Thread processing acknowledgements from the ackqueue.
        self._ack_handler = threading.Thread(target=Pool._handle_ack,
                                             args=(self._ackqueue,
                                                   self._quick_get_ack,
                                                   self._cache))
        self._ack_handler.daemon = True
        self._ack_handler._state = RUN
        self._ack_handler.start()

        # Thread processing results in the outqueue.
        self._result_handler = threading.Thread(target=Pool._handle_results,
                                                args=(self._outqueue,
                                                      self._quick_get,
                                                      self._cache))
        self._result_handler.daemon = True
        self._result_handler._state = RUN
        self._result_handler.start()

        self._terminate = Finalize(
            self,
            self._terminate_pool,
            args=(self._taskqueue, self._inqueue, self._outqueue,
                  self._ackqueue, self._pool, self._ack_handler,
                  self._task_handler, self._result_handler, self._cache),
            exitpriority=15)
Example #7
    def __init__(self, file_, callbackfn=None,
                 desc='daemon lock', debug=False):
        self.pidfile = file_
        self.callbackfn = callbackfn
        self.desc = desc
        self.debug = debug
        self.held = False
        # run the lock automatically!
        self.lock()
        self._finalize = Finalize(self, DaemonLock._on_finalize,
                                  args=(self, debug), exitpriority=10)
Example #8
def pool_init():
    global conn
    global api

    conn = pymysql.connect(host='localhost', user='******', password='******', db='vk17', charset='utf8',
                           cursorclass=pymysql.cursors.DictCursor)

    session = vk.Session()
    api = vk.API(session, v='5.62', lang='ru')

    Finalize(conn, pool_close, exitpriority=0)
Example #9
def get_temp_dir():
    '''
    Function returning a temp directory which will be removed on exit
    '''
    # get name of a temp directory which will be automatically cleaned up
    if current_process()._tempdir is None:
        tempdir = tempfile.mkdtemp(prefix='pymp-')
        info('created temp directory %s', tempdir)
        Finalize(None, shutil.rmtree, args=[tempdir], exitpriority=-100)
        current_process()._tempdir = tempdir
    return current_process()._tempdir
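Note the None passed as the object in Example #9: with no object to track, the callback is tied purely to interpreter exit, and Finalize then requires an explicit exitpriority. A minimal sketch of the same temp-directory pattern:

import shutil
import tempfile
from multiprocessing.util import Finalize

tmpdir = tempfile.mkdtemp(prefix='demo-')
# obj=None means exit-only cleanup; the very low exitpriority makes this
# run after most other finalizers during shutdown.
Finalize(None, shutil.rmtree, args=[tmpdir], exitpriority=-100)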
Example #10
    def _start_thread(self):
        # Start thread which transfers data from buffer to pipe
        self._buffer.clear()
        self._thread = threading.Thread(
            target=Queue._feed,
            args=(self._buffer, self._notempty, self._send_bytes, self._wlock,
                  self._writer.close, self._ignore_epipe, self._sem),
            name='QueueFeederThread',
            daemon=True,
        )
        self._thread.start()
        if not self._joincancelled:
            self._jointhread = Finalize(self._thread,
                                        Queue._finalize_join,
                                        [weakref.ref(self._thread)],
                                        exitpriority=-5)
        # Send sentinel to the thread queue object when garbage collected
        self._close = Finalize(
            self,
            Queue._finalize_close, [self._buffer, self._notempty],
            exitpriority=10)
Example #11
    def _start_thread(self):
        debug('Queue._start_thread()')
        self._buffer.clear()
        self._thread = threading.Thread(target=Queue._feed,
                                        args=(self._buffer, self._notempty,
                                              self._send, self._wlock,
                                              self._writer.close),
                                        name='QueueFeederThread')
        self._thread.daemon = True
        debug('doing self._thread.start()')
        self._thread.start()
        debug('... done self._thread.start()')
        if not self._joincancelled:
            self._jointhread = Finalize(self._thread,
                                        Queue._finalize_join,
                                        [weakref.ref(self._thread)],
                                        exitpriority=-5)
        self._close = Finalize(self,
                               Queue._finalize_close,
                               [self._buffer, self._notempty],
                               exitpriority=10)
Example #12
        def func_after_fork(tracer):
            def exit_routine():
                self.exit_routine()

            from multiprocessing.util import Finalize
            import signal
            Finalize(tracer, exit_routine, exitpriority=32)

            def term_handler(signalnum, frame):
                self.exit_routine()

            signal.signal(signal.SIGTERM, term_handler)
Example #13
    def make_instance(stages, context, store):
        SequentialWorker.instance = SequentialWorker(stages,
                                                     context,
                                                     global_store=store)
        SequentialWorker.instance.__enter__()
        # https://stackoverflow.com/a/24724452
        Finalize(
            SequentialWorker.instance,
            SequentialWorker.instance.__exit__,
            args=(None, None, None),
            exitpriority=10,
        )
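Examples #13, #16, #18 and #31 all rely on the idiom from the Stack Overflow answer linked above: enter a context manager when a worker starts and hand its __exit__ to Finalize so cleanup still happens when the worker process shuts down. A self-contained sketch with hypothetical names:

import multiprocessing as mp
import os
from multiprocessing.util import Finalize

resource = None  # one per worker process

def init_worker():
    global resource
    cm = open('worker-%d.log' % os.getpid(), 'a')  # any context manager
    resource = cm.__enter__()
    # Close the resource when the worker process exits normally.
    Finalize(resource, cm.__exit__, args=(None, None, None), exitpriority=16)

def task(x):
    resource.write('%d\n' % x)
    return x * x

if __name__ == '__main__':
    pool = mp.Pool(2, initializer=init_worker)
    print(pool.map(task, range(4)))
    pool.close()   # let workers exit normally so their finalizers run
    pool.join()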
Example #14
    def __init__(self, address, family, backlog=1):
        self._socket = socket.socket(getattr(socket, family))
        self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        self._socket.bind(address)
        self._socket.listen(backlog)
        self._address = self._socket.getsockname()
        self._family = family
        self._last_accepted = None
        if family == 'AF_UNIX':
            self._unlink = Finalize(self, os.unlink, args=(address,),
                                    exitpriority=0)
        else:
            self._unlink = None
Example #15
    def run(self):
        '''
        Bind the pub and pull sockets for events
        '''
        salt.utils.appendproctitle(self.__class__.__name__)
        self.io_loop = tornado.ioloop.IOLoop()
        with salt.utils.async.current_ioloop(self.io_loop):
            if self.opts['ipc_mode'] == 'tcp':
                epub_uri = int(self.opts['tcp_master_pub_port'])
                epull_uri = int(self.opts['tcp_master_pull_port'])
            else:
                epub_uri = os.path.join(self.opts['sock_dir'],
                                        'master_event_pub.ipc')
                epull_uri = os.path.join(self.opts['sock_dir'],
                                         'master_event_pull.ipc')

            self.publisher = salt.transport.ipc.IPCMessagePublisher(
                epub_uri, io_loop=self.io_loop)

            self.puller = salt.transport.ipc.IPCMessageServer(
                epull_uri,
                io_loop=self.io_loop,
                payload_handler=self.handle_publish,
            )

            # Start the master event publisher
            old_umask = os.umask(0o177)
            try:
                self.publisher.start()
                self.puller.start()
                if self.opts['client_acl'] or self.opts['client_acl_blacklist']:
                    salt.utils.warn_until(
                        'Nitrogen',
                        'ACL rules should be configured with \'publisher_acl\' and '
                        '\'publisher_acl_blacklist\' not \'client_acl\' and '
                        '\'client_acl_blacklist\'. This functionality will be removed in Salt '
                        'Nitrogen.')
                if (self.opts['ipc_mode'] != 'tcp' and
                    (self.opts['publisher_acl'] or self.opts['client_acl']
                     or self.opts['external_auth'])):
                    os.chmod(
                        os.path.join(self.opts['sock_dir'],
                                     'master_event_pub.ipc'), 0o666)
            finally:
                os.umask(old_umask)

            # Make sure the IO loop and respective sockets are closed and
            # destroyed
            Finalize(self, self.close, exitpriority=15)

            self.io_loop.start()
Example #16
    def _init_worker(*args, **kwargs):
        """Initialize a :class:`multiprocessing.Pool` worker.

        Call the Django's ``ParallelTestSuite.init_worker`` and then
        also start the manager infrastructure.
        """
        result = django_init_worker(*args, **kwargs)

        # Further patch channel names and the like with our current pid,
        # so that parallel managers and executors don't clash on the
        # same channels and directories.
        resolwe_settings.FLOW_MANAGER_SETTINGS['REDIS_PREFIX'] += '-parallel-pid{}'.format(os.getpid())

        _create_test_dirs()

        overrides = _prepare_settings()
        overrides.__enter__()
        Finalize(overrides, lambda: overrides.__exit__(None, None, None), exitpriority=16)

        _manager_setup()

        state_cleanup = AtScopeExit(manager.state.destroy_channels)
        state_cleanup.__enter__()
        Finalize(state_cleanup, state_cleanup.__exit__, exitpriority=16)

        listener = CommandContext('runlistener', '--clear-queue')
        listener.__enter__()
        Finalize(listener, listener.__exit__, exitpriority=16)

        workers = CommandContext('runworker', only_channels=[state.MANAGER_CONTROL_CHANNEL])
        workers.__enter__()
        Finalize(workers, workers.__exit__, exitpriority=16)

        signal_override = override_settings(FLOW_MANAGER_SYNC_AUTO_CALLS=True)
        signal_override.__enter__()
        Finalize(signal_override, lambda: signal_override.__exit__(None, None, None), exitpriority=16)

        return result
Example #17
    def __init__(self, *args, **kwargs):
        """Initialize the database scheduler."""
        self.app = kwargs['app']
        self.dburi = kwargs.get('dburi') or self.app.conf.get(
            'beat_dburi') or DEFAULT_BEAT_DBURI
        self.engine, self.Session = session_manager.create_session(self.dburi)
        session_manager.prepare_models(self.engine)

        self._dirty = set()
        Scheduler.__init__(self, *args, **kwargs)
        self._finalize = Finalize(self, self.sync, exitpriority=5)
        self.max_interval = (kwargs.get('max_interval')
                             or self.app.conf.beat_max_loop_interval
                             or DEFAULT_MAX_INTERVAL)
Example #18
def sqp_initializer(*args):
    """The initializer used by the Scrape Quotes Pool"""
    # Preclude the need for worker processes to even care about KeyboardInterrupt in the first place.
    signal.signal(signal.SIGINT, signal.SIG_IGN)

    global driver_manager
    driver_creator = args[0]
    scraping_config = args[1]
    driver_manager = DriverManager(driver_creator, scraping_config)
    driver_manager.__enter__()
    Finalize(driver_manager, driver_manager.__exit__, exitpriority=16)

    log_queue = args[2]
    configure_log_dispatcher(log_queue)
Example #19
    def run(self):
        '''
        Bind the pub and pull sockets for events
        '''
        salt.utils.process.appendproctitle(self.__class__.__name__)
        self.io_loop = tornado.ioloop.IOLoop()
        with salt.utils.asynchronous.current_ioloop(self.io_loop):
            if self.opts['ipc_mode'] == 'tcp':
                epub_uri = int(self.opts['tcp_master_pub_port'])
                epull_uri = int(self.opts['tcp_master_pull_port'])
            else:
                epub_uri = os.path.join(
                    self.opts['sock_dir'],
                    'master_event_pub.ipc'
                )
                epull_uri = os.path.join(
                    self.opts['sock_dir'],
                    'master_event_pull.ipc'
                )

            self.publisher = salt.transport.ipc.IPCMessagePublisher(
                self.opts,
                epub_uri,
                io_loop=self.io_loop
            )

            self.puller = salt.transport.ipc.IPCMessageServer(
                epull_uri,
                io_loop=self.io_loop,
                payload_handler=self.handle_publish,
            )

            # Start the master event publisher
            old_umask = os.umask(0o177)
            try:
                self.publisher.start()
                self.puller.start()
                if (self.opts['ipc_mode'] != 'tcp' and (
                        self.opts['publisher_acl'] or
                        self.opts['external_auth'])):
                    os.chmod(os.path.join(
                        self.opts['sock_dir'], 'master_event_pub.ipc'), 0o666)
            finally:
                os.umask(old_umask)

            # Make sure the IO loop and respective sockets are closed and
            # destroyed
            Finalize(self, self.close, exitpriority=15)

            self.io_loop.start()
Example #20
    def __init__(self, file_=None, callbackfn=None,
                 desc='daemon lock', debug=False):

        self.pidfile = file_ if file_ else os.path.join(
                                                    os.path.dirname(__file__),
                                                    'running.lock')
        self.callbackfn = callbackfn
        self.desc = desc
        self.debug = debug
        self.held = False
        # run the lock automatically!
        self.lock()
        self._finalize = Finalize(self, DaemonLock._on_finalize,
                                    args=(self, debug), exitpriority=10)
Example #21
    def _start_thread(self):
        debug('Queue._start_thread()')

        # Start thread which transfers data from buffer to pipe
        self._buffer.clear()
        self._thread = threading.Thread(
            target=Queue._feed,
            args=(self._buffer, self._notempty, self._send,
                  self._wlock, self._writer.close),
            name='QueueFeederThread'
            )
        self._thread.daemon = True

        debug('doing self._thread.start()')
        self._thread.start()
        debug('... done self._thread.start()')

        # On process exit we will wait for data to be flushed to pipe.
        #
        # However, if this process created the queue then all
        # processes which use the queue will be descendants of this
        # process.  Therefore waiting for the queue to be flushed
        # is pointless once all the child processes have been joined.
        created_by_this_process = (self._opid == os.getpid())
        if not self._joincancelled and not created_by_this_process:
            self._jointhread = Finalize(
                self._thread, Queue._finalize_join,
                [weakref.ref(self._thread)],
                exitpriority=-5
                )

        # Send sentinel to the thread queue object when garbage collected
        self._close = Finalize(
            self, Queue._finalize_close,
            [self._buffer, self._notempty],
            exitpriority=10
            )
Example #22
    def extractor_pool(self, func, iterable):
        '''
        Extract items (billiard multiprocessing)
        :param func: function
        :param iterable: list
        '''
        _finalizers = list()
        p = Pool(processes=cpu_count())
        _finalizers.append(Finalize(p, p.terminate))
        try:
            p.map_async(func, iterable)
            p.close()
            p.join()
        finally:
            p.terminate()
Example #23
    def _start_thread(self):
        debug("Queue._start_thread()")

        # Start thread which transfers data from buffer to pipe
        self._buffer.clear()
        self._thread = threading.Thread(
            target=MPQueueFixed._feed,
            args=(
                self._buffer,
                self._notempty,
                self._send,
                self._wlock,
                self._writer.close,
            ),
            name="QueueFeederThread",
        )
        self._thread.daemon = True

        debug("doing self._thread.start()")
        self._thread.start()
        debug("... done self._thread.start()")

        # On process exit we will wait for data to be flushed to pipe.
        if not self._joincancelled:
            self._jointhread = Finalize(
                self._thread,
                Queue._finalize_join,
                [weakref.ref(self._thread)],
                exitpriority=-5,
            )

        # Send sentinel to the thread queue object when garbage collected
        self._close = Finalize(self,
                               Queue._finalize_close,
                               [self._buffer, self._notempty],
                               exitpriority=10)
Example #24
    def __init__(self, *args, **kwargs):
        """Initialize the database scheduler."""
        database_proxy.initialize(kwargs['app'].database)
        self.database_proxy = database_proxy
        self._dirty = set()
        Scheduler.__init__(self, *args, **kwargs)
        self._finalize = Finalize(self, self.sync, exitpriority=5)
        self.max_interval = (kwargs.get('max_interval')
                             or self.app.conf.beat_max_loop_interval
                             or DEFAULT_MAX_INTERVAL)
        self.database_proxy.create_tables(
            [PeriodicTask, PeriodicTasks, CrontabSchedule, IntervalSchedule,
             SolarSchedule],
            safe=True)
Example #25
    def __init__(self, processes=None, initializer=None, initargs=()):
        self._setup_queues()
        self._taskqueue = queue.Queue()
        self._cache = {}
        self._state = RUN

        if processes is None:
            try:
                processes = cpu_count()
            except NotImplementedError:
                processes = 1

        if initializer is not None and not hasattr(initializer, '__call__'):
            raise TypeError('initializer must be a callable')

        self._pool = []
        for i in range(processes):
            w = self.Process(
                target=worker,
                args=(self._inqueue, self._outqueue, initializer, initargs)
                )
            self._pool.append(w)
            w.name = w.name.replace('Process', 'PoolWorker')
            w.daemon = True
            w.start()

        self._task_handler = threading.Thread(
            target=Pool._handle_tasks,
            args=(self._taskqueue, self._quick_put, self._outqueue, self._pool)
            )
        self._task_handler.daemon = True
        self._task_handler._state = RUN
        self._task_handler.start()

        self._result_handler = threading.Thread(
            target=Pool._handle_results,
            args=(self._outqueue, self._quick_get, self._cache)
            )
        self._result_handler.daemon = True
        self._result_handler._state = RUN
        self._result_handler.start()

        self._terminate = Finalize(
            self, self._terminate_pool,
            args=(self._taskqueue, self._inqueue, self._outqueue, self._pool,
                  self._task_handler, self._result_handler, self._cache),
            exitpriority=15
            )
Example #26
    def start_monitor_thread(self):
        "Run check_subprocesses in a seperate thread. Kill it gracefully."
        from threading import Thread

        t = Thread(target=self.check_subprocesses)
        t.setDaemon(True)
        t.start()

        def kill_thread(self):
            "Let the child process checker finish"
            self.__stopping = True
            t.join()

        self.__term = Finalize(self,
                               kill_thread,
                               args=(self, ),
                               exitpriority=20)
Example #27
        def func_after_fork(tracer):

            def exit_routine():
                self.exit_routine()

            from multiprocessing.util import Finalize
            import signal
            Finalize(tracer, exit_routine, exitpriority=32)

            def term_handler(signalnum, frame):
                self.exit_routine()
            signal.signal(signal.SIGTERM, term_handler)

            tracer.clear()
            tracer._set_curr_stack_depth(1)

            if tracer._afterfork_cb:
                tracer._afterfork_cb(tracer, *tracer._afterfork_args, **tracer._afterfork_kwargs)
Example #28
def worker_init_entry_point(
        init_function, arg_queue=None, backup_arg_queue=None):
    kwargs = {}
    if arg_queue:
        try:
            kwargs = arg_queue.get(block=False)
        except queue.Empty:
            print("Argument queue empty. Using round robin arg queue.")
            kwargs = backup_arg_queue.get(block=True)
            backup_arg_queue.put(kwargs)

        # On exit we add the init args back to the queue so restarted workers
        # (e.g. when running with maxtasksperchild) will pick up init
        # arguments from a previously exited worker.
        Finalize(None, arg_queue.put, (kwargs,), exitpriority=1)

    print("Initializing worker: %s" % str(kwargs))
    init_function(**kwargs)
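Example #28's trick of pushing the kwargs back onto the queue at exit means workers recycled by maxtasksperchild inherit the arguments of the worker they replace. A runnable sketch of the same idea, with hypothetical token values:

import multiprocessing as mp
import os
import queue
from multiprocessing.util import Finalize

def init_worker(arg_queue):
    try:
        token = arg_queue.get(block=False)
    except queue.Empty:
        token = 'fallback'
    # Put the token back at worker exit so a respawned worker reuses it.
    Finalize(None, arg_queue.put, (token,), exitpriority=1)
    print('worker %d got %r' % (os.getpid(), token))

if __name__ == '__main__':
    tokens = mp.Manager().Queue()
    tokens.put('token-a')
    tokens.put('token-b')
    pool = mp.Pool(2, initializer=init_worker, initargs=(tokens,),
                   maxtasksperchild=1)
    pool.map(abs, range(4))   # worker turnover triggers re-initialization
    pool.close()
    pool.join()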
Example #29
def init_parallel_runner(*args):
    """
    This initializes a new runner in each child process that calls it. We need to make the runner global so we can
    access it from compute_for_sticker_mp / compute_for_event_mp
    Clean up from:
    https://stackoverflow.com/questions/24717468/context-managers-and-multiprocessing-pools/24724452#24724452
    :param args:
    :return:
    """
    global parallel_runner
    parallel_runner = args[0](extra_feature_classes=args[1], **args[2])

    try:
        current_niceness = os.nice(0)
        os.nice(current_niceness - 10)
    except (AttributeError, OSError):
        # os.nice may be missing (Windows) or the change not permitted
        pass

    Finalize(object, clean_up_mp, exitpriority=16)
Example #30
def pool_init(access_tokens, shared_countries, shared_cities, shared_count):
    global conn
    global api
    global countries
    global cities
    global count

    conn = pymysql.connect(host='localhost', user='******', password='******', db='vk17', charset='utf8')

    pname, _, pid = current_process().name.partition('-')
    access_token = access_tokens[int(pid) % len(access_tokens)]

    session = vk.Session(access_token=access_token)
    api = vk.API(session, v='5.62', lang='ru')

    countries = shared_countries
    cities = shared_cities
    count = shared_count

    Finalize(conn, conn.close, exitpriority=0)
Example #31
def setup_worker(f, *args):

    global resource

    url, schema = args

    f.schema = schema
    Session = sessionmaker()
    f.session = Session()

    resource_cm = open_resource(url, schema)
    E.debug(f"{os.getpid()}: setting up worker for resource={id(resource)}")
    old_resource = resource
    resource = resource_cm.__enter__()
    E.debug(f"{os.getpid()}: new worker for resource={id(resource)} (old_resource={id(old_resource)})")

    # Register a finalizer to flush table cache
    Finalize(resource, resource.__exit__, exitpriority=16)

    E.debug(f"{os.getpid()}: adding cache={id(resource.table_cache)} from resource={id(resource)} "
            f"to worker={id(f)}, session={id(f.session)}")
    f.table_cache = resource.table_cache
Example #32
class QoS(object):
    """Quality of Service guarantees.

    Only supports `prefetch_count` at this point.

    Arguments:
        channel (ChannelT): Connection channel.
        prefetch_count (int): Initial prefetch count (defaults to 0).
    """

    #: current prefetch count value
    prefetch_count = 0

    #: :class:`~collections.OrderedDict` of active messages.
    #: *NOTE*: Can only be modified by the consuming thread.
    _delivered = None

    #: acks can be done by other threads than the consuming thread.
    #: Instead of a mutex, which doesn't perform well here, we mark
    #: the delivery tags as dirty, so subsequent calls to append() can remove
    #: them.
    _dirty = None

    #: If disabled, unacked messages won't be restored at shutdown.
    restore_at_shutdown = True

    def __init__(self, channel, prefetch_count=0):
        self.channel = channel
        self.prefetch_count = prefetch_count or 0

        self._delivered = OrderedDict()
        self._delivered.restored = False
        self._dirty = set()
        self._quick_ack = self._dirty.add
        self._quick_append = self._delivered.__setitem__
        self._on_collect = Finalize(
            self, self.restore_unacked_once, exitpriority=1,
        )

    def can_consume(self):
        """Return true if the channel can be consumed from.

        Used to ensure the client adheres to currently active
        prefetch limits.
        """
        pcount = self.prefetch_count
        return not pcount or len(self._delivered) - len(self._dirty) < pcount

    def can_consume_max_estimate(self):
        """Return the maximum number of messages allowed to be returned.

        Returns an estimated number of messages that a consumer may be allowed
        to consume at once from the broker.  This is used for services where
        bulk 'get message' calls are preferred to many individual 'get message'
        calls - like SQS.

        Returns:
            int: greater than zero.
        """
        pcount = self.prefetch_count
        if pcount:
            return max(pcount - (len(self._delivered) - len(self._dirty)), 0)

    def append(self, message, delivery_tag):
        """Append message to transactional state."""
        if self._dirty:
            self._flush()
        self._quick_append(delivery_tag, message)

    def get(self, delivery_tag):
        return self._delivered[delivery_tag]

    def _flush(self):
        """Flush dirty (acked/rejected) tags from."""
        dirty = self._dirty
        delivered = self._delivered
        while 1:
            try:
                dirty_tag = dirty.pop()
            except KeyError:
                break
            delivered.pop(dirty_tag, None)

    def ack(self, delivery_tag):
        """Acknowledge message and remove from transactional state."""
        self._quick_ack(delivery_tag)

    def reject(self, delivery_tag, requeue=False):
        """Remove from transactional state and requeue message."""
        if requeue:
            self.channel._restore_at_beginning(self._delivered[delivery_tag])
        self._quick_ack(delivery_tag)

    def restore_unacked(self):
        """Restore all unacknowledged messages."""
        self._flush()
        delivered = self._delivered
        errors = []
        restore = self.channel._restore
        pop_message = delivered.popitem

        while delivered:
            try:
                _, message = pop_message()
            except KeyError:  # pragma: no cover
                break

            try:
                restore(message)
            except BaseException as exc:
                errors.append((exc, message))
        delivered.clear()
        return errors

    def restore_unacked_once(self, stderr=None):
        """Restore all unacknowledged messages at shutdown/gc collect.

        Note:
            Can only be called once for each instance, subsequent
            calls will be ignored.
        """
        self._on_collect.cancel()
        self._flush()
        stderr = sys.stderr if stderr is None else stderr
        state = self._delivered

        if not self.restore_at_shutdown or not self.channel.do_restore:
            return
        if getattr(state, 'restored', None):
            assert not state
            return
        try:
            if state:
                print(RESTORING_FMT.format(len(self._delivered)),
                      file=stderr)
                unrestored = self.restore_unacked()

                if unrestored:
                    errors, messages = list(zip(*unrestored))
                    print(RESTORE_PANIC_FMT.format(len(errors), errors),
                          file=stderr)
                    emergency_dump_state(messages, stderr=stderr)
        finally:
            state.restored = True

    def restore_visible(self, *args, **kwargs):
        """Restore any pending unackwnowledged messages.

        To be filled in for visibility_timeout style implementations.

        Note:
            Implementing this is optional; currently only the
            Redis transport uses it.
        """
        pass
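The prefetch accounting in can_consume and can_consume_max_estimate above is plain arithmetic over the delivered and dirty sets; a standalone illustration of the formulas:

prefetch_count = 10
delivered, dirty = 7, 2   # active messages vs. acked-but-unflushed tags

can_consume = not prefetch_count or delivered - dirty < prefetch_count
max_estimate = max(prefetch_count - (delivered - dirty), 0)

print(can_consume)    # True: 5 outstanding messages < 10 allowed
print(max_estimate)   # 5 more messages may be fetched in one bulk call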
Example #33
class QoS(object):
    """Quality of Service guarantees.

    Only supports `prefetch_count` at this point.

    :param channel: AMQ Channel.
    :keyword prefetch_count: Initial prefetch count (defaults to 0).

    """

    #: current prefetch count value
    prefetch_count = 0

    #: :class:`~collections.OrderedDict` of active messages.
    #: *NOTE*: Can only be modified by the consuming thread.
    _delivered = None

    #: acks can be done by other threads than the consuming thread.
    #: Instead of a mutex, which doesn't perform well here, we mark
    #: the delivery tags as dirty, so subsequent calls to append() can remove
    #: them.
    _dirty = None

    #: If disabled, unacked messages won't be restored at shutdown.
    restore_at_shutdown = True

    def __init__(self, channel, prefetch_count=0):
        self.channel = channel
        self.prefetch_count = prefetch_count or 0

        self._delivered = OrderedDict()
        self._delivered.restored = False
        self._dirty = set()
        self._quick_ack = self._dirty.add
        self._quick_append = self._delivered.__setitem__
        self._on_collect = Finalize(
            self, self.restore_unacked_once, exitpriority=1,
        )

    def can_consume(self):
        """Return true if the channel can be consumed from.

        Used to ensure the client adheres to currently active
        prefetch limits.

        """
        pcount = self.prefetch_count
        return not pcount or len(self._delivered) - len(self._dirty) < pcount

    def append(self, message, delivery_tag):
        """Append message to transactional state."""
        if self._dirty:
            self._flush()
        self._quick_append(delivery_tag, message)

    def get(self, delivery_tag):
        return self._delivered[delivery_tag]

    def _flush(self):
        """Flush dirty (acked/rejected) tags from."""
        dirty = self._dirty
        delivered = self._delivered
        while 1:
            try:
                dirty_tag = dirty.pop()
            except KeyError:
                break
            delivered.pop(dirty_tag, None)

    def ack(self, delivery_tag):
        """Acknowledge message and remove from transactional state."""
        self._quick_ack(delivery_tag)

    def reject(self, delivery_tag, requeue=False):
        """Remove from transactional state and requeue message."""
        if requeue:
            self.channel._restore_at_beginning(self._delivered[delivery_tag])
        self._quick_ack(delivery_tag)

    def restore_unacked(self):
        """Restore all unacknowledged messages."""
        self._flush()
        delivered = self._delivered
        errors = []
        restore = self.channel._restore
        pop_message = delivered.popitem

        while delivered:
            try:
                _, message = pop_message()
            except KeyError:  # pragma: no cover
                break

            try:
                restore(message)
            except BaseException as exc:
                errors.append((exc, message))
        delivered.clear()
        return errors

    def restore_unacked_once(self):
        """Restores all unacknowledged messages at shutdown/gc collect.

        Will only be done once for each instance.

        """
        self._on_collect.cancel()
        self._flush()
        state = self._delivered

        if not self.restore_at_shutdown or not self.channel.do_restore:
            return
        if getattr(state, 'restored', None):
            assert not state
            return
        try:
            if state:
                say('Restoring {0!r} unacknowledged message(s).',
                    len(self._delivered))
                unrestored = self.restore_unacked()

                if unrestored:
                    errors, messages = list(zip(*unrestored))
                    say('UNABLE TO RESTORE {0} MESSAGES: {1}',
                        len(errors), errors)
                    emergency_dump_state(messages)
        finally:
            state.restored = True