Code example #1
 def _run_callback(self, callback, *args, **kwargs):
     try:
         # Use a NullContext to ensure that all StackContexts are run
         # inside our blanket exception handler rather than outside.
         with stack_context.NullContext():
             callback(*args, **kwargs)
     except:
         logging.error("Uncaught exception, closing connection.",
                       exc_info=True)
         # Close the socket on an uncaught exception from a user callback
         # (It would eventually get closed when the socket object is
         # gc'd, but we don't want to rely on gc happening before we
         # run out of file descriptors)
         self.close()
         # Re-raise the exception so that IOLoop.handle_callback_exception
         # can see it and log the error
         raise
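
A minimal consumer-side sketch of the behaviour this blanket handler protects against (the address, delimiter, and callback names are assumptions, and this uses the old callback-style IOStream API): when a user callback raises, the connection is closed and the exception is re-raised so the IOLoop can log it.

    import socket

    from tornado import ioloop, iostream

    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    stream = iostream.IOStream(s)

    def on_headers(data):
        # A bug in user code: _run_callback catches this, closes the stream,
        # and re-raises so IOLoop.handle_callback_exception can log it.
        raise ValueError("unexpected header block: %r" % data)

    stream.connect(("localhost", 80),
                   lambda: stream.read_until(b"\r\n\r\n", on_headers))
    ioloop.IOLoop.current().start()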
Code example #2
 def add_callback_from_signal(self, callback, *args, **kwargs):
     with stack_context.NullContext():
         if thread.get_ident() != self._thread_ident:
             # if the signal is handled on another thread, we can add
             # it normally (modulo the NullContext)
             self.add_callback(callback, *args, **kwargs)
         else:
             # If we're on the IOLoop's thread, we cannot use
             # the regular add_callback because it may deadlock on
             # _callback_lock.  Blindly insert into self._callbacks.
             # This is safe because the GIL makes list.append atomic.
             # One subtlety is that if the signal interrupted the
             # _callback_lock block in IOLoop.start, we may modify
             # either the old or new version of self._callbacks,
             # but either way will work.
             self._callbacks.append(functools.partial(
                 stack_context.wrap(callback), *args, **kwargs))
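
A usage sketch for the method above (signal choice and callback names are assumptions): a Unix signal handler defers its real work onto the IOLoop via add_callback_from_signal, which is why the method has to be safe to call even when the signal interrupts the IOLoop thread itself.

    import signal

    from tornado.ioloop import IOLoop

    io_loop = IOLoop.current()

    def _shutdown():
        # Runs later on the IOLoop thread, outside signal-handler context.
        io_loop.stop()

    def _handle_sigterm(signum, frame):
        # Do as little as possible here; hand the rest to the IOLoop.
        io_loop.add_callback_from_signal(_shutdown)

    signal.signal(signal.SIGTERM, _handle_sigterm)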
Code example #3
File: beanstalkt.py Project: microdog/beanstalkt
    def _process_queue(self):
        if self._talking or not self._queue:
            return
        # pop a request off the queue and perform the send-receive interaction
        self._talking = True
        with stack_context.NullContext():
            req, cb = self._queue.popleft()
            command = req.cmd + b'\r\n'
            if req.body:
                command += req.body + b'\r\n'

            # write command and body to socket stream
            self._stream.write(command,
                    # when command is written: read line from socket stream
                    lambda: self._stream.read_until(b'\r\n',
                    # when a line has been read: return status and results
                    lambda data: self._recv(req, data, cb)))
Code example #4
File: iohandling.py Project: akatrevorjay/pyrox
        def _callback_wrapper():
            try:
                # Use a NullContext to ensure that all StackContexts are run
                # inside our blanket exception handler rather than outside.
                with stack_context.NullContext():
                    callback(*args, **kwargs)
            except Exception as ex:
                gen_log.error("Uncaught exception: %s", ex)

                # Close the socket on an uncaught exception from a user callback
                # (It would eventually get closed when the socket object is
                # gc'd, but we don't want to rely on gc happening before we
                # run out of file descriptors)
                channel.close()

                # Re-raise the exception so that IOLoop.handle_callback_exception
                # can see it and log the error
                raise
Code example #5
    def _process_queue(self):
        with stack_context.NullContext():
            if len(self.queue) and len(self.active) < 1:
                key, request, callback = self.queue.popleft()
                self.active[key] = (request, callback)
                release_callback = functools.partial(self._release_fetch, key)
                self._handle_request(request, release_callback, callback)

            if len(self.queue) == 0 and len(self.active) == 0:
                now = self.io_loop.time()
                self._idle_timeout_callback = self.io_loop.add_timeout(
                    now + self.idle_timeout,
                    stack_context.wrap(self._on_idle_timeout))

            else:

                if self._idle_timeout_callback:
                    self.io_loop.remove_timeout(self._idle_timeout_callback)
                    self._idle_timeout_callback = None
Code example #6
File: server_log.py Project: zorro0799/viewfinder
    def emit(self, record):
        """Emits the specified record by writing it to the in-memory log
    handler. If the size of the in-memory handler's buffer exceeds
    _max_buffer_bytes, flushes it to the object store.
    """
        if self._closing:
            return

        if self._buffer is None:
            self._NewBatch()

        self._inner_handler.emit(record)
        if self._buffer.tell() >= self._max_buffer_bytes:
            self.flush()
        elif not self._flush_timeout:
            deadline = self._start_timestamp + self._flush_interval_secs
            with stack_context.NullContext():
                self._flush_timeout = IOLoop.current().add_timeout(
                    deadline, self.flush)
Code example #7
  def _ScanAbandonedLocks(self):
    """Periodically scans the Locks table looking for abandoned operation
    locks. If any are found, the associated operations are executed.

    TODO(Andy): Scanning for abandoned locks really should go into a
                LockManager class. See header for lock.py.
    """
    max_timeout_secs = OpManager._MAX_SCAN_ABANDONED_LOCKS_INTERVAL.total_seconds()
    while True:
      # If there are too many active users, do not scan.
      if len(self._active_users) < self._MAX_USERS_OUTSTANDING:
        try:
          last_key = None
          while True:
            limit = min(self._MAX_USERS_OUTSTANDING - len(self._active_users), OpManager._SCAN_LIMIT)
            locks, last_key = yield gen.Task(Lock.ScanAbandoned,
                                             self._client,
                                             limit=limit,
                                             excl_start_key=last_key)

            for lock in locks:
              resource_type, resource_id = Lock.DeconstructLockId(lock.lock_id)
              if resource_type == LockResourceType.Operation:
                user_id = int(resource_id)
                logging.info('scanned operation lock for user %d' % user_id)
                # Create a clean context for this operation since we're not blocking the current
                # coroutine on it.
                with stack_context.NullContext():
                  with util.ExceptionBarrier(util.LogExceptionCallback):
                    self.MaybeExecuteOp(self._client, user_id, lock.resource_data)

            # Keep iterating until all abandoned locks have been found, otherwise wait until the next scan time.
            if last_key is None:
              break
        except Exception:
          logging.exception('abandoned lock scan failed')

      # Wait until next scan time.
      timeout_secs = random.random() * max_timeout_secs
      timeout_time = time.time() + timeout_secs
      logging.debug('next scan in %.2fs' % timeout_secs)
      yield gen.Task(IOLoop.current().add_timeout, timeout_time)
Code example #8
 def __init__(self, socket, io_loop=None, max_buffer_size=104857600,
              read_chunk_size=4096):
     self.socket = socket
     self.socket.setblocking(False)
     self.io_loop = io_loop or ioloop.IOLoop.instance()
     self.max_buffer_size = max_buffer_size
     self.read_chunk_size = read_chunk_size
     self._read_buffer = ""
     self._write_buffer = ""
     self._read_delimiter = None
     self._read_bytes = None
     self._read_callback = None
     self._write_callback = None
     self._close_callback = None
     self._connect_callback = None
     self._connecting = False
     self._state = self.io_loop.ERROR
     with stack_context.NullContext():
         self.io_loop.add_handler(
             self.socket.fileno(), self._handle_events, self._state)
Code example #9
    def process_pending_requests(self):
        with stack_context.NullContext():
            while (self.connection and self.connection.is_ready
                   and len(self.active_requests) < self.max_active_requests
                   and self.pending_requests):
                log.debug('Processing a new pending request!')
                key, request, callback = self.pending_requests.popleft()
                if key not in self.queue_timeouts:
                    continue

                request, callback, timeout_handle = self.queue_timeouts[key]
                if timeout_handle is not None:
                    IOLoop.current().remove_timeout(timeout_handle)
                del self.queue_timeouts[key]

                self.active_requests[key] = (request, callback)
                remove_from_active_cb = functools.partial(
                    self.remove_active, key)

                self.handle_request(request, remove_from_active_cb, callback)
Code example #10
 def _run_callback(self, callback, *args):
     def wrapper():
         self._pending_callbacks -= 1
         try:
             callback(*args)
         except Exception:
             logging.error("Uncaught exception, closing connection.", exc_info=True)
             self.close()  # Close the connection directly on an uncaught exception from the callback; relying on GC could exhaust file descriptors
             raise
         self._maybe_add_error_listener()
     # The wrapper above hands the callback to the IOLoop, which schedules it; the callback is
     # deferred to the next IOLoop iteration because this:
     #   1. Prevents unbounded stack growth from callbacks that call each other
     #   2. Provides a predictable execution context for non-reentrant mutexes
     #   3. Ensures the try/except inside wrapper runs outside the application's StackContexts
     with stack_context.NullContext():
         # stack_context was already captured in callback, we don't need to capture it again for IOStream's wrapper.
         # This is especially important if the callback was pre-wrapped before entry to IOStream
         # (as in HTTPConnection._header_callback), as we could capture and leak the wrong context here.
         self._pending_callbacks += 1
         self.io_loop.add_callback(wrapper)
Code example #11
  def _ScanFailedOps(self):
    """Periodically scans the Operation table for operations which have failed and are ready
    to retry. If any are found, they are retried to see if the error that originally caused
    them to fail has been fixed.
    """
    from viewfinder.backend.db.operation import Operation

    max_timeout_secs = OpManager._MAX_SCAN_FAILED_OPS_INTERVAL.total_seconds()
    while True:
      # If there are too many active users, do not scan.
      if len(self._active_users) < self._MAX_USERS_OUTSTANDING:
        try:
          last_key = None
          while True:
            limit = min(self._MAX_USERS_OUTSTANDING - len(self._active_users), OpManager._SCAN_LIMIT)
            ops, last_key = yield gen.Task(Operation.ScanFailed,
                                           self._client,
                                           limit=limit,
                                           excl_start_key=last_key)

            # Add each operation to the queue for the owning user.
            for op in ops:
              logging.info('scanned failed operation "%s" for user %d' % (op.operation_id, op.user_id))
              if op.user_id not in self._active_users:
                # Create a clean context for this operation since we're not blocking the current
                # coroutine on it.
                with stack_context.NullContext():
                  with util.ExceptionBarrier(util.LogExceptionCallback):
                    self.MaybeExecuteOp(self._client, op.user_id, op.operation_id)

            # Keep iterating until all failed operations have been found, otherwise wait until the next scan time.
            if last_key is None:
              break
        except Exception:
          logging.exception('failed op scan failed')

      # Wait until next scan time.
      timeout_secs = random.random() * max_timeout_secs
      timeout_time = time.time() + timeout_secs
      logging.debug('next scan in %.2fs' % timeout_secs)
      yield gen.Task(IOLoop.current().add_timeout, timeout_time)
Code example #12
    def _process_queue(self):
        with stack_context.NullContext():
            while True:
                started = 0
                while self._free_list and self._requests:
                    started += 1
                    curl = self._free_list.pop()
                    (request, callback) = self._requests.popleft()
                    curl.info = {
                        "headers": httputil.HTTPHeaders(),
                        "buffer": BytesIO(),
                        "request": request,
                        "callback": callback,
                        "curl_start_time": time.time(),
                    }
                    _curl_setup_request(curl, request, curl.info["buffer"],
                                        curl.info["headers"])
                    self._multi.add_handle(curl)

                if not started:
                    break
Code example #13
    def _process_queue(self):
        with stack_context.NullContext():
            while True:
                started = 0
                while self._free_list and self._requests:
                    started += 1
                    curl = self._free_list.pop()
                    (request, callback, queue_start_time) = self._requests.popleft()
                    curl.info = {
                        "headers": httputil.HTTPHeaders(),
                        "buffer": BytesIO(),
                        "request": request,
                        "callback": callback,
                        "queue_start_time": queue_start_time,
                        "curl_start_time": time.time(),
                        "curl_start_ioloop_time": self.io_loop.current().time(),
                    }
                    try:
                        self._curl_setup_request(
                            curl, request, curl.info["buffer"],
                            curl.info["headers"])
                    except Exception as e:
                        # If there was an error in setup, pass it on
                        # to the callback. Note that allowing the
                        # error to escape here will appear to work
                        # most of the time since we are still in the
                        # caller's original stack frame, but when
                        # _process_queue() is called from
                        # _finish_pending_requests the exceptions have
                        # nowhere to go.
                        self._free_list.append(curl)
                        callback(HTTPResponse(
                            request=request,
                            code=599,
                            error=e))
                    else:
                        self._multi.add_handle(curl)

                if not started:
                    break
Code example #14
    def _add_io_state(self, state):
        """Adds `state` (IOLoop.{READ,WRITE} flags) to our event handler.

        Implementation notes: Reads and writes have a fast path and a
        slow path.  The fast path reads synchronously from socket
        buffers, while the slow path uses `_add_io_state` to schedule
        an IOLoop callback.  Note that in both cases, the callback is
        run asynchronously with `_run_callback`.

        To detect closed connections, we must have called
        `_add_io_state` at some point, but we want to delay this as
        much as possible so we don't have to set an `IOLoop.ERROR`
        listener that will be overwritten by the next slow-path
        operation.  As long as there are callbacks scheduled for
        fast-path ops, those callbacks may do more reads.
        If a sequence of fast-path ops do not end in a slow-path op,
        (e.g. for an @asynchronous long-poll request), we must add
        the error handler.  This is done in `_run_callback` and `write`
        (since the write callback is optional so we can have a
        fast-path write with no `_run_callback`)
        """

        # Registers the IOLoop.READ or IOLoop.WRITE state with the IOLoop handler;
        # the handler is this IOStream object's _handle_events method.

        # _add_io_state mainly updates our own state and tells the IOLoop to watch for the new events.
        if self.closed():
            # connection has been closed, so there can be no future events
            return
        if self._state is None:
            self._state = ioloop.IOLoop.ERROR | state
            # Add the client socket fd to epoll and register IOStream's _handle_events with the
            # while loop in IOLoop.start(), which watches the socket's state and eventually calls
            # _handle_events to send the processed response back to the user.
            with stack_context.NullContext():
                self.io_loop.add_handler(self.fileno(), self._handle_events,
                                         self._state)
        elif not self._state & state:
            self._state = self._state | state
            self.io_loop.update_handler(self.fileno(), self._state)
Code example #15
    def _run_callback(self, callback, *args):
        def wrapper():
            self._pending_callbacks -= 1
            try:
                callback(*args)
            except Exception:
                #logging.error("Uncaught exception, closing connection.",
                #              exc_info=True)
                ht.logger.error("Uncaught exception, closing connection.",
                                exc_info=True)
                # Close the socket on an uncaught exception from a user callback
                # (It would eventually get closed when the socket object is
                # gc'd, but we don't want to rely on gc happening before we
                # run out of file descriptors)
                self.close()
                # Re-raise the exception so that IOLoop.handle_callback_exception
                # can see it and log the error
                raise
            self._maybe_add_error_listener()

        # We schedule callbacks to be run on the next IOLoop iteration
        # rather than running them directly for several reasons:
        # * Prevents unbounded stack growth when a callback calls an
        #   IOLoop operation that immediately runs another callback
        # * Provides a predictable execution context for e.g.
        #   non-reentrant mutexes
        # * Ensures that the try/except in wrapper() is run outside
        #   of the application's StackContexts
        with stack_context.NullContext():
            # stack_context was already captured in callback, we don't need to
            # capture it again for IOStream's wrapper.  This is especially
            # important if the callback was pre-wrapped before entry to
            # IOStream (as in HTTPConnection._header_callback), as we could
            # capture and leak the wrong context here.
            self._pending_callbacks += 1
            self.io_loop.add_callback(wrapper)
Code example #16
 def get(self):
     self.redirect(self._url, permanent=self._permanent)
     # Don't block the user request while we fire an asynchronous
     # request to GA.
     with stack_context.NullContext():
         IOLoop.current().add_callback(self.LogAnalytics)
Code example #17
File: util.py Project: dervn/shubz
def delay_call(func, *arg, **kwargs):
    with stack_context.NullContext():
        io = ioloop.IOLoop.instance()
        io.add_callback(functools.partial(func, *arg, **kwargs))
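
A usage sketch for the helper above (send_email is a hypothetical stand-in): the partial is queued on the IOLoop outside the caller's StackContext, so it runs on a later loop iteration without inheriting the caller's exception handlers.

    def send_email(address, subject=None):
        print("sending to %s: %s" % (address, subject))

    delay_call(send_email, "user@example.com", subject="Welcome")
    ioloop.IOLoop.instance().start()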
Code example #18
File: server.py Project: zorro0799/viewfinder
def StartServer(serve_webapp=True, serve_static_web=True, serve_admin=True):
    """Initialize the datastore and operation manager with the viewfinder schema. This typically
  verifies the schema. If the schema does not yet exist, it is created.

  Defines settings dictionary and sets up main application with list of handlers.
  """
    client = db_client.DBClient.Instance()

    settings = {
        'gzip': True,
        'login_url': '/',
        'admin_login_url': '/admin/otp',
        'domain': options.options.domain,
        'server_version': options.options.server_version,
        'cookie_secret': secrets.GetSecret('cookie_secret'),
        'facebook_api_key': secrets.GetSecret('facebook_api_key'),
        'facebook_secret': secrets.GetSecret('facebook_secret'),
        'google_client_id': secrets.GetSecret('google_client_id'),
        'google_client_secret': secrets.GetSecret('google_client_secret'),
        'google_client_mobile_id': secrets.GetSecret('google_client_mobile_id'),
        'google_client_mobile_secret': secrets.GetSecret('google_client_mobile_secret'),
        'template_path': ResourcesManager.Instance().template_path,
        'ui_modules': uimodules,
        'xsrf_cookies': options.options.enable_xsrf,
        'debug': options.options.server_debug,
        'static_path': ResourcesManager.Instance().static_path,
    }

    if options.options.log_file_prefix:
        settings['logs_dir'] = os.path.dirname(options.options.log_file_prefix)

    # Configure metrics uploading.
    if options.options.upload_metrics:
        for interval in metric.METRIC_INTERVALS:
            metric.Metric.StartMetricUpload(client,
                                            metric.DEFAULT_CLUSTER_NAME,
                                            interval)

    # Setup application and SSL HTTP server.
    handlers = deepcopy(COMMON_HANDLERS)
    if serve_webapp:
        # Configure web application handlers.
        webapp_handlers = deepcopy(WEBAPP_HANDLERS)

        # Initialize the file object store if specified.
        obj_store = ObjectStore.GetInstance(ObjectStore.PHOTO)
        settings['obj_store'] = obj_store
        if options.options.fileobjstore:
            for store_name, content_type in ((ObjectStore.PHOTO,
                                              r'image/jpeg'),
                                             (ObjectStore.USER_LOG,
                                              r'text/plain'),
                                             (ObjectStore.USER_ZIPS,
                                              r'application/zip')):
                webapp_handlers.append(
                    (r'/fileobjstore/%s/(.*)' % store_name,
                     file_object_store.FileObjectStoreHandler, {
                         'storename': store_name,
                         'contenttype': content_type
                     }))

        if ServerEnvironment.IsDevBox():
            webapp_handlers.append((r'/(link|login|register)/fakeviewfinder',
                                    auth_viewfinder.FakeAuthViewfinderHandler))
            # Set the testing directories.
            if options.options.testing_path is not None:
                webapp_handlers.append(
                    (r'/testing/hook/(.*)', test_hook.TestHookHandler))
                webapp_handlers.append(
                    (r'/testing/static/(.*)', web.StaticFileHandler, {
                        'path': '%s' % options.options.testing_path
                    }))

        handlers.extend(webapp_handlers)

    if serve_static_web:
        # Configure static web handlers.
        static_web_handlers = deepcopy(STATIC_WEB_HANDLERS)
        handlers.extend(static_web_handlers)

    if serve_admin:
        # Configure and verify admin handlers.
        admin_handlers = deepcopy(ADMIN_HANDLERS)
        for path, handler in admin_handlers:
            if not issubclass(handler, basic_auth.BasicAuthHandler):
                raise TypeError('Administration handlers must '
                                'subclass BasicAuthHandler')
        handlers.extend(admin_handlers)

    # Catch-all handler for 404 pages.
    handlers.extend([(r'/.*', base.PageNotFoundHandler)])

    # Create application and separately add handlers for the short domain and the
    # regular domain.
    #
    # Note that, although the short-domain handlers are added after the initial construction
    # of the Application, those routes will take priority over the routes in the handlers
    # array.
    application = web.Application(handlers, **settings)
    application.add_handlers(re.escape(options.options.short_domain),
                             SHORT_DOMAIN_HANDLERS)

    # Start the HTTP server.
    http_server = httpserver.HTTPServer(
        application,
        xheaders=options.options.xheaders,
        ssl_options={
            'certfile': secrets.GetSecretFile('%s.crt' % settings['domain']),
            'keyfile': secrets.GetSecretFile('%s.key' % settings['domain']),
        } if options.options.ssl else None)
    with stack_context.NullContext():
        http_server.listen(options.options.port)

    # Setup redirect server for HTTP -> HTTPS.
    if options.options.ssl:
        http_settings = {
            'host': ServerEnvironment.GetHost(),
            'redirect_port': options.options.redirect_port,
            'xheaders': options.options.xheaders,
        }

        redirect_handlers = [
            (r'/(.*)', index.RedirectHandler),
        ]
        redirect_server = httpserver.HTTPServer(
            web.Application(redirect_handlers, **http_settings))
        with stack_context.NullContext():
            redirect_server.listen(options.options.insecure_port)

    # Ensure that system users have been created if running with a local db (needs server to be running).
    if options.options.localdb:
        yield CreateSystemUsers(client)

    # Run the server until it hits an exception or stop signal.
    yield gen.Task(lambda callback: None)
Code example #19
File: zmqstream.py Project: tlockney/Computable
 def _init_io_state(self):
     """initialize the ioloop event handler"""
     with stack_context.NullContext():
         self.io_loop.add_handler(self.socket, self._handle_events, self._state)
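
A usage sketch (socket type and endpoint are assumptions): constructing a ZMQStream is what ends up calling _init_io_state above, registering the 0MQ socket with the IOLoop so that on_recv callbacks can fire.

    import zmq
    from zmq.eventloop.zmqstream import ZMQStream

    ctx = zmq.Context.instance()
    sock = ctx.socket(zmq.PULL)
    sock.bind("tcp://127.0.0.1:5555")

    stream = ZMQStream(sock)  # registers the socket's fd with the current IOLoop
    stream.on_recv(lambda frames: print(frames))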
Code example #20
 def spawn_callback(self, callback, *args, **kwargs):
     with stack_context.NullContext():
         self.add_callback(callback, *args, **kwargs)
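
A usage sketch (audit_log is a hypothetical example): spawn_callback schedules fire-and-forget work for the next IOLoop iteration without associating it with the caller's stack_context, which is exactly what the NullContext in the implementation above provides.

    from tornado.ioloop import IOLoop

    def audit_log(event):
        print("analytics event: %s" % event)

    IOLoop.current().spawn_callback(audit_log, "user_login")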
Code example #21
File: ioloop.py Project: www3838438/tornado
 def add_callback_from_signal(self, callback, *args, **kwargs):
     with stack_context.NullContext():
         self.add_callback(callback, *args, **kwargs)
Code example #22
File: operation.py Project: zorro0799/viewfinder
    def CreateAndExecute(
            cls,
            client,
            user_id,
            device_id,
            method,
            args,
            callback,
            message_version=message.MAX_SUPPORTED_MESSAGE_VERSION):
        """Creates a new operation with 'method' and 'args' describing the operation. After
    successfully creating the operation, the operation is asynchronously executed. Returns
    the op that was executed.
    """
        # Get useful headers and strip all else.
        headers = args.pop('headers', {})
        synchronous = headers.pop('synchronous', False)

        # Validate the op_id and op_timestamp fields.
        op_id = headers.pop('op_id', None)
        op_timestamp = headers.pop('op_timestamp', None)
        assert (op_id is not None) == (op_timestamp
                                       is not None), (op_id, op_timestamp)

        # Validate that op_id is correctly formed and is allowed to be generated by the current device.
        # No need to do this if the op_id was generated by the system as part of message upgrade.
        if op_id is not None and headers.get(
                'original_version',
                0) >= message.Message.ADD_OP_HEADER_VERSION:
            yield Operation.VerifyOperationId(client, user_id, device_id,
                                              op_id)

        # Use the op_id provided by the user, or generate a system op-id.
        if op_id is None:
            op_id = yield gen.Task(Operation.AllocateSystemOperationId, client)

        # Possibly migrate backwards to a message version that is compatible with older versions of the
        # server that may still be running.
        op_message = message.Message(
            args, default_version=message.MAX_MESSAGE_VERSION)
        yield gen.Task(op_message.Migrate,
                       client,
                       migrate_version=message_version,
                       migrators=OpManager.Instance().op_map[method].migrators)

        op = Operation(user_id, op_id)
        op.device_id = device_id
        op.method = method
        op.json = json.dumps(args)
        op.attempts = 0

        # Set timestamp to header value if it was specified, or current timestamp if not.
        if op_timestamp is not None:
            op.timestamp = op_timestamp
        else:
            op.timestamp = util.GetCurrentTimestamp()

        # Set expired backoff so that if this process fails before the op can be executed, in the worst
        # case it will eventually get picked up by the OpManager's scan for failed ops. Note that in
        # rare cases, this may mean that the op gets picked up immediately by another server (i.e. even
        # though the current server has *not* failed), but that is fine -- it doesn't really matter what
        # server executes the op, it just matters that the op gets executed in a timely manner.
        op.backoff = 0

        # Try to create the operation if it does not yet exist.
        try:
            yield gen.Task(op.Update, client, expected={'operation_id': False})

            # Execute the op according to the 'synchronous' parameter. If 'synchronous' is True, the
            # callback is invoked only after the operation has completed. Useful during unittests to
            # ensure the mutations wrought by the operation are queryable.
            logging.info('PERSIST: user: %d, device: %d, op: %s, method: %s' %
                         (user_id, device_id, op_id, method))
        except Exception:
            # Return existing op.
            logging.warning('operation "%s" already exists', op_id)
            existing_op = yield gen.Task(Operation.Query,
                                         client,
                                         user_id,
                                         op_id,
                                         None,
                                         must_exist=False)
            if existing_op is not None:
                op = existing_op

        # If not synchronous, we fire the callback, but continue to execute.
        if not synchronous:
            callback(op)

            # Establish new "clean" context in which to execute the operation. The operation should not rely
            # on any context, since it may end up being run on a completely different machine. In addition,
            # establish an exception barrier in order to handle any bugs or asserts, rather than letting the
            # context established for the request handle it (since it will have already completed).
            with stack_context.NullContext():
                with util.ExceptionBarrier(util.LogExceptionCallback):
                    OpManager.Instance().MaybeExecuteOp(
                        client, user_id, op.operation_id)
        else:
            # Let exceptions flow up to request context so they'll be put into an error response.
            OpManager.Instance().MaybeExecuteOp(client, user_id,
                                                op.operation_id,
                                                partial(callback, op))